  1. //===-- Execution.cpp - Implement code to simulate the program ------------===//
  2. //
  3. // The LLVM Compiler Infrastructure
  4. //
  5. // This file is distributed under the University of Illinois Open Source
  6. // License. See LICENSE.TXT for details.
  7. //
  8. //===----------------------------------------------------------------------===//
  9. //
  10. // This file contains the actual instruction interpreter.
  11. //
  12. //===----------------------------------------------------------------------===//
  13. #include "Interpreter.h"
  14. #include "llvm/ADT/APInt.h"
  15. #include "llvm/ADT/Statistic.h"
  16. #include "llvm/CodeGen/IntrinsicLowering.h"
  17. #include "llvm/IR/Constants.h"
  18. #include "llvm/IR/DerivedTypes.h"
  19. #include "llvm/IR/GetElementPtrTypeIterator.h"
  20. #include "llvm/IR/Instructions.h"
  21. #include "llvm/Support/CommandLine.h"
  22. #include "llvm/Support/Debug.h"
  23. #include "llvm/Support/ErrorHandling.h"
  24. #include "llvm/Support/MathExtras.h"
  25. #include "llvm/Support/raw_ostream.h"
  26. #include <algorithm>
  27. #include <cmath>
  28. using namespace llvm;
  29. #define DEBUG_TYPE "interpreter"
  30. STATISTIC(NumDynamicInsts, "Number of dynamic instructions executed");
  31. static cl::opt<bool> PrintVolatile("interpreter-print-volatile", cl::Hidden,
  32. cl::desc("make the interpreter print every volatile load and store"));
  33. //===----------------------------------------------------------------------===//
  34. // Various Helper Functions
  35. //===----------------------------------------------------------------------===//
// Bind the computed value Val to the IR value V in stack frame SF,
// overwriting any previous binding for V.
static void SetValue(Value *V, GenericValue Val, ExecutionContext &SF) {
  SF.Values[V] = Val;
}
  39. //===----------------------------------------------------------------------===//
  40. // Binary Instruction Implementations
  41. //===----------------------------------------------------------------------===//
// Expand to one switch case that applies binary operator OP to the TY-typed
// members of Src1/Src2 (e.g. FloatVal, DoubleVal), storing into Dest.
#define IMPLEMENT_BINARY_OPERATOR(OP, TY) \
   case Type::TY##TyID: \
     Dest.TY##Val = Src1.TY##Val OP Src2.TY##Val; \
     break
  46. static void executeFAddInst(GenericValue &Dest, GenericValue Src1,
  47. GenericValue Src2, Type *Ty) {
  48. switch (Ty->getTypeID()) {
  49. IMPLEMENT_BINARY_OPERATOR(+, Float);
  50. IMPLEMENT_BINARY_OPERATOR(+, Double);
  51. default:
  52. dbgs() << "Unhandled type for FAdd instruction: " << *Ty << "\n";
  53. llvm_unreachable(nullptr);
  54. }
  55. }
  56. static void executeFSubInst(GenericValue &Dest, GenericValue Src1,
  57. GenericValue Src2, Type *Ty) {
  58. switch (Ty->getTypeID()) {
  59. IMPLEMENT_BINARY_OPERATOR(-, Float);
  60. IMPLEMENT_BINARY_OPERATOR(-, Double);
  61. default:
  62. dbgs() << "Unhandled type for FSub instruction: " << *Ty << "\n";
  63. llvm_unreachable(nullptr);
  64. }
  65. }
  66. static void executeFMulInst(GenericValue &Dest, GenericValue Src1,
  67. GenericValue Src2, Type *Ty) {
  68. switch (Ty->getTypeID()) {
  69. IMPLEMENT_BINARY_OPERATOR(*, Float);
  70. IMPLEMENT_BINARY_OPERATOR(*, Double);
  71. default:
  72. dbgs() << "Unhandled type for FMul instruction: " << *Ty << "\n";
  73. llvm_unreachable(nullptr);
  74. }
  75. }
  76. static void executeFDivInst(GenericValue &Dest, GenericValue Src1,
  77. GenericValue Src2, Type *Ty) {
  78. switch (Ty->getTypeID()) {
  79. IMPLEMENT_BINARY_OPERATOR(/, Float);
  80. IMPLEMENT_BINARY_OPERATOR(/, Double);
  81. default:
  82. dbgs() << "Unhandled type for FDiv instruction: " << *Ty << "\n";
  83. llvm_unreachable(nullptr);
  84. }
  85. }
  86. static void executeFRemInst(GenericValue &Dest, GenericValue Src1,
  87. GenericValue Src2, Type *Ty) {
  88. switch (Ty->getTypeID()) {
  89. case Type::FloatTyID:
  90. Dest.FloatVal = fmod(Src1.FloatVal, Src2.FloatVal);
  91. break;
  92. case Type::DoubleTyID:
  93. Dest.DoubleVal = fmod(Src1.DoubleVal, Src2.DoubleVal);
  94. break;
  95. default:
  96. dbgs() << "Unhandled type for Rem instruction: " << *Ty << "\n";
  97. llvm_unreachable(nullptr);
  98. }
  99. }
// Expand to a switch case comparing scalar integers with the APInt predicate
// OP (eq/ne/ult/slt/...), producing an i1 result in Dest.IntVal.
#define IMPLEMENT_INTEGER_ICMP(OP, TY) \
   case Type::IntegerTyID:  \
      Dest.IntVal = APInt(1,Src1.IntVal.OP(Src2.IntVal)); \
      break;

// Element-wise form of the above for integer vectors: fills
// Dest.AggregateVal with one i1 per element.
#define IMPLEMENT_VECTOR_INTEGER_ICMP(OP, TY)                        \
  case Type::VectorTyID: {                                           \
    assert(Src1.AggregateVal.size() == Src2.AggregateVal.size());    \
    Dest.AggregateVal.resize( Src1.AggregateVal.size() );            \
    for( uint32_t _i=0;_i<Src1.AggregateVal.size();_i++)             \
      Dest.AggregateVal[_i].IntVal = APInt(1,                        \
      Src1.AggregateVal[_i].IntVal.OP(Src2.AggregateVal[_i].IntVal));\
  } break;

// Handle pointers specially because they must be compared with only as much
// width as the host has. We _do not_ want to be comparing 64 bit values when
// running on a 32-bit target, otherwise the upper 32 bits might mess up
// comparisons if they contain garbage.
#define IMPLEMENT_POINTER_ICMP(OP) \
   case Type::PointerTyID: \
      Dest.IntVal = APInt(1,(void*)(intptr_t)Src1.PointerVal OP \
                            (void*)(intptr_t)Src2.PointerVal); \
      break;
// icmp eq: integer, integer-vector, and pointer operands.
static GenericValue executeICMP_EQ(GenericValue Src1, GenericValue Src2,
                                   Type *Ty) {
  GenericValue Dest;
  switch (Ty->getTypeID()) {
    IMPLEMENT_INTEGER_ICMP(eq,Ty);
    IMPLEMENT_VECTOR_INTEGER_ICMP(eq,Ty);
    IMPLEMENT_POINTER_ICMP(==);
  default:
    dbgs() << "Unhandled type for ICMP_EQ predicate: " << *Ty << "\n";
    llvm_unreachable(nullptr);
  }
  return Dest;
}

// icmp ne: integer, integer-vector, and pointer operands.
static GenericValue executeICMP_NE(GenericValue Src1, GenericValue Src2,
                                   Type *Ty) {
  GenericValue Dest;
  switch (Ty->getTypeID()) {
    IMPLEMENT_INTEGER_ICMP(ne,Ty);
    IMPLEMENT_VECTOR_INTEGER_ICMP(ne,Ty);
    IMPLEMENT_POINTER_ICMP(!=);
  default:
    dbgs() << "Unhandled type for ICMP_NE predicate: " << *Ty << "\n";
    llvm_unreachable(nullptr);
  }
  return Dest;
}

// icmp ult: unsigned less-than. Pointers compare with host pointer ordering.
static GenericValue executeICMP_ULT(GenericValue Src1, GenericValue Src2,
                                    Type *Ty) {
  GenericValue Dest;
  switch (Ty->getTypeID()) {
    IMPLEMENT_INTEGER_ICMP(ult,Ty);
    IMPLEMENT_VECTOR_INTEGER_ICMP(ult,Ty);
    IMPLEMENT_POINTER_ICMP(<);
  default:
    dbgs() << "Unhandled type for ICMP_ULT predicate: " << *Ty << "\n";
    llvm_unreachable(nullptr);
  }
  return Dest;
}

// icmp slt: signed less-than. NOTE(review): pointers reuse the same host '<'
// comparison as the unsigned form.
static GenericValue executeICMP_SLT(GenericValue Src1, GenericValue Src2,
                                    Type *Ty) {
  GenericValue Dest;
  switch (Ty->getTypeID()) {
    IMPLEMENT_INTEGER_ICMP(slt,Ty);
    IMPLEMENT_VECTOR_INTEGER_ICMP(slt,Ty);
    IMPLEMENT_POINTER_ICMP(<);
  default:
    dbgs() << "Unhandled type for ICMP_SLT predicate: " << *Ty << "\n";
    llvm_unreachable(nullptr);
  }
  return Dest;
}

// icmp ugt: unsigned greater-than.
static GenericValue executeICMP_UGT(GenericValue Src1, GenericValue Src2,
                                    Type *Ty) {
  GenericValue Dest;
  switch (Ty->getTypeID()) {
    IMPLEMENT_INTEGER_ICMP(ugt,Ty);
    IMPLEMENT_VECTOR_INTEGER_ICMP(ugt,Ty);
    IMPLEMENT_POINTER_ICMP(>);
  default:
    dbgs() << "Unhandled type for ICMP_UGT predicate: " << *Ty << "\n";
    llvm_unreachable(nullptr);
  }
  return Dest;
}

// icmp sgt: signed greater-than.
static GenericValue executeICMP_SGT(GenericValue Src1, GenericValue Src2,
                                    Type *Ty) {
  GenericValue Dest;
  switch (Ty->getTypeID()) {
    IMPLEMENT_INTEGER_ICMP(sgt,Ty);
    IMPLEMENT_VECTOR_INTEGER_ICMP(sgt,Ty);
    IMPLEMENT_POINTER_ICMP(>);
  default:
    dbgs() << "Unhandled type for ICMP_SGT predicate: " << *Ty << "\n";
    llvm_unreachable(nullptr);
  }
  return Dest;
}

// icmp ule: unsigned less-or-equal.
static GenericValue executeICMP_ULE(GenericValue Src1, GenericValue Src2,
                                    Type *Ty) {
  GenericValue Dest;
  switch (Ty->getTypeID()) {
    IMPLEMENT_INTEGER_ICMP(ule,Ty);
    IMPLEMENT_VECTOR_INTEGER_ICMP(ule,Ty);
    IMPLEMENT_POINTER_ICMP(<=);
  default:
    dbgs() << "Unhandled type for ICMP_ULE predicate: " << *Ty << "\n";
    llvm_unreachable(nullptr);
  }
  return Dest;
}

// icmp sle: signed less-or-equal.
static GenericValue executeICMP_SLE(GenericValue Src1, GenericValue Src2,
                                    Type *Ty) {
  GenericValue Dest;
  switch (Ty->getTypeID()) {
    IMPLEMENT_INTEGER_ICMP(sle,Ty);
    IMPLEMENT_VECTOR_INTEGER_ICMP(sle,Ty);
    IMPLEMENT_POINTER_ICMP(<=);
  default:
    dbgs() << "Unhandled type for ICMP_SLE predicate: " << *Ty << "\n";
    llvm_unreachable(nullptr);
  }
  return Dest;
}

// icmp uge: unsigned greater-or-equal.
static GenericValue executeICMP_UGE(GenericValue Src1, GenericValue Src2,
                                    Type *Ty) {
  GenericValue Dest;
  switch (Ty->getTypeID()) {
    IMPLEMENT_INTEGER_ICMP(uge,Ty);
    IMPLEMENT_VECTOR_INTEGER_ICMP(uge,Ty);
    IMPLEMENT_POINTER_ICMP(>=);
  default:
    dbgs() << "Unhandled type for ICMP_UGE predicate: " << *Ty << "\n";
    llvm_unreachable(nullptr);
  }
  return Dest;
}

// icmp sge: signed greater-or-equal.
static GenericValue executeICMP_SGE(GenericValue Src1, GenericValue Src2,
                                    Type *Ty) {
  GenericValue Dest;
  switch (Ty->getTypeID()) {
    IMPLEMENT_INTEGER_ICMP(sge,Ty);
    IMPLEMENT_VECTOR_INTEGER_ICMP(sge,Ty);
    IMPLEMENT_POINTER_ICMP(>=);
  default:
    dbgs() << "Unhandled type for ICMP_SGE predicate: " << *Ty << "\n";
    llvm_unreachable(nullptr);
  }
  return Dest;
}
// Execute an icmp instruction: fetch both operand values from the current
// stack frame, dispatch on the predicate, and bind the i1 (or i1-vector)
// result to the instruction.
void Interpreter::visitICmpInst(ICmpInst &I) {
  ExecutionContext &SF = ECStack.back();
  Type *Ty = I.getOperand(0)->getType();
  GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
  GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
  GenericValue R;   // Result
  switch (I.getPredicate()) {
  case ICmpInst::ICMP_EQ:  R = executeICMP_EQ(Src1,  Src2, Ty); break;
  case ICmpInst::ICMP_NE:  R = executeICMP_NE(Src1,  Src2, Ty); break;
  case ICmpInst::ICMP_ULT: R = executeICMP_ULT(Src1, Src2, Ty); break;
  case ICmpInst::ICMP_SLT: R = executeICMP_SLT(Src1, Src2, Ty); break;
  case ICmpInst::ICMP_UGT: R = executeICMP_UGT(Src1, Src2, Ty); break;
  case ICmpInst::ICMP_SGT: R = executeICMP_SGT(Src1, Src2, Ty); break;
  case ICmpInst::ICMP_ULE: R = executeICMP_ULE(Src1, Src2, Ty); break;
  case ICmpInst::ICMP_SLE: R = executeICMP_SLE(Src1, Src2, Ty); break;
  case ICmpInst::ICMP_UGE: R = executeICMP_UGE(Src1, Src2, Ty); break;
  case ICmpInst::ICMP_SGE: R = executeICMP_SGE(Src1, Src2, Ty); break;
  default:
    dbgs() << "Don't know how to handle this ICmp predicate!\n-->" << I;
    llvm_unreachable(nullptr);
  }
  SetValue(&I, R, SF);
}
// Expand to a switch case comparing the scalar TY-typed members of Src1/Src2
// with host operator OP, producing an i1 result in Dest.IntVal.
#define IMPLEMENT_FCMP(OP, TY) \
   case Type::TY##TyID: \
     Dest.IntVal = APInt(1,Src1.TY##Val OP Src2.TY##Val); \
     break

// Element-wise comparison body for a vector whose elements are TY: fills
// Dest.AggregateVal with one i1 per lane.
#define IMPLEMENT_VECTOR_FCMP_T(OP, TY)                               \
  assert(Src1.AggregateVal.size() == Src2.AggregateVal.size());       \
  Dest.AggregateVal.resize( Src1.AggregateVal.size() );               \
  for( uint32_t _i=0;_i<Src1.AggregateVal.size();_i++)                \
    Dest.AggregateVal[_i].IntVal = APInt(1,                           \
    Src1.AggregateVal[_i].TY##Val OP Src2.AggregateVal[_i].TY##Val);  \
  break;

// Vector case: dispatch on the element type (float vs double) and run the
// element-wise comparison above.
#define IMPLEMENT_VECTOR_FCMP(OP)                                     \
  case Type::VectorTyID:                                              \
    if (cast<VectorType>(Ty)->getElementType()->isFloatTy()) {        \
      IMPLEMENT_VECTOR_FCMP_T(OP, Float);                             \
    } else {                                                          \
      IMPLEMENT_VECTOR_FCMP_T(OP, Double);                            \
    }
// fcmp oeq: ordered equality. Uses the host '==', which per IEEE yields
// false when either operand is NaN, so no explicit NaN guard is needed.
static GenericValue executeFCMP_OEQ(GenericValue Src1, GenericValue Src2,
                                    Type *Ty) {
  GenericValue Dest;
  switch (Ty->getTypeID()) {
    IMPLEMENT_FCMP(==, Float);
    IMPLEMENT_FCMP(==, Double);
    IMPLEMENT_VECTOR_FCMP(==);
  default:
    dbgs() << "Unhandled type for FCmp EQ instruction: " << *Ty << "\n";
    llvm_unreachable(nullptr);
  }
  return Dest;
}
// Scalar NaN guard: if either scalar operand is NaN (tested via x != x),
// return an i1 false immediately from the enclosing function.
#define IMPLEMENT_SCALAR_NANS(TY, X,Y)                                      \
  if (TY->isFloatTy()) {                                                    \
    if (X.FloatVal != X.FloatVal || Y.FloatVal != Y.FloatVal) {             \
      Dest.IntVal = APInt(1,false);                                         \
      return Dest;                                                          \
    }                                                                       \
  } else {                                                                  \
    if (X.DoubleVal != X.DoubleVal || Y.DoubleVal != Y.DoubleVal) {         \
      Dest.IntVal = APInt(1,false);                                         \
      return Dest;                                                          \
    }                                                                       \
  }

// Per-element NaN mask body for vectors of element type TZ: a Dest element
// is set to FLAG where either input element is NaN, !FLAG otherwise.
#define MASK_VECTOR_NANS_T(X,Y, TZ, FLAG)                                   \
  assert(X.AggregateVal.size() == Y.AggregateVal.size());                   \
  Dest.AggregateVal.resize( X.AggregateVal.size() );                        \
  for( uint32_t _i=0;_i<X.AggregateVal.size();_i++) {                       \
    if (X.AggregateVal[_i].TZ##Val != X.AggregateVal[_i].TZ##Val ||         \
        Y.AggregateVal[_i].TZ##Val != Y.AggregateVal[_i].TZ##Val)           \
      Dest.AggregateVal[_i].IntVal = APInt(1,FLAG);                         \
    else {                                                                  \
      Dest.AggregateVal[_i].IntVal = APInt(1,!FLAG);                        \
    }                                                                       \
  }
  328. #define MASK_VECTOR_NANS(TY, X,Y, FLAG) \
  329. if (TY->isVectorTy()) { \
  330. if (cast<VectorType>(TY)->getElementType()->isFloatTy()) { \
  331. MASK_VECTOR_NANS_T(X, Y, Float, FLAG) \
  332. } else { \
  333. MASK_VECTOR_NANS_T(X, Y, Double, FLAG) \
  334. } \
  335. } \
// fcmp one: ordered and not-equal. Scalars: false if either operand is NaN,
// otherwise host '!='. Vectors: build a NaN mask first, compare with '!=',
// then force NaN lanes to false.
static GenericValue executeFCMP_ONE(GenericValue Src1, GenericValue Src2,
                                    Type *Ty)
{
  GenericValue Dest;
  // if input is scalar value and Src1 or Src2 is NaN return false
  IMPLEMENT_SCALAR_NANS(Ty, Src1, Src2)
  // if vector input detect NaNs and fill mask
  MASK_VECTOR_NANS(Ty, Src1, Src2, false)
  // Snapshot the NaN mask before the comparison overwrites Dest.
  GenericValue DestMask = Dest;
  switch (Ty->getTypeID()) {
    IMPLEMENT_FCMP(!=, Float);
    IMPLEMENT_FCMP(!=, Double);
    IMPLEMENT_VECTOR_FCMP(!=);
  default:
    dbgs() << "Unhandled type for FCmp NE instruction: " << *Ty << "\n";
    llvm_unreachable(nullptr);
  }
  // in vector case mask out NaN elements
  if (Ty->isVectorTy())
    for( size_t _i=0; _i<Src1.AggregateVal.size(); _i++)
      if (DestMask.AggregateVal[_i].IntVal == false)
        Dest.AggregateVal[_i].IntVal = APInt(1,false);
  return Dest;
}
// fcmp ole: ordered less-or-equal via host '<=' (false on NaN per IEEE).
static GenericValue executeFCMP_OLE(GenericValue Src1, GenericValue Src2,
                                    Type *Ty) {
  GenericValue Dest;
  switch (Ty->getTypeID()) {
    IMPLEMENT_FCMP(<=, Float);
    IMPLEMENT_FCMP(<=, Double);
    IMPLEMENT_VECTOR_FCMP(<=);
  default:
    dbgs() << "Unhandled type for FCmp LE instruction: " << *Ty << "\n";
    llvm_unreachable(nullptr);
  }
  return Dest;
}

// fcmp oge: ordered greater-or-equal via host '>=' (false on NaN per IEEE).
static GenericValue executeFCMP_OGE(GenericValue Src1, GenericValue Src2,
                                    Type *Ty) {
  GenericValue Dest;
  switch (Ty->getTypeID()) {
    IMPLEMENT_FCMP(>=, Float);
    IMPLEMENT_FCMP(>=, Double);
    IMPLEMENT_VECTOR_FCMP(>=);
  default:
    dbgs() << "Unhandled type for FCmp GE instruction: " << *Ty << "\n";
    llvm_unreachable(nullptr);
  }
  return Dest;
}

// fcmp olt: ordered less-than via host '<' (false on NaN per IEEE).
static GenericValue executeFCMP_OLT(GenericValue Src1, GenericValue Src2,
                                    Type *Ty) {
  GenericValue Dest;
  switch (Ty->getTypeID()) {
    IMPLEMENT_FCMP(<, Float);
    IMPLEMENT_FCMP(<, Double);
    IMPLEMENT_VECTOR_FCMP(<);
  default:
    dbgs() << "Unhandled type for FCmp LT instruction: " << *Ty << "\n";
    llvm_unreachable(nullptr);
  }
  return Dest;
}

// fcmp ogt: ordered greater-than via host '>' (false on NaN per IEEE).
static GenericValue executeFCMP_OGT(GenericValue Src1, GenericValue Src2,
                                    Type *Ty) {
  GenericValue Dest;
  switch (Ty->getTypeID()) {
    IMPLEMENT_FCMP(>, Float);
    IMPLEMENT_FCMP(>, Double);
    IMPLEMENT_VECTOR_FCMP(>);
  default:
    dbgs() << "Unhandled type for FCmp GT instruction: " << *Ty << "\n";
    llvm_unreachable(nullptr);
  }
  return Dest;
}
// Scalar unordered guard: if either scalar operand is NaN (x != x), return
// an i1 true immediately from the enclosing function.
#define IMPLEMENT_UNORDERED(TY, X,Y)                                        \
  if (TY->isFloatTy()) {                                                    \
    if (X.FloatVal != X.FloatVal || Y.FloatVal != Y.FloatVal) {             \
      Dest.IntVal = APInt(1,true);                                          \
      return Dest;                                                          \
    }                                                                       \
  } else if (X.DoubleVal != X.DoubleVal || Y.DoubleVal != Y.DoubleVal) {    \
    Dest.IntVal = APInt(1,true);                                            \
    return Dest;                                                            \
  }
  422. #define IMPLEMENT_VECTOR_UNORDERED(TY, X, Y, FUNC) \
  423. if (TY->isVectorTy()) { \
  424. GenericValue DestMask = Dest; \
  425. Dest = FUNC(Src1, Src2, Ty); \
  426. for (size_t _i = 0; _i < Src1.AggregateVal.size(); _i++) \
  427. if (DestMask.AggregateVal[_i].IntVal == true) \
  428. Dest.AggregateVal[_i].IntVal = APInt(1, true); \
  429. return Dest; \
  430. }
// fcmp ueq: unordered-or-equal — true if either operand is NaN, else oeq.
static GenericValue executeFCMP_UEQ(GenericValue Src1, GenericValue Src2,
                                    Type *Ty) {
  GenericValue Dest;
  IMPLEMENT_UNORDERED(Ty, Src1, Src2)
  MASK_VECTOR_NANS(Ty, Src1, Src2, true)
  IMPLEMENT_VECTOR_UNORDERED(Ty, Src1, Src2, executeFCMP_OEQ)
  return executeFCMP_OEQ(Src1, Src2, Ty);
}

// fcmp une: unordered-or-not-equal — true if either operand is NaN, else one.
static GenericValue executeFCMP_UNE(GenericValue Src1, GenericValue Src2,
                                    Type *Ty) {
  GenericValue Dest;
  IMPLEMENT_UNORDERED(Ty, Src1, Src2)
  MASK_VECTOR_NANS(Ty, Src1, Src2, true)
  IMPLEMENT_VECTOR_UNORDERED(Ty, Src1, Src2, executeFCMP_ONE)
  return executeFCMP_ONE(Src1, Src2, Ty);
}

// fcmp ule: unordered-or-less-or-equal.
static GenericValue executeFCMP_ULE(GenericValue Src1, GenericValue Src2,
                                    Type *Ty) {
  GenericValue Dest;
  IMPLEMENT_UNORDERED(Ty, Src1, Src2)
  MASK_VECTOR_NANS(Ty, Src1, Src2, true)
  IMPLEMENT_VECTOR_UNORDERED(Ty, Src1, Src2, executeFCMP_OLE)
  return executeFCMP_OLE(Src1, Src2, Ty);
}

// fcmp uge: unordered-or-greater-or-equal.
static GenericValue executeFCMP_UGE(GenericValue Src1, GenericValue Src2,
                                    Type *Ty) {
  GenericValue Dest;
  IMPLEMENT_UNORDERED(Ty, Src1, Src2)
  MASK_VECTOR_NANS(Ty, Src1, Src2, true)
  IMPLEMENT_VECTOR_UNORDERED(Ty, Src1, Src2, executeFCMP_OGE)
  return executeFCMP_OGE(Src1, Src2, Ty);
}

// fcmp ult: unordered-or-less-than.
static GenericValue executeFCMP_ULT(GenericValue Src1, GenericValue Src2,
                                    Type *Ty) {
  GenericValue Dest;
  IMPLEMENT_UNORDERED(Ty, Src1, Src2)
  MASK_VECTOR_NANS(Ty, Src1, Src2, true)
  IMPLEMENT_VECTOR_UNORDERED(Ty, Src1, Src2, executeFCMP_OLT)
  return executeFCMP_OLT(Src1, Src2, Ty);
}

// fcmp ugt: unordered-or-greater-than.
static GenericValue executeFCMP_UGT(GenericValue Src1, GenericValue Src2,
                                    Type *Ty) {
  GenericValue Dest;
  IMPLEMENT_UNORDERED(Ty, Src1, Src2)
  MASK_VECTOR_NANS(Ty, Src1, Src2, true)
  IMPLEMENT_VECTOR_UNORDERED(Ty, Src1, Src2, executeFCMP_OGT)
  return executeFCMP_OGT(Src1, Src2, Ty);
}
  479. static GenericValue executeFCMP_ORD(GenericValue Src1, GenericValue Src2,
  480. Type *Ty) {
  481. GenericValue Dest;
  482. if(Ty->isVectorTy()) {
  483. assert(Src1.AggregateVal.size() == Src2.AggregateVal.size());
  484. Dest.AggregateVal.resize( Src1.AggregateVal.size() );
  485. if (cast<VectorType>(Ty)->getElementType()->isFloatTy()) {
  486. for( size_t _i=0;_i<Src1.AggregateVal.size();_i++)
  487. Dest.AggregateVal[_i].IntVal = APInt(1,
  488. ( (Src1.AggregateVal[_i].FloatVal ==
  489. Src1.AggregateVal[_i].FloatVal) &&
  490. (Src2.AggregateVal[_i].FloatVal ==
  491. Src2.AggregateVal[_i].FloatVal)));
  492. } else {
  493. for( size_t _i=0;_i<Src1.AggregateVal.size();_i++)
  494. Dest.AggregateVal[_i].IntVal = APInt(1,
  495. ( (Src1.AggregateVal[_i].DoubleVal ==
  496. Src1.AggregateVal[_i].DoubleVal) &&
  497. (Src2.AggregateVal[_i].DoubleVal ==
  498. Src2.AggregateVal[_i].DoubleVal)));
  499. }
  500. } else if (Ty->isFloatTy())
  501. Dest.IntVal = APInt(1,(Src1.FloatVal == Src1.FloatVal &&
  502. Src2.FloatVal == Src2.FloatVal));
  503. else {
  504. Dest.IntVal = APInt(1,(Src1.DoubleVal == Src1.DoubleVal &&
  505. Src2.DoubleVal == Src2.DoubleVal));
  506. }
  507. return Dest;
  508. }
  509. static GenericValue executeFCMP_UNO(GenericValue Src1, GenericValue Src2,
  510. Type *Ty) {
  511. GenericValue Dest;
  512. if(Ty->isVectorTy()) {
  513. assert(Src1.AggregateVal.size() == Src2.AggregateVal.size());
  514. Dest.AggregateVal.resize( Src1.AggregateVal.size() );
  515. if (cast<VectorType>(Ty)->getElementType()->isFloatTy()) {
  516. for( size_t _i=0;_i<Src1.AggregateVal.size();_i++)
  517. Dest.AggregateVal[_i].IntVal = APInt(1,
  518. ( (Src1.AggregateVal[_i].FloatVal !=
  519. Src1.AggregateVal[_i].FloatVal) ||
  520. (Src2.AggregateVal[_i].FloatVal !=
  521. Src2.AggregateVal[_i].FloatVal)));
  522. } else {
  523. for( size_t _i=0;_i<Src1.AggregateVal.size();_i++)
  524. Dest.AggregateVal[_i].IntVal = APInt(1,
  525. ( (Src1.AggregateVal[_i].DoubleVal !=
  526. Src1.AggregateVal[_i].DoubleVal) ||
  527. (Src2.AggregateVal[_i].DoubleVal !=
  528. Src2.AggregateVal[_i].DoubleVal)));
  529. }
  530. } else if (Ty->isFloatTy())
  531. Dest.IntVal = APInt(1,(Src1.FloatVal != Src1.FloatVal ||
  532. Src2.FloatVal != Src2.FloatVal));
  533. else {
  534. Dest.IntVal = APInt(1,(Src1.DoubleVal != Src1.DoubleVal ||
  535. Src2.DoubleVal != Src2.DoubleVal));
  536. }
  537. return Dest;
  538. }
  539. static GenericValue executeFCMP_BOOL(GenericValue Src1, GenericValue Src2,
  540. const Type *Ty, const bool val) {
  541. GenericValue Dest;
  542. if(Ty->isVectorTy()) {
  543. assert(Src1.AggregateVal.size() == Src2.AggregateVal.size());
  544. Dest.AggregateVal.resize( Src1.AggregateVal.size() );
  545. for( size_t _i=0; _i<Src1.AggregateVal.size(); _i++)
  546. Dest.AggregateVal[_i].IntVal = APInt(1,val);
  547. } else {
  548. Dest.IntVal = APInt(1, val);
  549. }
  550. return Dest;
  551. }
// Execute an fcmp instruction: fetch both operand values from the current
// stack frame, dispatch on the (ordered/unordered) predicate, and bind the
// i1 (or i1-vector) result to the instruction.
void Interpreter::visitFCmpInst(FCmpInst &I) {
  ExecutionContext &SF = ECStack.back();
  Type *Ty = I.getOperand(0)->getType();
  GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
  GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
  GenericValue R;   // Result
  switch (I.getPredicate()) {
  default:
    dbgs() << "Don't know how to handle this FCmp predicate!\n-->" << I;
    llvm_unreachable(nullptr);
  break;
  case FCmpInst::FCMP_FALSE: R = executeFCMP_BOOL(Src1, Src2, Ty, false);
  break;
  case FCmpInst::FCMP_TRUE:  R = executeFCMP_BOOL(Src1, Src2, Ty, true);
  break;
  case FCmpInst::FCMP_ORD:   R = executeFCMP_ORD(Src1, Src2, Ty); break;
  case FCmpInst::FCMP_UNO:   R = executeFCMP_UNO(Src1, Src2, Ty); break;
  case FCmpInst::FCMP_UEQ:   R = executeFCMP_UEQ(Src1, Src2, Ty); break;
  case FCmpInst::FCMP_OEQ:   R = executeFCMP_OEQ(Src1, Src2, Ty); break;
  case FCmpInst::FCMP_UNE:   R = executeFCMP_UNE(Src1, Src2, Ty); break;
  case FCmpInst::FCMP_ONE:   R = executeFCMP_ONE(Src1, Src2, Ty); break;
  case FCmpInst::FCMP_ULT:   R = executeFCMP_ULT(Src1, Src2, Ty); break;
  case FCmpInst::FCMP_OLT:   R = executeFCMP_OLT(Src1, Src2, Ty); break;
  case FCmpInst::FCMP_UGT:   R = executeFCMP_UGT(Src1, Src2, Ty); break;
  case FCmpInst::FCMP_OGT:   R = executeFCMP_OGT(Src1, Src2, Ty); break;
  case FCmpInst::FCMP_ULE:   R = executeFCMP_ULE(Src1, Src2, Ty); break;
  case FCmpInst::FCMP_OLE:   R = executeFCMP_OLE(Src1, Src2, Ty); break;
  case FCmpInst::FCMP_UGE:   R = executeFCMP_UGE(Src1, Src2, Ty); break;
  case FCmpInst::FCMP_OGE:   R = executeFCMP_OGE(Src1, Src2, Ty); break;
  }
  SetValue(&I, R, SF);
}
  584. static GenericValue executeCmpInst(unsigned predicate, GenericValue Src1,
  585. GenericValue Src2, Type *Ty) {
  586. GenericValue Result;
  587. switch (predicate) {
  588. case ICmpInst::ICMP_EQ: return executeICMP_EQ(Src1, Src2, Ty);
  589. case ICmpInst::ICMP_NE: return executeICMP_NE(Src1, Src2, Ty);
  590. case ICmpInst::ICMP_UGT: return executeICMP_UGT(Src1, Src2, Ty);
  591. case ICmpInst::ICMP_SGT: return executeICMP_SGT(Src1, Src2, Ty);
  592. case ICmpInst::ICMP_ULT: return executeICMP_ULT(Src1, Src2, Ty);
  593. case ICmpInst::ICMP_SLT: return executeICMP_SLT(Src1, Src2, Ty);
  594. case ICmpInst::ICMP_UGE: return executeICMP_UGE(Src1, Src2, Ty);
  595. case ICmpInst::ICMP_SGE: return executeICMP_SGE(Src1, Src2, Ty);
  596. case ICmpInst::ICMP_ULE: return executeICMP_ULE(Src1, Src2, Ty);
  597. case ICmpInst::ICMP_SLE: return executeICMP_SLE(Src1, Src2, Ty);
  598. case FCmpInst::FCMP_ORD: return executeFCMP_ORD(Src1, Src2, Ty);
  599. case FCmpInst::FCMP_UNO: return executeFCMP_UNO(Src1, Src2, Ty);
  600. case FCmpInst::FCMP_OEQ: return executeFCMP_OEQ(Src1, Src2, Ty);
  601. case FCmpInst::FCMP_UEQ: return executeFCMP_UEQ(Src1, Src2, Ty);
  602. case FCmpInst::FCMP_ONE: return executeFCMP_ONE(Src1, Src2, Ty);
  603. case FCmpInst::FCMP_UNE: return executeFCMP_UNE(Src1, Src2, Ty);
  604. case FCmpInst::FCMP_OLT: return executeFCMP_OLT(Src1, Src2, Ty);
  605. case FCmpInst::FCMP_ULT: return executeFCMP_ULT(Src1, Src2, Ty);
  606. case FCmpInst::FCMP_OGT: return executeFCMP_OGT(Src1, Src2, Ty);
  607. case FCmpInst::FCMP_UGT: return executeFCMP_UGT(Src1, Src2, Ty);
  608. case FCmpInst::FCMP_OLE: return executeFCMP_OLE(Src1, Src2, Ty);
  609. case FCmpInst::FCMP_ULE: return executeFCMP_ULE(Src1, Src2, Ty);
  610. case FCmpInst::FCMP_OGE: return executeFCMP_OGE(Src1, Src2, Ty);
  611. case FCmpInst::FCMP_UGE: return executeFCMP_UGE(Src1, Src2, Ty);
  612. case FCmpInst::FCMP_FALSE: return executeFCMP_BOOL(Src1, Src2, Ty, false);
  613. case FCmpInst::FCMP_TRUE: return executeFCMP_BOOL(Src1, Src2, Ty, true);
  614. default:
  615. dbgs() << "Unhandled Cmp predicate\n";
  616. llvm_unreachable(nullptr);
  617. }
  618. }
// visitBinaryOperator - Execute one binary instruction (integer add/sub/mul,
// the div/rem family, the bitwise ops, and FP arithmetic) for scalar or
// vector operands, then record the result in the current stack frame.
void Interpreter::visitBinaryOperator(BinaryOperator &I) {
  ExecutionContext &SF = ECStack.back();
  Type *Ty = I.getOperand(0)->getType();
  GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
  GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
  GenericValue R;   // Result

  // First process vector operation
  if (Ty->isVectorTy()) {
    assert(Src1.AggregateVal.size() == Src2.AggregateVal.size());
    R.AggregateVal.resize(Src1.AggregateVal.size());

    // Macros to execute binary operation 'OP' over integer vectors
#define INTEGER_VECTOR_OPERATION(OP)                               \
    for (unsigned i = 0; i < R.AggregateVal.size(); ++i)           \
      R.AggregateVal[i].IntVal =                                   \
        Src1.AggregateVal[i].IntVal OP Src2.AggregateVal[i].IntVal;

    // Additional macros to execute binary operations udiv/sdiv/urem/srem since
    // they have different notation.
#define INTEGER_VECTOR_FUNCTION(OP)                                \
    for (unsigned i = 0; i < R.AggregateVal.size(); ++i)           \
      R.AggregateVal[i].IntVal =                                   \
        Src1.AggregateVal[i].IntVal.OP(Src2.AggregateVal[i].IntVal);

    // Macros to execute binary operation 'OP' over floating point type TY
    // (float or double) vectors
#define FLOAT_VECTOR_FUNCTION(OP, TY)                              \
    for (unsigned i = 0; i < R.AggregateVal.size(); ++i)           \
      R.AggregateVal[i].TY =                                       \
        Src1.AggregateVal[i].TY OP Src2.AggregateVal[i].TY;

    // Macros to choose appropriate TY: float or double and run operation
    // execution
#define FLOAT_VECTOR_OP(OP) {                                      \
  if (cast<VectorType>(Ty)->getElementType()->isFloatTy())         \
    FLOAT_VECTOR_FUNCTION(OP, FloatVal)                            \
  else {                                                           \
    if (cast<VectorType>(Ty)->getElementType()->isDoubleTy())      \
      FLOAT_VECTOR_FUNCTION(OP, DoubleVal)                         \
    else {                                                         \
      dbgs() << "Unhandled type for OP instruction: " << *Ty << "\n"; \
      llvm_unreachable(0);                                         \
    }                                                              \
  }                                                                \
}

    switch(I.getOpcode()){
    default:
      dbgs() << "Don't know how to handle this binary operator!\n-->" << I;
      llvm_unreachable(nullptr);
      break;
    case Instruction::Add:   INTEGER_VECTOR_OPERATION(+) break;
    case Instruction::Sub:   INTEGER_VECTOR_OPERATION(-) break;
    case Instruction::Mul:   INTEGER_VECTOR_OPERATION(*) break;
    case Instruction::UDiv:  INTEGER_VECTOR_FUNCTION(udiv) break;
    case Instruction::SDiv:  INTEGER_VECTOR_FUNCTION(sdiv) break;
    case Instruction::URem:  INTEGER_VECTOR_FUNCTION(urem) break;
    case Instruction::SRem:  INTEGER_VECTOR_FUNCTION(srem) break;
    case Instruction::And:   INTEGER_VECTOR_OPERATION(&) break;
    case Instruction::Or:    INTEGER_VECTOR_OPERATION(|) break;
    case Instruction::Xor:   INTEGER_VECTOR_OPERATION(^) break;
    case Instruction::FAdd:  FLOAT_VECTOR_OP(+) break;
    case Instruction::FSub:  FLOAT_VECTOR_OP(-) break;
    case Instruction::FMul:  FLOAT_VECTOR_OP(*) break;
    case Instruction::FDiv:  FLOAT_VECTOR_OP(/) break;
    case Instruction::FRem:
      // FRem has no infix operator; it is implemented via fmod(), picking
      // float or double per the vector element type.
      if (cast<VectorType>(Ty)->getElementType()->isFloatTy())
        for (unsigned i = 0; i < R.AggregateVal.size(); ++i)
          R.AggregateVal[i].FloatVal =
          fmod(Src1.AggregateVal[i].FloatVal, Src2.AggregateVal[i].FloatVal);
      else {
        if (cast<VectorType>(Ty)->getElementType()->isDoubleTy())
          for (unsigned i = 0; i < R.AggregateVal.size(); ++i)
            R.AggregateVal[i].DoubleVal =
            fmod(Src1.AggregateVal[i].DoubleVal, Src2.AggregateVal[i].DoubleVal);
        else {
          dbgs() << "Unhandled type for Rem instruction: " << *Ty << "\n";
          llvm_unreachable(nullptr);
        }
      }
      break;
    }
  } else {
    // Scalar case: integer ops go through APInt; FP ops go through the
    // execute*Inst helpers which select float vs double by type.
    switch (I.getOpcode()) {
    default:
      dbgs() << "Don't know how to handle this binary operator!\n-->" << I;
      llvm_unreachable(nullptr);
      break;
    case Instruction::Add:   R.IntVal = Src1.IntVal + Src2.IntVal; break;
    case Instruction::Sub:   R.IntVal = Src1.IntVal - Src2.IntVal; break;
    case Instruction::Mul:   R.IntVal = Src1.IntVal * Src2.IntVal; break;
    case Instruction::FAdd:  executeFAddInst(R, Src1, Src2, Ty); break;
    case Instruction::FSub:  executeFSubInst(R, Src1, Src2, Ty); break;
    case Instruction::FMul:  executeFMulInst(R, Src1, Src2, Ty); break;
    case Instruction::FDiv:  executeFDivInst(R, Src1, Src2, Ty); break;
    case Instruction::FRem:  executeFRemInst(R, Src1, Src2, Ty); break;
    case Instruction::UDiv:  R.IntVal = Src1.IntVal.udiv(Src2.IntVal); break;
    case Instruction::SDiv:  R.IntVal = Src1.IntVal.sdiv(Src2.IntVal); break;
    case Instruction::URem:  R.IntVal = Src1.IntVal.urem(Src2.IntVal); break;
    case Instruction::SRem:  R.IntVal = Src1.IntVal.srem(Src2.IntVal); break;
    case Instruction::And:   R.IntVal = Src1.IntVal & Src2.IntVal; break;
    case Instruction::Or:    R.IntVal = Src1.IntVal | Src2.IntVal; break;
    case Instruction::Xor:   R.IntVal = Src1.IntVal ^ Src2.IntVal; break;
    }
  }
  SetValue(&I, R, SF);
}
  721. static GenericValue executeSelectInst(GenericValue Src1, GenericValue Src2,
  722. GenericValue Src3, const Type *Ty) {
  723. GenericValue Dest;
  724. if(Ty->isVectorTy()) {
  725. assert(Src1.AggregateVal.size() == Src2.AggregateVal.size());
  726. assert(Src2.AggregateVal.size() == Src3.AggregateVal.size());
  727. Dest.AggregateVal.resize( Src1.AggregateVal.size() );
  728. for (size_t i = 0; i < Src1.AggregateVal.size(); ++i)
  729. Dest.AggregateVal[i] = (Src1.AggregateVal[i].IntVal == 0) ?
  730. Src3.AggregateVal[i] : Src2.AggregateVal[i];
  731. } else {
  732. Dest = (Src1.IntVal == 0) ? Src3 : Src2;
  733. }
  734. return Dest;
  735. }
  736. void Interpreter::visitSelectInst(SelectInst &I) {
  737. ExecutionContext &SF = ECStack.back();
  738. const Type * Ty = I.getOperand(0)->getType();
  739. GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
  740. GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
  741. GenericValue Src3 = getOperandValue(I.getOperand(2), SF);
  742. GenericValue R = executeSelectInst(Src1, Src2, Src3, Ty);
  743. SetValue(&I, R, SF);
  744. }
  745. //===----------------------------------------------------------------------===//
  746. // Terminator Instruction Implementations
  747. //===----------------------------------------------------------------------===//
// exitCalled - Handle the interpreted program calling exit(): tear down the
// interpreter stack, run registered atexit handlers, then terminate the
// host process with the requested status.
void Interpreter::exitCalled(GenericValue GV) {
  // runAtExitHandlers() assumes there are no stack frames, but
  // if exit() was called, then it had a stack frame. Blow away
  // the stack before interpreting atexit handlers.
  ECStack.clear();
  runAtExitHandlers();
  // Exit status is the low 32 bits of the value passed to exit().
  exit(GV.IntVal.zextOrTrunc(32).getZExtValue());
}
  756. /// Pop the last stack frame off of ECStack and then copy the result
  757. /// back into the result variable if we are not returning void. The
  758. /// result variable may be the ExitValue, or the Value of the calling
  759. /// CallInst if there was a previous stack frame. This method may
  760. /// invalidate any ECStack iterators you have. This method also takes
  761. /// care of switching to the normal destination BB, if we are returning
  762. /// from an invoke.
  763. ///
void Interpreter::popStackAndReturnValueToCaller(Type *RetTy,
                                                 GenericValue Result) {
  // Pop the current stack frame.
  ECStack.pop_back();

  if (ECStack.empty()) {  // Finished main.  Put result into exit code...
    if (RetTy && !RetTy->isVoidTy()) {          // Nonvoid return type?
      ExitValue = Result;   // Capture the exit value of the program
    } else {
      // Void main: zero out the exit value so callers see a clean status.
      memset(&ExitValue.Untyped, 0, sizeof(ExitValue.Untyped));
    }
  } else {
    // If we have a previous stack frame, and we have a previous call,
    // fill in the return value...
    ExecutionContext &CallingSF = ECStack.back();
    if (Instruction *I = CallingSF.Caller.getInstruction()) {
      // Save result...
      if (!CallingSF.Caller.getType()->isVoidTy())
        SetValue(I, Result, CallingSF);
      // Returning from an invoke resumes at its normal destination block.
      if (InvokeInst *II = dyn_cast<InvokeInst> (I))
        SwitchToNewBasicBlock (II->getNormalDest (), CallingSF);
      CallingSF.Caller = CallSite();          // We returned from the call...
    }
  }
}
  788. void Interpreter::visitReturnInst(ReturnInst &I) {
  789. ExecutionContext &SF = ECStack.back();
  790. Type *RetTy = Type::getVoidTy(I.getContext());
  791. GenericValue Result;
  792. // Save away the return value... (if we are not 'ret void')
  793. if (I.getNumOperands()) {
  794. RetTy = I.getReturnValue()->getType();
  795. Result = getOperandValue(I.getReturnValue(), SF);
  796. }
  797. popStackAndReturnValueToCaller(RetTy, Result);
  798. }
// visitUnreachableInst - Reaching 'unreachable' is undefined behavior in
// LLVM IR; the interpreter aborts with a fatal error instead of continuing.
void Interpreter::visitUnreachableInst(UnreachableInst &I) {
  report_fatal_error("Program executed an 'unreachable' instruction!");
}
  802. void Interpreter::visitBranchInst(BranchInst &I) {
  803. ExecutionContext &SF = ECStack.back();
  804. BasicBlock *Dest;
  805. Dest = I.getSuccessor(0); // Uncond branches have a fixed dest...
  806. if (!I.isUnconditional()) {
  807. Value *Cond = I.getCondition();
  808. if (getOperandValue(Cond, SF).IntVal == 0) // If false cond...
  809. Dest = I.getSuccessor(1);
  810. }
  811. SwitchToNewBasicBlock(Dest, SF);
  812. }
// visitSwitchInst - Execute a 'switch': compare the condition against each
// case value in order and branch to the first match, or to the default
// destination when nothing matches.
void Interpreter::visitSwitchInst(SwitchInst &I) {
  ExecutionContext &SF = ECStack.back();
  Value* Cond = I.getCondition();
  Type *ElTy = Cond->getType();
  GenericValue CondVal = getOperandValue(Cond, SF);

  // Check to see if any of the cases match...
  BasicBlock *Dest = nullptr;
  for (SwitchInst::CaseIt i = I.case_begin(), e = I.case_end(); i != e; ++i) {
    GenericValue CaseVal = getOperandValue(i.getCaseValue(), SF);
    // Reuse the integer equality comparator; non-zero IntVal means "equal".
    if (executeICMP_EQ(CondVal, CaseVal, ElTy).IntVal != 0) {
      Dest = cast<BasicBlock>(i.getCaseSuccessor());
      break;
    }
  }
  if (!Dest) Dest = I.getDefaultDest();   // No cases matched: use default
  SwitchToNewBasicBlock(Dest, SF);
}
  830. void Interpreter::visitIndirectBrInst(IndirectBrInst &I) {
  831. ExecutionContext &SF = ECStack.back();
  832. void *Dest = GVTOP(getOperandValue(I.getAddress(), SF));
  833. SwitchToNewBasicBlock((BasicBlock*)Dest, SF);
  834. }
  835. // SwitchToNewBasicBlock - This method is used to jump to a new basic block.
  836. // This function handles the actual updating of block and instruction iterators
  837. // as well as execution of all of the PHI nodes in the destination block.
  838. //
  839. // This method does this because all of the PHI nodes must be executed
  840. // atomically, reading their inputs before any of the results are updated. Not
  841. // doing this can cause problems if the PHI nodes depend on other PHI nodes for
  842. // their inputs. If the input PHI node is updated before it is read, incorrect
  843. // results can happen. Thus we use a two phase approach.
  844. //
void Interpreter::SwitchToNewBasicBlock(BasicBlock *Dest, ExecutionContext &SF){
  BasicBlock *PrevBB = SF.CurBB;      // Remember where we came from...
  SF.CurBB   = Dest;                  // Update CurBB to branch destination
  SF.CurInst = SF.CurBB->begin();     // Update new instruction ptr...

  if (!isa<PHINode>(SF.CurInst)) return;  // Nothing fancy to do

  // Phase 1: loop over all of the PHI nodes in the current block, reading
  // their inputs before any PHI result is written (PHIs execute atomically).
  std::vector<GenericValue> ResultValues;

  for (; PHINode *PN = dyn_cast<PHINode>(SF.CurInst); ++SF.CurInst) {
    // Search for the value corresponding to this previous bb...
    int i = PN->getBasicBlockIndex(PrevBB);
    assert(i != -1 && "PHINode doesn't contain entry for predecessor??");
    Value *IncomingValue = PN->getIncomingValue(i);

    // Save the incoming value for this PHI node...
    ResultValues.push_back(getOperandValue(IncomingValue, SF));
  }

  // Phase 2: now loop over all of the PHI nodes setting their values...
  // (rewind to the first PHI; ResultValues is indexed in the same order)
  SF.CurInst = SF.CurBB->begin();
  for (unsigned i = 0; isa<PHINode>(SF.CurInst); ++SF.CurInst, ++i) {
    PHINode *PN = cast<PHINode>(SF.CurInst);
    SetValue(PN, ResultValues[i], SF);
  }
}
  867. //===----------------------------------------------------------------------===//
  868. // Memory Instruction Implementations
  869. //===----------------------------------------------------------------------===//
// visitAllocaInst - Execute an 'alloca': the backing store comes from
// malloc() and is registered with the current frame's Allocas list so it is
// released when the frame is popped.
void Interpreter::visitAllocaInst(AllocaInst &I) {
  ExecutionContext &SF = ECStack.back();

  Type *Ty = I.getType()->getElementType();  // Type to be allocated

  // Get the number of elements being allocated by the array...
  unsigned NumElements =
    getOperandValue(I.getOperand(0), SF).IntVal.getZExtValue();

  unsigned TypeSize = (size_t)TD.getTypeAllocSize(Ty);

  // Avoid malloc-ing zero bytes, use max()...
  // NOTE(review): NumElements * TypeSize is a 32-bit multiply and can wrap
  // for very large dynamic allocas — confirm whether that matters here.
  unsigned MemToAlloc = std::max(1U, NumElements * TypeSize);

  // Allocate enough memory to hold the type...
  // NOTE(review): a failed malloc is only caught by the assert below, which
  // compiles away in release builds.
  void *Memory = malloc(MemToAlloc);

  DEBUG(dbgs() << "Allocated Type: " << *Ty << " (" << TypeSize << " bytes) x "
               << NumElements << " (Total: " << MemToAlloc << ") at "
               << uintptr_t(Memory) << '\n');

  GenericValue Result = PTOGV(Memory);
  assert(Result.PointerVal && "Null pointer returned by malloc!");
  SetValue(&I, Result, SF);

  // Only true allocas are frame-owned; this visitor is shared structure.
  if (I.getOpcode() == Instruction::Alloca)
    ECStack.back().Allocas.add(Memory);
}
  890. // getElementOffset - The workhorse for getelementptr.
  891. //
// executeGEPOperation - The workhorse for getelementptr: walk the index
// list accumulating a byte offset (struct fields via StructLayout, array /
// pointer / vector elements via element-size * index), then add the total
// to the evaluated base pointer.
GenericValue Interpreter::executeGEPOperation(Value *Ptr, gep_type_iterator I,
                                              gep_type_iterator E,
                                              ExecutionContext &SF) {
  assert(Ptr->getType()->isPointerTy() &&
         "Cannot getElementOffset of a nonpointer type!");

  uint64_t Total = 0;
  for (; I != E; ++I) {
    if (StructType *STy = dyn_cast<StructType>(*I)) {
      // Struct index: always a constant; field offset comes from the
      // target's struct layout.
      const StructLayout *SLO = TD.getStructLayout(STy);

      const ConstantInt *CPU = cast<ConstantInt>(I.getOperand());
      unsigned Index = unsigned(CPU->getZExtValue());

      Total += SLO->getElementOffset(Index);
    } else {
      SequentialType *ST = cast<SequentialType>(*I);
      // Get the index number for the array... which must be long type...
      GenericValue IdxGV = getOperandValue(I.getOperand(), SF);

      int64_t Idx;
      unsigned BitWidth =
        cast<IntegerType>(I.getOperand()->getType())->getBitWidth();
      if (BitWidth == 32)
        // Sign-extend a 32-bit index so negative indices work.
        Idx = (int64_t)(int32_t)IdxGV.IntVal.getZExtValue();
      else {
        assert(BitWidth == 64 && "Invalid index type for getelementptr");
        Idx = (int64_t)IdxGV.IntVal.getZExtValue();
      }
      Total += TD.getTypeAllocSize(ST->getElementType())*Idx;
    }
  }

  GenericValue Result;
  Result.PointerVal = ((char*)getOperandValue(Ptr, SF).PointerVal) + Total;
  DEBUG(dbgs() << "GEP Index " << Total << " bytes.\n");
  return Result;
}
  925. void Interpreter::visitGetElementPtrInst(GetElementPtrInst &I) {
  926. ExecutionContext &SF = ECStack.back();
  927. SetValue(&I, executeGEPOperation(I.getPointerOperand(),
  928. gep_type_begin(I), gep_type_end(I), SF), SF);
  929. }
  930. void Interpreter::visitLoadInst(LoadInst &I) {
  931. ExecutionContext &SF = ECStack.back();
  932. GenericValue SRC = getOperandValue(I.getPointerOperand(), SF);
  933. GenericValue *Ptr = (GenericValue*)GVTOP(SRC);
  934. GenericValue Result;
  935. LoadValueFromMemory(Result, Ptr, I.getType());
  936. SetValue(&I, Result, SF);
  937. if (I.isVolatile() && PrintVolatile)
  938. dbgs() << "Volatile load " << I;
  939. }
  940. void Interpreter::visitStoreInst(StoreInst &I) {
  941. ExecutionContext &SF = ECStack.back();
  942. GenericValue Val = getOperandValue(I.getOperand(0), SF);
  943. GenericValue SRC = getOperandValue(I.getPointerOperand(), SF);
  944. StoreValueToMemory(Val, (GenericValue *)GVTOP(SRC),
  945. I.getOperand(0)->getType());
  946. if (I.isVolatile() && PrintVolatile)
  947. dbgs() << "Volatile store: " << I;
  948. }
  949. //===----------------------------------------------------------------------===//
  950. // Miscellaneous Instruction Implementations
  951. //===----------------------------------------------------------------------===//
  952. void Interpreter::visitCallSite(CallSite CS) {
  953. ExecutionContext &SF = ECStack.back();
  954. // Check to see if this is an intrinsic function call...
  955. Function *F = CS.getCalledFunction();
  956. if (F && F->isDeclaration())
  957. switch (F->getIntrinsicID()) {
  958. case Intrinsic::not_intrinsic:
  959. break;
  960. case Intrinsic::vastart: { // va_start
  961. GenericValue ArgIndex;
  962. ArgIndex.UIntPairVal.first = ECStack.size() - 1;
  963. ArgIndex.UIntPairVal.second = 0;
  964. SetValue(CS.getInstruction(), ArgIndex, SF);
  965. return;
  966. }
  967. case Intrinsic::vaend: // va_end is a noop for the interpreter
  968. return;
  969. case Intrinsic::vacopy: // va_copy: dest = src
  970. SetValue(CS.getInstruction(), getOperandValue(*CS.arg_begin(), SF), SF);
  971. return;
  972. default:
  973. // If it is an unknown intrinsic function, use the intrinsic lowering
  974. // class to transform it into hopefully tasty LLVM code.
  975. //
  976. BasicBlock::iterator me(CS.getInstruction());
  977. BasicBlock *Parent = CS.getInstruction()->getParent();
  978. bool atBegin(Parent->begin() == me);
  979. if (!atBegin)
  980. --me;
  981. IL->LowerIntrinsicCall(cast<CallInst>(CS.getInstruction()));
  982. // Restore the CurInst pointer to the first instruction newly inserted, if
  983. // any.
  984. if (atBegin) {
  985. SF.CurInst = Parent->begin();
  986. } else {
  987. SF.CurInst = me;
  988. ++SF.CurInst;
  989. }
  990. return;
  991. }
  992. SF.Caller = CS;
  993. std::vector<GenericValue> ArgVals;
  994. const unsigned NumArgs = SF.Caller.arg_size();
  995. ArgVals.reserve(NumArgs);
  996. uint16_t pNum = 1;
  997. for (CallSite::arg_iterator i = SF.Caller.arg_begin(),
  998. e = SF.Caller.arg_end(); i != e; ++i, ++pNum) {
  999. Value *V = *i;
  1000. ArgVals.push_back(getOperandValue(V, SF));
  1001. }
  1002. // To handle indirect calls, we must get the pointer value from the argument
  1003. // and treat it as a function pointer.
  1004. GenericValue SRC = getOperandValue(SF.Caller.getCalledValue(), SF);
  1005. callFunction((Function*)GVTOP(SRC), ArgVals);
  1006. }
  1007. // auxiliary function for shift operations
  1008. static unsigned getShiftAmount(uint64_t orgShiftAmount,
  1009. llvm::APInt valueToShift) {
  1010. unsigned valueWidth = valueToShift.getBitWidth();
  1011. if (orgShiftAmount < (uint64_t)valueWidth)
  1012. return orgShiftAmount;
  1013. // according to the llvm documentation, if orgShiftAmount > valueWidth,
  1014. // the result is undfeined. but we do shift by this rule:
  1015. return (NextPowerOf2(valueWidth-1) - 1) & orgShiftAmount;
  1016. }
  1017. void Interpreter::visitShl(BinaryOperator &I) {
  1018. ExecutionContext &SF = ECStack.back();
  1019. GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
  1020. GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
  1021. GenericValue Dest;
  1022. const Type *Ty = I.getType();
  1023. if (Ty->isVectorTy()) {
  1024. uint32_t src1Size = uint32_t(Src1.AggregateVal.size());
  1025. assert(src1Size == Src2.AggregateVal.size());
  1026. for (unsigned i = 0; i < src1Size; i++) {
  1027. GenericValue Result;
  1028. uint64_t shiftAmount = Src2.AggregateVal[i].IntVal.getZExtValue();
  1029. llvm::APInt valueToShift = Src1.AggregateVal[i].IntVal;
  1030. Result.IntVal = valueToShift.shl(getShiftAmount(shiftAmount, valueToShift));
  1031. Dest.AggregateVal.push_back(Result);
  1032. }
  1033. } else {
  1034. // scalar
  1035. uint64_t shiftAmount = Src2.IntVal.getZExtValue();
  1036. llvm::APInt valueToShift = Src1.IntVal;
  1037. Dest.IntVal = valueToShift.shl(getShiftAmount(shiftAmount, valueToShift));
  1038. }
  1039. SetValue(&I, Dest, SF);
  1040. }
  1041. void Interpreter::visitLShr(BinaryOperator &I) {
  1042. ExecutionContext &SF = ECStack.back();
  1043. GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
  1044. GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
  1045. GenericValue Dest;
  1046. const Type *Ty = I.getType();
  1047. if (Ty->isVectorTy()) {
  1048. uint32_t src1Size = uint32_t(Src1.AggregateVal.size());
  1049. assert(src1Size == Src2.AggregateVal.size());
  1050. for (unsigned i = 0; i < src1Size; i++) {
  1051. GenericValue Result;
  1052. uint64_t shiftAmount = Src2.AggregateVal[i].IntVal.getZExtValue();
  1053. llvm::APInt valueToShift = Src1.AggregateVal[i].IntVal;
  1054. Result.IntVal = valueToShift.lshr(getShiftAmount(shiftAmount, valueToShift));
  1055. Dest.AggregateVal.push_back(Result);
  1056. }
  1057. } else {
  1058. // scalar
  1059. uint64_t shiftAmount = Src2.IntVal.getZExtValue();
  1060. llvm::APInt valueToShift = Src1.IntVal;
  1061. Dest.IntVal = valueToShift.lshr(getShiftAmount(shiftAmount, valueToShift));
  1062. }
  1063. SetValue(&I, Dest, SF);
  1064. }
  1065. void Interpreter::visitAShr(BinaryOperator &I) {
  1066. ExecutionContext &SF = ECStack.back();
  1067. GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
  1068. GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
  1069. GenericValue Dest;
  1070. const Type *Ty = I.getType();
  1071. if (Ty->isVectorTy()) {
  1072. size_t src1Size = Src1.AggregateVal.size();
  1073. assert(src1Size == Src2.AggregateVal.size());
  1074. for (unsigned i = 0; i < src1Size; i++) {
  1075. GenericValue Result;
  1076. uint64_t shiftAmount = Src2.AggregateVal[i].IntVal.getZExtValue();
  1077. llvm::APInt valueToShift = Src1.AggregateVal[i].IntVal;
  1078. Result.IntVal = valueToShift.ashr(getShiftAmount(shiftAmount, valueToShift));
  1079. Dest.AggregateVal.push_back(Result);
  1080. }
  1081. } else {
  1082. // scalar
  1083. uint64_t shiftAmount = Src2.IntVal.getZExtValue();
  1084. llvm::APInt valueToShift = Src1.IntVal;
  1085. Dest.IntVal = valueToShift.ashr(getShiftAmount(shiftAmount, valueToShift));
  1086. }
  1087. SetValue(&I, Dest, SF);
  1088. }
  1089. GenericValue Interpreter::executeTruncInst(Value *SrcVal, Type *DstTy,
  1090. ExecutionContext &SF) {
  1091. GenericValue Dest, Src = getOperandValue(SrcVal, SF);
  1092. Type *SrcTy = SrcVal->getType();
  1093. if (SrcTy->isVectorTy()) {
  1094. Type *DstVecTy = DstTy->getScalarType();
  1095. unsigned DBitWidth = cast<IntegerType>(DstVecTy)->getBitWidth();
  1096. unsigned NumElts = Src.AggregateVal.size();
  1097. // the sizes of src and dst vectors must be equal
  1098. Dest.AggregateVal.resize(NumElts);
  1099. for (unsigned i = 0; i < NumElts; i++)
  1100. Dest.AggregateVal[i].IntVal = Src.AggregateVal[i].IntVal.trunc(DBitWidth);
  1101. } else {
  1102. IntegerType *DITy = cast<IntegerType>(DstTy);
  1103. unsigned DBitWidth = DITy->getBitWidth();
  1104. Dest.IntVal = Src.IntVal.trunc(DBitWidth);
  1105. }
  1106. return Dest;
  1107. }
  1108. GenericValue Interpreter::executeSExtInst(Value *SrcVal, Type *DstTy,
  1109. ExecutionContext &SF) {
  1110. const Type *SrcTy = SrcVal->getType();
  1111. GenericValue Dest, Src = getOperandValue(SrcVal, SF);
  1112. if (SrcTy->isVectorTy()) {
  1113. const Type *DstVecTy = DstTy->getScalarType();
  1114. unsigned DBitWidth = cast<IntegerType>(DstVecTy)->getBitWidth();
  1115. unsigned size = Src.AggregateVal.size();
  1116. // the sizes of src and dst vectors must be equal.
  1117. Dest.AggregateVal.resize(size);
  1118. for (unsigned i = 0; i < size; i++)
  1119. Dest.AggregateVal[i].IntVal = Src.AggregateVal[i].IntVal.sext(DBitWidth);
  1120. } else {
  1121. const IntegerType *DITy = cast<IntegerType>(DstTy);
  1122. unsigned DBitWidth = DITy->getBitWidth();
  1123. Dest.IntVal = Src.IntVal.sext(DBitWidth);
  1124. }
  1125. return Dest;
  1126. }
  1127. GenericValue Interpreter::executeZExtInst(Value *SrcVal, Type *DstTy,
  1128. ExecutionContext &SF) {
  1129. const Type *SrcTy = SrcVal->getType();
  1130. GenericValue Dest, Src = getOperandValue(SrcVal, SF);
  1131. if (SrcTy->isVectorTy()) {
  1132. const Type *DstVecTy = DstTy->getScalarType();
  1133. unsigned DBitWidth = cast<IntegerType>(DstVecTy)->getBitWidth();
  1134. unsigned size = Src.AggregateVal.size();
  1135. // the sizes of src and dst vectors must be equal.
  1136. Dest.AggregateVal.resize(size);
  1137. for (unsigned i = 0; i < size; i++)
  1138. Dest.AggregateVal[i].IntVal = Src.AggregateVal[i].IntVal.zext(DBitWidth);
  1139. } else {
  1140. const IntegerType *DITy = cast<IntegerType>(DstTy);
  1141. unsigned DBitWidth = DITy->getBitWidth();
  1142. Dest.IntVal = Src.IntVal.zext(DBitWidth);
  1143. }
  1144. return Dest;
  1145. }
  1146. GenericValue Interpreter::executeFPTruncInst(Value *SrcVal, Type *DstTy,
  1147. ExecutionContext &SF) {
  1148. GenericValue Dest, Src = getOperandValue(SrcVal, SF);
  1149. if (SrcVal->getType()->getTypeID() == Type::VectorTyID) {
  1150. assert(SrcVal->getType()->getScalarType()->isDoubleTy() &&
  1151. DstTy->getScalarType()->isFloatTy() &&
  1152. "Invalid FPTrunc instruction");
  1153. unsigned size = Src.AggregateVal.size();
  1154. // the sizes of src and dst vectors must be equal.
  1155. Dest.AggregateVal.resize(size);
  1156. for (unsigned i = 0; i < size; i++)
  1157. Dest.AggregateVal[i].FloatVal = (float)Src.AggregateVal[i].DoubleVal;
  1158. } else {
  1159. assert(SrcVal->getType()->isDoubleTy() && DstTy->isFloatTy() &&
  1160. "Invalid FPTrunc instruction");
  1161. Dest.FloatVal = (float)Src.DoubleVal;
  1162. }
  1163. return Dest;
  1164. }
  1165. GenericValue Interpreter::executeFPExtInst(Value *SrcVal, Type *DstTy,
  1166. ExecutionContext &SF) {
  1167. GenericValue Dest, Src = getOperandValue(SrcVal, SF);
  1168. if (SrcVal->getType()->getTypeID() == Type::VectorTyID) {
  1169. assert(SrcVal->getType()->getScalarType()->isFloatTy() &&
  1170. DstTy->getScalarType()->isDoubleTy() && "Invalid FPExt instruction");
  1171. unsigned size = Src.AggregateVal.size();
  1172. // the sizes of src and dst vectors must be equal.
  1173. Dest.AggregateVal.resize(size);
  1174. for (unsigned i = 0; i < size; i++)
  1175. Dest.AggregateVal[i].DoubleVal = (double)Src.AggregateVal[i].FloatVal;
  1176. } else {
  1177. assert(SrcVal->getType()->isFloatTy() && DstTy->isDoubleTy() &&
  1178. "Invalid FPExt instruction");
  1179. Dest.DoubleVal = (double)Src.FloatVal;
  1180. }
  1181. return Dest;
  1182. }
// executeFPToUIInst - Implement 'fptoui': convert a float/double (scalar or
// vector) to an unsigned integer of DstTy's width via APIntOps rounding
// helpers.
GenericValue Interpreter::executeFPToUIInst(Value *SrcVal, Type *DstTy,
                                            ExecutionContext &SF) {
  Type *SrcTy = SrcVal->getType();
  GenericValue Dest, Src = getOperandValue(SrcVal, SF);

  if (SrcTy->getTypeID() == Type::VectorTyID) {
    const Type *DstVecTy = DstTy->getScalarType();
    const Type *SrcVecTy = SrcTy->getScalarType();
    uint32_t DBitWidth = cast<IntegerType>(DstVecTy)->getBitWidth();
    unsigned size = Src.AggregateVal.size();
    // the sizes of src and dst vectors must be equal.
    Dest.AggregateVal.resize(size);

    if (SrcVecTy->getTypeID() == Type::FloatTyID) {
      assert(SrcVecTy->isFloatingPointTy() && "Invalid FPToUI instruction");
      for (unsigned i = 0; i < size; i++)
        Dest.AggregateVal[i].IntVal = APIntOps::RoundFloatToAPInt(
            Src.AggregateVal[i].FloatVal, DBitWidth);
    } else {
      // Non-float element type: read the lanes as doubles.
      for (unsigned i = 0; i < size; i++)
        Dest.AggregateVal[i].IntVal = APIntOps::RoundDoubleToAPInt(
            Src.AggregateVal[i].DoubleVal, DBitWidth);
    }
  } else {
    // scalar
    uint32_t DBitWidth = cast<IntegerType>(DstTy)->getBitWidth();
    assert(SrcTy->isFloatingPointTy() && "Invalid FPToUI instruction");

    if (SrcTy->getTypeID() == Type::FloatTyID)
      Dest.IntVal = APIntOps::RoundFloatToAPInt(Src.FloatVal, DBitWidth);
    else {
      Dest.IntVal = APIntOps::RoundDoubleToAPInt(Src.DoubleVal, DBitWidth);
    }
  }

  return Dest;
}
// executeFPToSIInst - Implement 'fptosi': convert a float/double (scalar or
// vector) to a signed integer of DstTy's width via APIntOps rounding
// helpers.
GenericValue Interpreter::executeFPToSIInst(Value *SrcVal, Type *DstTy,
                                            ExecutionContext &SF) {
  Type *SrcTy = SrcVal->getType();
  GenericValue Dest, Src = getOperandValue(SrcVal, SF);

  if (SrcTy->getTypeID() == Type::VectorTyID) {
    const Type *DstVecTy = DstTy->getScalarType();
    const Type *SrcVecTy = SrcTy->getScalarType();
    uint32_t DBitWidth = cast<IntegerType>(DstVecTy)->getBitWidth();
    unsigned size = Src.AggregateVal.size();
    // the sizes of src and dst vectors must be equal
    Dest.AggregateVal.resize(size);

    if (SrcVecTy->getTypeID() == Type::FloatTyID) {
      assert(SrcVecTy->isFloatingPointTy() && "Invalid FPToSI instruction");
      for (unsigned i = 0; i < size; i++)
        Dest.AggregateVal[i].IntVal = APIntOps::RoundFloatToAPInt(
            Src.AggregateVal[i].FloatVal, DBitWidth);
    } else {
      // Non-float element type: read the lanes as doubles.
      for (unsigned i = 0; i < size; i++)
        Dest.AggregateVal[i].IntVal = APIntOps::RoundDoubleToAPInt(
            Src.AggregateVal[i].DoubleVal, DBitWidth);
    }
  } else {
    // scalar
    unsigned DBitWidth = cast<IntegerType>(DstTy)->getBitWidth();
    assert(SrcTy->isFloatingPointTy() && "Invalid FPToSI instruction");

    if (SrcTy->getTypeID() == Type::FloatTyID)
      Dest.IntVal = APIntOps::RoundFloatToAPInt(Src.FloatVal, DBitWidth);
    else {
      Dest.IntVal = APIntOps::RoundDoubleToAPInt(Src.DoubleVal, DBitWidth);
    }
  }

  return Dest;
}
// executeUIToFPInst - Implement 'uitofp': convert an unsigned integer
// (scalar or vector) to the destination float/double type via APIntOps
// rounding helpers.
GenericValue Interpreter::executeUIToFPInst(Value *SrcVal, Type *DstTy,
                                            ExecutionContext &SF) {
  GenericValue Dest, Src = getOperandValue(SrcVal, SF);

  if (SrcVal->getType()->getTypeID() == Type::VectorTyID) {
    const Type *DstVecTy = DstTy->getScalarType();
    unsigned size = Src.AggregateVal.size();
    // the sizes of src and dst vectors must be equal
    Dest.AggregateVal.resize(size);

    if (DstVecTy->getTypeID() == Type::FloatTyID) {
      assert(DstVecTy->isFloatingPointTy() && "Invalid UIToFP instruction");
      for (unsigned i = 0; i < size; i++)
        Dest.AggregateVal[i].FloatVal =
            APIntOps::RoundAPIntToFloat(Src.AggregateVal[i].IntVal);
    } else {
      // Non-float destination element type: produce doubles.
      for (unsigned i = 0; i < size; i++)
        Dest.AggregateVal[i].DoubleVal =
            APIntOps::RoundAPIntToDouble(Src.AggregateVal[i].IntVal);
    }
  } else {
    // scalar
    assert(DstTy->isFloatingPointTy() && "Invalid UIToFP instruction");
    if (DstTy->getTypeID() == Type::FloatTyID)
      Dest.FloatVal = APIntOps::RoundAPIntToFloat(Src.IntVal);
    else {
      Dest.DoubleVal = APIntOps::RoundAPIntToDouble(Src.IntVal);
    }
  }

  return Dest;
}
  1278. GenericValue Interpreter::executeSIToFPInst(Value *SrcVal, Type *DstTy,
  1279. ExecutionContext &SF) {
  1280. GenericValue Dest, Src = getOperandValue(SrcVal, SF);
  1281. if (SrcVal->getType()->getTypeID() == Type::VectorTyID) {
  1282. const Type *DstVecTy = DstTy->getScalarType();
  1283. unsigned size = Src.AggregateVal.size();
  1284. // the sizes of src and dst vectors must be equal
  1285. Dest.AggregateVal.resize(size);
  1286. if (DstVecTy->getTypeID() == Type::FloatTyID) {
  1287. assert(DstVecTy->isFloatingPointTy() && "Invalid SIToFP instruction");
  1288. for (unsigned i = 0; i < size; i++)
  1289. Dest.AggregateVal[i].FloatVal =
  1290. APIntOps::RoundSignedAPIntToFloat(Src.AggregateVal[i].IntVal);
  1291. } else {
  1292. for (unsigned i = 0; i < size; i++)
  1293. Dest.AggregateVal[i].DoubleVal =
  1294. APIntOps::RoundSignedAPIntToDouble(Src.AggregateVal[i].IntVal);
  1295. }
  1296. } else {
  1297. // scalar
  1298. assert(DstTy->isFloatingPointTy() && "Invalid SIToFP instruction");
  1299. if (DstTy->getTypeID() == Type::FloatTyID)
  1300. Dest.FloatVal = APIntOps::RoundSignedAPIntToFloat(Src.IntVal);
  1301. else {
  1302. Dest.DoubleVal = APIntOps::RoundSignedAPIntToDouble(Src.IntVal);
  1303. }
  1304. }
  1305. return Dest;
  1306. }
  1307. GenericValue Interpreter::executePtrToIntInst(Value *SrcVal, Type *DstTy,
  1308. ExecutionContext &SF) {
  1309. uint32_t DBitWidth = cast<IntegerType>(DstTy)->getBitWidth();
  1310. GenericValue Dest, Src = getOperandValue(SrcVal, SF);
  1311. assert(SrcVal->getType()->isPointerTy() && "Invalid PtrToInt instruction");
  1312. Dest.IntVal = APInt(DBitWidth, (intptr_t) Src.PointerVal);
  1313. return Dest;
  1314. }
  1315. GenericValue Interpreter::executeIntToPtrInst(Value *SrcVal, Type *DstTy,
  1316. ExecutionContext &SF) {
  1317. GenericValue Dest, Src = getOperandValue(SrcVal, SF);
  1318. assert(DstTy->isPointerTy() && "Invalid PtrToInt instruction");
  1319. uint32_t PtrSize = TD.getPointerSizeInBits();
  1320. if (PtrSize != Src.IntVal.getBitWidth())
  1321. Src.IntVal = Src.IntVal.zextOrTrunc(PtrSize);
  1322. Dest.PointerVal = PointerTy(intptr_t(Src.IntVal.getZExtValue()));
  1323. return Dest;
  1324. }
// Implement 'bitcast': reinterpret the bits of the source value as the
// destination type.  Scalar<->scalar casts copy the appropriate union member;
// casts involving vectors are done by packing all source bits into an
// integer vector and then splitting/merging lanes, honoring endianness.
GenericValue Interpreter::executeBitCastInst(Value *SrcVal, Type *DstTy,
                                             ExecutionContext &SF) {
  // This instruction supports bitwise conversion of vectors to integers and
  // to vectors of other types (as long as they have the same size)
  Type *SrcTy = SrcVal->getType();
  GenericValue Dest, Src = getOperandValue(SrcVal, SF);
  if ((SrcTy->getTypeID() == Type::VectorTyID) ||
      (DstTy->getTypeID() == Type::VectorTyID)) {
    // vector src bitcast to vector dst or vector src bitcast to scalar dst or
    // scalar src bitcast to vector dst
    bool isLittleEndian = TD.isLittleEndian();
    GenericValue TempDst, TempSrc, SrcVec;
    const Type *SrcElemTy;
    const Type *DstElemTy;
    unsigned SrcBitSize;
    unsigned DstBitSize;
    unsigned SrcNum;   // number of source elements
    unsigned DstNum;   // number of destination elements
    if (SrcTy->getTypeID() == Type::VectorTyID) {
      SrcElemTy = SrcTy->getScalarType();
      SrcBitSize = SrcTy->getScalarSizeInBits();
      SrcNum = Src.AggregateVal.size();
      SrcVec = Src;
    } else {
      // if src is scalar value, make it vector <1 x type>
      SrcElemTy = SrcTy;
      SrcBitSize = SrcTy->getPrimitiveSizeInBits();
      SrcNum = 1;
      SrcVec.AggregateVal.push_back(Src);
    }
    if (DstTy->getTypeID() == Type::VectorTyID) {
      DstElemTy = DstTy->getScalarType();
      DstBitSize = DstTy->getScalarSizeInBits();
      // Element count follows from total bit size, which must be preserved.
      DstNum = (SrcNum * SrcBitSize) / DstBitSize;
    } else {
      DstElemTy = DstTy;
      DstBitSize = DstTy->getPrimitiveSizeInBits();
      DstNum = 1;
    }
    // A bitcast never changes the total number of bits.
    if (SrcNum * SrcBitSize != DstNum * DstBitSize)
      llvm_unreachable("Invalid BitCast");
    // If src is floating point, cast to integer first.
    TempSrc.AggregateVal.resize(SrcNum);
    if (SrcElemTy->isFloatTy()) {
      for (unsigned i = 0; i < SrcNum; i++)
        TempSrc.AggregateVal[i].IntVal =
            APInt::floatToBits(SrcVec.AggregateVal[i].FloatVal);
    } else if (SrcElemTy->isDoubleTy()) {
      for (unsigned i = 0; i < SrcNum; i++)
        TempSrc.AggregateVal[i].IntVal =
            APInt::doubleToBits(SrcVec.AggregateVal[i].DoubleVal);
    } else if (SrcElemTy->isIntegerTy()) {
      for (unsigned i = 0; i < SrcNum; i++)
        TempSrc.AggregateVal[i].IntVal = SrcVec.AggregateVal[i].IntVal;
    } else {
      // Pointers are not allowed as the element type of vector.
      llvm_unreachable("Invalid Bitcast");
    }
    // now TempSrc is integer type vector
    if (DstNum < SrcNum) {
      // Widening: pack Ratio consecutive source lanes into each dest lane.
      // Example: bitcast <4 x i32> <i32 0, i32 1, i32 2, i32 3> to <2 x i64>
      unsigned Ratio = SrcNum / DstNum;
      unsigned SrcElt = 0;
      for (unsigned i = 0; i < DstNum; i++) {
        GenericValue Elt;
        Elt.IntVal = 0;
        Elt.IntVal = Elt.IntVal.zext(DstBitSize);
        // On big-endian targets the first source lane lands in the high bits,
        // so the shift starts high and walks down; little-endian walks up.
        unsigned ShiftAmt = isLittleEndian ? 0 : SrcBitSize * (Ratio - 1);
        for (unsigned j = 0; j < Ratio; j++) {
          APInt Tmp;
          Tmp = Tmp.zext(SrcBitSize);
          Tmp = TempSrc.AggregateVal[SrcElt++].IntVal;
          Tmp = Tmp.zext(DstBitSize);
          Tmp = Tmp.shl(ShiftAmt);
          ShiftAmt += isLittleEndian ? SrcBitSize : -SrcBitSize;
          Elt.IntVal |= Tmp;
        }
        TempDst.AggregateVal.push_back(Elt);
      }
    } else {
      // Narrowing (or same width): split each source lane into Ratio dest
      // lanes by shifting the wanted bits down and truncating.
      // Example: bitcast <2 x i64> <i64 0, i64 1> to <4 x i32>
      unsigned Ratio = DstNum / SrcNum;
      for (unsigned i = 0; i < SrcNum; i++) {
        unsigned ShiftAmt = isLittleEndian ? 0 : DstBitSize * (Ratio - 1);
        for (unsigned j = 0; j < Ratio; j++) {
          GenericValue Elt;
          Elt.IntVal = Elt.IntVal.zext(SrcBitSize);
          Elt.IntVal = TempSrc.AggregateVal[i].IntVal;
          Elt.IntVal = Elt.IntVal.lshr(ShiftAmt);
          // it could be DstBitSize == SrcBitSize, so check it
          if (DstBitSize < SrcBitSize)
            Elt.IntVal = Elt.IntVal.trunc(DstBitSize);
          ShiftAmt += isLittleEndian ? DstBitSize : -DstBitSize;
          TempDst.AggregateVal.push_back(Elt);
        }
      }
    }
    // convert result from integer to specified type
    if (DstTy->getTypeID() == Type::VectorTyID) {
      if (DstElemTy->isDoubleTy()) {
        Dest.AggregateVal.resize(DstNum);
        for (unsigned i = 0; i < DstNum; i++)
          Dest.AggregateVal[i].DoubleVal =
              TempDst.AggregateVal[i].IntVal.bitsToDouble();
      } else if (DstElemTy->isFloatTy()) {
        Dest.AggregateVal.resize(DstNum);
        for (unsigned i = 0; i < DstNum; i++)
          Dest.AggregateVal[i].FloatVal =
              TempDst.AggregateVal[i].IntVal.bitsToFloat();
      } else {
        // Integer destination: the packed lanes are already the result.
        Dest = TempDst;
      }
    } else {
      // Scalar destination: unwrap the single-lane temporary vector.
      if (DstElemTy->isDoubleTy())
        Dest.DoubleVal = TempDst.AggregateVal[0].IntVal.bitsToDouble();
      else if (DstElemTy->isFloatTy()) {
        Dest.FloatVal = TempDst.AggregateVal[0].IntVal.bitsToFloat();
      } else {
        Dest.IntVal = TempDst.AggregateVal[0].IntVal;
      }
    }
  } else { //  if ((SrcTy->getTypeID() == Type::VectorTyID) ||
           //     (DstTy->getTypeID() == Type::VectorTyID))
    // scalar src bitcast to scalar dst
    if (DstTy->isPointerTy()) {
      assert(SrcTy->isPointerTy() && "Invalid BitCast");
      Dest.PointerVal = Src.PointerVal;
    } else if (DstTy->isIntegerTy()) {
      if (SrcTy->isFloatTy())
        Dest.IntVal = APInt::floatToBits(Src.FloatVal);
      else if (SrcTy->isDoubleTy()) {
        Dest.IntVal = APInt::doubleToBits(Src.DoubleVal);
      } else if (SrcTy->isIntegerTy()) {
        Dest.IntVal = Src.IntVal;
      } else {
        llvm_unreachable("Invalid BitCast");
      }
    } else if (DstTy->isFloatTy()) {
      if (SrcTy->isIntegerTy())
        Dest.FloatVal = Src.IntVal.bitsToFloat();
      else {
        Dest.FloatVal = Src.FloatVal;
      }
    } else if (DstTy->isDoubleTy()) {
      if (SrcTy->isIntegerTy())
        Dest.DoubleVal = Src.IntVal.bitsToDouble();
      else {
        Dest.DoubleVal = Src.DoubleVal;
      }
    } else {
      llvm_unreachable("Invalid Bitcast");
    }
  }

  return Dest;
}
  1480. void Interpreter::visitTruncInst(TruncInst &I) {
  1481. ExecutionContext &SF = ECStack.back();
  1482. SetValue(&I, executeTruncInst(I.getOperand(0), I.getType(), SF), SF);
  1483. }
  1484. void Interpreter::visitSExtInst(SExtInst &I) {
  1485. ExecutionContext &SF = ECStack.back();
  1486. SetValue(&I, executeSExtInst(I.getOperand(0), I.getType(), SF), SF);
  1487. }
  1488. void Interpreter::visitZExtInst(ZExtInst &I) {
  1489. ExecutionContext &SF = ECStack.back();
  1490. SetValue(&I, executeZExtInst(I.getOperand(0), I.getType(), SF), SF);
  1491. }
  1492. void Interpreter::visitFPTruncInst(FPTruncInst &I) {
  1493. ExecutionContext &SF = ECStack.back();
  1494. SetValue(&I, executeFPTruncInst(I.getOperand(0), I.getType(), SF), SF);
  1495. }
  1496. void Interpreter::visitFPExtInst(FPExtInst &I) {
  1497. ExecutionContext &SF = ECStack.back();
  1498. SetValue(&I, executeFPExtInst(I.getOperand(0), I.getType(), SF), SF);
  1499. }
  1500. void Interpreter::visitUIToFPInst(UIToFPInst &I) {
  1501. ExecutionContext &SF = ECStack.back();
  1502. SetValue(&I, executeUIToFPInst(I.getOperand(0), I.getType(), SF), SF);
  1503. }
  1504. void Interpreter::visitSIToFPInst(SIToFPInst &I) {
  1505. ExecutionContext &SF = ECStack.back();
  1506. SetValue(&I, executeSIToFPInst(I.getOperand(0), I.getType(), SF), SF);
  1507. }
  1508. void Interpreter::visitFPToUIInst(FPToUIInst &I) {
  1509. ExecutionContext &SF = ECStack.back();
  1510. SetValue(&I, executeFPToUIInst(I.getOperand(0), I.getType(), SF), SF);
  1511. }
  1512. void Interpreter::visitFPToSIInst(FPToSIInst &I) {
  1513. ExecutionContext &SF = ECStack.back();
  1514. SetValue(&I, executeFPToSIInst(I.getOperand(0), I.getType(), SF), SF);
  1515. }
  1516. void Interpreter::visitPtrToIntInst(PtrToIntInst &I) {
  1517. ExecutionContext &SF = ECStack.back();
  1518. SetValue(&I, executePtrToIntInst(I.getOperand(0), I.getType(), SF), SF);
  1519. }
  1520. void Interpreter::visitIntToPtrInst(IntToPtrInst &I) {
  1521. ExecutionContext &SF = ECStack.back();
  1522. SetValue(&I, executeIntToPtrInst(I.getOperand(0), I.getType(), SF), SF);
  1523. }
  1524. void Interpreter::visitBitCastInst(BitCastInst &I) {
  1525. ExecutionContext &SF = ECStack.back();
  1526. SetValue(&I, executeBitCastInst(I.getOperand(0), I.getType(), SF), SF);
  1527. }
// Helper for visitVAArgInst: copy the GenericValue union member that
// corresponds to the given first-class type name (Pointer/Float/Double).
#define IMPLEMENT_VAARG(TY) \
   case Type::TY##TyID: Dest.TY##Val = Src.TY##Val; break

// Implement the 'va_arg' instruction: fetch the current variadic argument
// addressed by the valist operand and produce it as this instruction's value.
void Interpreter::visitVAArgInst(VAArgInst &I) {
  ExecutionContext &SF = ECStack.back();

  // Get the incoming valist parameter.  LLI treats the valist as a
  // (ec-stack-depth var-arg-index) pair.
  GenericValue VAList = getOperandValue(I.getOperand(0), SF);
  GenericValue Dest;
  // Index into the VarArgs list of the frame the valist refers to — which is
  // not necessarily the current (innermost) frame.
  GenericValue Src = ECStack[VAList.UIntPairVal.first]
                         .VarArgs[VAList.UIntPairVal.second];
  Type *Ty = I.getType();
  switch (Ty->getTypeID()) {
  case Type::IntegerTyID:
    Dest.IntVal = Src.IntVal;
    break;
  IMPLEMENT_VAARG(Pointer);
  IMPLEMENT_VAARG(Float);
  IMPLEMENT_VAARG(Double);
  default:
    dbgs() << "Unhandled dest type for vaarg instruction: " << *Ty << "\n";
    llvm_unreachable(nullptr);
  }

  // Set the Value of this Instruction.
  SetValue(&I, Dest, SF);

  // Move the pointer to the next vararg.
  // NOTE(review): this increments only the local copy of VAList and nothing
  // is written back, so it appears to have no observable effect — confirm.
  ++VAList.UIntPairVal.second;
}
  1555. void Interpreter::visitExtractElementInst(ExtractElementInst &I) {
  1556. ExecutionContext &SF = ECStack.back();
  1557. GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
  1558. GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
  1559. GenericValue Dest;
  1560. Type *Ty = I.getType();
  1561. const unsigned indx = unsigned(Src2.IntVal.getZExtValue());
  1562. if(Src1.AggregateVal.size() > indx) {
  1563. switch (Ty->getTypeID()) {
  1564. default:
  1565. dbgs() << "Unhandled destination type for extractelement instruction: "
  1566. << *Ty << "\n";
  1567. llvm_unreachable(nullptr);
  1568. break;
  1569. case Type::IntegerTyID:
  1570. Dest.IntVal = Src1.AggregateVal[indx].IntVal;
  1571. break;
  1572. case Type::FloatTyID:
  1573. Dest.FloatVal = Src1.AggregateVal[indx].FloatVal;
  1574. break;
  1575. case Type::DoubleTyID:
  1576. Dest.DoubleVal = Src1.AggregateVal[indx].DoubleVal;
  1577. break;
  1578. }
  1579. } else {
  1580. dbgs() << "Invalid index in extractelement instruction\n";
  1581. }
  1582. SetValue(&I, Dest, SF);
  1583. }
  1584. void Interpreter::visitInsertElementInst(InsertElementInst &I) {
  1585. ExecutionContext &SF = ECStack.back();
  1586. Type *Ty = I.getType();
  1587. if(!(Ty->isVectorTy()) )
  1588. llvm_unreachable("Unhandled dest type for insertelement instruction");
  1589. GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
  1590. GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
  1591. GenericValue Src3 = getOperandValue(I.getOperand(2), SF);
  1592. GenericValue Dest;
  1593. Type *TyContained = Ty->getContainedType(0);
  1594. const unsigned indx = unsigned(Src3.IntVal.getZExtValue());
  1595. Dest.AggregateVal = Src1.AggregateVal;
  1596. if(Src1.AggregateVal.size() <= indx)
  1597. llvm_unreachable("Invalid index in insertelement instruction");
  1598. switch (TyContained->getTypeID()) {
  1599. default:
  1600. llvm_unreachable("Unhandled dest type for insertelement instruction");
  1601. case Type::IntegerTyID:
  1602. Dest.AggregateVal[indx].IntVal = Src2.IntVal;
  1603. break;
  1604. case Type::FloatTyID:
  1605. Dest.AggregateVal[indx].FloatVal = Src2.FloatVal;
  1606. break;
  1607. case Type::DoubleTyID:
  1608. Dest.AggregateVal[indx].DoubleVal = Src2.DoubleVal;
  1609. break;
  1610. }
  1611. SetValue(&I, Dest, SF);
  1612. }
// Implement 'shufflevector': build a result vector by selecting lanes from
// the concatenation of operands 0 and 1, as directed by the mask (operand 2).
// Mask values < src1Size select from operand 0; values in
// [src1Size, src1Size+src2Size) select from operand 1.
void Interpreter::visitShuffleVectorInst(ShuffleVectorInst &I){
  ExecutionContext &SF = ECStack.back();

  Type *Ty = I.getType();
  if(!(Ty->isVectorTy()))
    llvm_unreachable("Unhandled dest type for shufflevector instruction");

  GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
  GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
  GenericValue Src3 = getOperandValue(I.getOperand(2), SF);
  GenericValue Dest;

  // There is no need to check types of src1 and src2, because the compiled
  // bytecode can't contain different types for src1 and src2 for a
  // shufflevector instruction.

  Type *TyContained = Ty->getContainedType(0);
  unsigned src1Size = (unsigned)Src1.AggregateVal.size();
  unsigned src2Size = (unsigned)Src2.AggregateVal.size();
  unsigned src3Size = (unsigned)Src3.AggregateVal.size();   // mask length == result length

  Dest.AggregateVal.resize(src3Size);

  // The three cases below are identical except for which union member of the
  // lane GenericValue is copied.
  switch (TyContained->getTypeID()) {
    default:
      llvm_unreachable("Unhandled dest type for insertelement instruction");
      break;
    case Type::IntegerTyID:
      for( unsigned i=0; i<src3Size; i++) {
        unsigned j = Src3.AggregateVal[i].IntVal.getZExtValue();
        if(j < src1Size)
          Dest.AggregateVal[i].IntVal = Src1.AggregateVal[j].IntVal;
        else if(j < src1Size + src2Size)
          Dest.AggregateVal[i].IntVal = Src2.AggregateVal[j-src1Size].IntVal;
        else
          // The selector may not be greater than sum of lengths of first and
          // second operands and llasm should not allow situation like
          // %tmp = shufflevector <2 x i32> <i32 3, i32 4>, <2 x i32> undef,
          //                      <2 x i32> < i32 0, i32 5 >,
          // where i32 5 is invalid, but let it be additional check here:
          llvm_unreachable("Invalid mask in shufflevector instruction");
      }
      break;
    case Type::FloatTyID:
      for( unsigned i=0; i<src3Size; i++) {
        unsigned j = Src3.AggregateVal[i].IntVal.getZExtValue();
        if(j < src1Size)
          Dest.AggregateVal[i].FloatVal = Src1.AggregateVal[j].FloatVal;
        else if(j < src1Size + src2Size)
          Dest.AggregateVal[i].FloatVal = Src2.AggregateVal[j-src1Size].FloatVal;
        else
          llvm_unreachable("Invalid mask in shufflevector instruction");
        }
      break;
    case Type::DoubleTyID:
      for( unsigned i=0; i<src3Size; i++) {
        unsigned j = Src3.AggregateVal[i].IntVal.getZExtValue();
        if(j < src1Size)
          Dest.AggregateVal[i].DoubleVal = Src1.AggregateVal[j].DoubleVal;
        else if(j < src1Size + src2Size)
          Dest.AggregateVal[i].DoubleVal =
            Src2.AggregateVal[j-src1Size].DoubleVal;
        else
          llvm_unreachable("Invalid mask in shufflevector instruction");
      }
      break;
  }
  SetValue(&I, Dest, SF);
}
  1676. void Interpreter::visitExtractValueInst(ExtractValueInst &I) {
  1677. ExecutionContext &SF = ECStack.back();
  1678. Value *Agg = I.getAggregateOperand();
  1679. GenericValue Dest;
  1680. GenericValue Src = getOperandValue(Agg, SF);
  1681. ExtractValueInst::idx_iterator IdxBegin = I.idx_begin();
  1682. unsigned Num = I.getNumIndices();
  1683. GenericValue *pSrc = &Src;
  1684. for (unsigned i = 0 ; i < Num; ++i) {
  1685. pSrc = &pSrc->AggregateVal[*IdxBegin];
  1686. ++IdxBegin;
  1687. }
  1688. Type *IndexedType = ExtractValueInst::getIndexedType(Agg->getType(), I.getIndices());
  1689. switch (IndexedType->getTypeID()) {
  1690. default:
  1691. llvm_unreachable("Unhandled dest type for extractelement instruction");
  1692. break;
  1693. case Type::IntegerTyID:
  1694. Dest.IntVal = pSrc->IntVal;
  1695. break;
  1696. case Type::FloatTyID:
  1697. Dest.FloatVal = pSrc->FloatVal;
  1698. break;
  1699. case Type::DoubleTyID:
  1700. Dest.DoubleVal = pSrc->DoubleVal;
  1701. break;
  1702. case Type::ArrayTyID:
  1703. case Type::StructTyID:
  1704. case Type::VectorTyID:
  1705. Dest.AggregateVal = pSrc->AggregateVal;
  1706. break;
  1707. case Type::PointerTyID:
  1708. Dest.PointerVal = pSrc->PointerVal;
  1709. break;
  1710. }
  1711. SetValue(&I, Dest, SF);
  1712. }
  1713. void Interpreter::visitInsertValueInst(InsertValueInst &I) {
  1714. ExecutionContext &SF = ECStack.back();
  1715. Value *Agg = I.getAggregateOperand();
  1716. GenericValue Src1 = getOperandValue(Agg, SF);
  1717. GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
  1718. GenericValue Dest = Src1; // Dest is a slightly changed Src1
  1719. ExtractValueInst::idx_iterator IdxBegin = I.idx_begin();
  1720. unsigned Num = I.getNumIndices();
  1721. GenericValue *pDest = &Dest;
  1722. for (unsigned i = 0 ; i < Num; ++i) {
  1723. pDest = &pDest->AggregateVal[*IdxBegin];
  1724. ++IdxBegin;
  1725. }
  1726. // pDest points to the target value in the Dest now
  1727. Type *IndexedType = ExtractValueInst::getIndexedType(Agg->getType(), I.getIndices());
  1728. switch (IndexedType->getTypeID()) {
  1729. default:
  1730. llvm_unreachable("Unhandled dest type for insertelement instruction");
  1731. break;
  1732. case Type::IntegerTyID:
  1733. pDest->IntVal = Src2.IntVal;
  1734. break;
  1735. case Type::FloatTyID:
  1736. pDest->FloatVal = Src2.FloatVal;
  1737. break;
  1738. case Type::DoubleTyID:
  1739. pDest->DoubleVal = Src2.DoubleVal;
  1740. break;
  1741. case Type::ArrayTyID:
  1742. case Type::StructTyID:
  1743. case Type::VectorTyID:
  1744. pDest->AggregateVal = Src2.AggregateVal;
  1745. break;
  1746. case Type::PointerTyID:
  1747. pDest->PointerVal = Src2.PointerVal;
  1748. break;
  1749. }
  1750. SetValue(&I, Dest, SF);
  1751. }
// Evaluate a ConstantExpr by re-using the interpreter's instruction execute
// helpers.  Casts, GEP, compares, and selects return directly from the first
// switch; binary/shift operators fall through to the second switch, which
// computes into a local Dest.
GenericValue Interpreter::getConstantExprValue (ConstantExpr *CE,
                                                ExecutionContext &SF) {
  switch (CE->getOpcode()) {
  // Cast-like operators: delegate to the same helpers the visitors use.
  case Instruction::Trunc:
      return executeTruncInst(CE->getOperand(0), CE->getType(), SF);
  case Instruction::ZExt:
      return executeZExtInst(CE->getOperand(0), CE->getType(), SF);
  case Instruction::SExt:
      return executeSExtInst(CE->getOperand(0), CE->getType(), SF);
  case Instruction::FPTrunc:
      return executeFPTruncInst(CE->getOperand(0), CE->getType(), SF);
  case Instruction::FPExt:
      return executeFPExtInst(CE->getOperand(0), CE->getType(), SF);
  case Instruction::UIToFP:
      return executeUIToFPInst(CE->getOperand(0), CE->getType(), SF);
  case Instruction::SIToFP:
      return executeSIToFPInst(CE->getOperand(0), CE->getType(), SF);
  case Instruction::FPToUI:
      return executeFPToUIInst(CE->getOperand(0), CE->getType(), SF);
  case Instruction::FPToSI:
      return executeFPToSIInst(CE->getOperand(0), CE->getType(), SF);
  case Instruction::PtrToInt:
      return executePtrToIntInst(CE->getOperand(0), CE->getType(), SF);
  case Instruction::IntToPtr:
      return executeIntToPtrInst(CE->getOperand(0), CE->getType(), SF);
  case Instruction::BitCast:
      return executeBitCastInst(CE->getOperand(0), CE->getType(), SF);
  case Instruction::GetElementPtr:
    return executeGEPOperation(CE->getOperand(0), gep_type_begin(CE),
                               gep_type_end(CE), SF);
  case Instruction::FCmp:
  case Instruction::ICmp:
    return executeCmpInst(CE->getPredicate(),
                          getOperandValue(CE->getOperand(0), SF),
                          getOperandValue(CE->getOperand(1), SF),
                          CE->getOperand(0)->getType());
  case Instruction::Select:
    return executeSelectInst(getOperandValue(CE->getOperand(0), SF),
                             getOperandValue(CE->getOperand(1), SF),
                             getOperandValue(CE->getOperand(2), SF),
                             CE->getOperand(0)->getType());
  default :
    break;
  }

  // The cases below here require a GenericValue parameter for the result
  // so we initialize one, compute it and then return it.
  GenericValue Op0 = getOperandValue(CE->getOperand(0), SF);
  GenericValue Op1 = getOperandValue(CE->getOperand(1), SF);
  GenericValue Dest;
  Type * Ty = CE->getOperand(0)->getType();
  switch (CE->getOpcode()) {
  case Instruction::Add:  Dest.IntVal = Op0.IntVal + Op1.IntVal; break;
  case Instruction::Sub:  Dest.IntVal = Op0.IntVal - Op1.IntVal; break;
  case Instruction::Mul:  Dest.IntVal = Op0.IntVal * Op1.IntVal; break;
  // Floating-point operators write into Dest via the out-parameter helpers.
  case Instruction::FAdd: executeFAddInst(Dest, Op0, Op1, Ty); break;
  case Instruction::FSub: executeFSubInst(Dest, Op0, Op1, Ty); break;
  case Instruction::FMul: executeFMulInst(Dest, Op0, Op1, Ty); break;
  case Instruction::FDiv: executeFDivInst(Dest, Op0, Op1, Ty); break;
  case Instruction::FRem: executeFRemInst(Dest, Op0, Op1, Ty); break;
  case Instruction::SDiv: Dest.IntVal = Op0.IntVal.sdiv(Op1.IntVal); break;
  case Instruction::UDiv: Dest.IntVal = Op0.IntVal.udiv(Op1.IntVal); break;
  case Instruction::URem: Dest.IntVal = Op0.IntVal.urem(Op1.IntVal); break;
  case Instruction::SRem: Dest.IntVal = Op0.IntVal.srem(Op1.IntVal); break;
  case Instruction::And:  Dest.IntVal = Op0.IntVal & Op1.IntVal; break;
  case Instruction::Or:   Dest.IntVal = Op0.IntVal | Op1.IntVal; break;
  case Instruction::Xor:  Dest.IntVal = Op0.IntVal ^ Op1.IntVal; break;
  // Shift amounts come from the raw 64-bit value of the RHS operand.
  case Instruction::Shl:
    Dest.IntVal = Op0.IntVal.shl(Op1.IntVal.getZExtValue());
    break;
  case Instruction::LShr:
    Dest.IntVal = Op0.IntVal.lshr(Op1.IntVal.getZExtValue());
    break;
  case Instruction::AShr:
    Dest.IntVal = Op0.IntVal.ashr(Op1.IntVal.getZExtValue());
    break;
  default:
    dbgs() << "Unhandled ConstantExpr: " << *CE << "\n";
    llvm_unreachable("Unhandled ConstantExpr");
  }
  return Dest;
}
// Fetch the runtime GenericValue for an operand: fold constant expressions,
// materialize plain constants, and otherwise read the SSA value out of the
// current stack frame's value map.
GenericValue Interpreter::getOperandValue(Value *V, ExecutionContext &SF) {
  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) {
    return getConstantExprValue(CE, SF);
  } else if (Constant *CPV = dyn_cast<Constant>(V)) {
    return getConstantValue(CPV);
  } else if (GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
    // NOTE(review): GlobalValue derives from Constant, so this branch is
    // unreachable — globals are already handled by the dyn_cast<Constant>
    // above.  Kept as-is; confirm before removing.
    return PTOGV(getPointerToGlobal(GV));
  } else {
    return SF.Values[V];
  }
}
  1844. //===----------------------------------------------------------------------===//
  1845. // Dispatch and Execution Code
  1846. //===----------------------------------------------------------------------===//
  1847. //===----------------------------------------------------------------------===//
  1848. // callFunction - Execute the specified function...
  1849. //
// callFunction - Push a new stack frame for F, bind the incoming arguments,
// and position the frame's "PC" at the first instruction of the entry block.
// Declaration-only (external) functions are routed to callExternalFunction
// and their result returned to the caller immediately.
void Interpreter::callFunction(Function *F, ArrayRef<GenericValue> ArgVals) {
  assert((ECStack.empty() || !ECStack.back().Caller.getInstruction() ||
          ECStack.back().Caller.arg_size() == ArgVals.size()) &&
         "Incorrect number of arguments passed into function call!");
  // Make a new stack frame... and fill it in.
  ECStack.emplace_back();
  ExecutionContext &StackFrame = ECStack.back();
  StackFrame.CurFunction = F;

  // Special handling for external functions.
  if (F->isDeclaration()) {
    GenericValue Result = callExternalFunction (F, ArgVals);
    // Simulate a 'ret' instruction of the appropriate type.
    popStackAndReturnValueToCaller (F->getReturnType (), Result);
    return;
  }

  // Get pointers to first LLVM BB & Instruction in function.
  StackFrame.CurBB     = F->begin();
  StackFrame.CurInst   = StackFrame.CurBB->begin();

  // Run through the function arguments and initialize their values...
  assert((ArgVals.size() == F->arg_size() ||
         (ArgVals.size() > F->arg_size() && F->getFunctionType()->isVarArg()))&&
         "Invalid number of values passed to function invocation!");

  // Handle non-varargs arguments...
  unsigned i = 0;
  for (Function::arg_iterator AI = F->arg_begin(), E = F->arg_end();
       AI != E; ++AI, ++i)
    SetValue(AI, ArgVals[i], StackFrame);

  // Handle varargs arguments... (any extras past the formal parameters go
  // into the frame's VarArgs list for visitVAArgInst to consume).
  StackFrame.VarArgs.assign(ArgVals.begin()+i, ArgVals.end());
}
// run - The main interpreter loop: fetch the instruction at the top frame's
// PC, advance the PC, and dispatch through InstVisitor, until the stack is
// empty (the top-level call has returned).
void Interpreter::run() {
  while (!ECStack.empty()) {
    // Interpret a single instruction & increment the "PC".
    ExecutionContext &SF = ECStack.back();  // Current stack frame
    Instruction &I = *SF.CurInst++;         // Increment before execute
                                            // (the visit may push/pop frames)

    // Track the number of dynamic instructions executed.
    ++NumDynamicInsts;

    DEBUG(dbgs() << "About to interpret: " << I);
    visit(I);   // Dispatch to one of the visit* methods...
#if 0
    // This is not safe, as visiting the instruction could lower it and free I.
DEBUG(
    if (!isa<CallInst>(I) && !isa<InvokeInst>(I) &&
        I.getType() != Type::VoidTy) {
      dbgs() << "  --> ";
      const GenericValue &Val = SF.Values[&I];
      switch (I.getType()->getTypeID()) {
      default: llvm_unreachable("Invalid GenericValue Type");
      case Type::VoidTyID:    dbgs() << "void"; break;
      case Type::FloatTyID:   dbgs() << "float " << Val.FloatVal; break;
      case Type::DoubleTyID:  dbgs() << "double " << Val.DoubleVal; break;
      case Type::PointerTyID: dbgs() << "void* " << intptr_t(Val.PointerVal);
        break;
      case Type::IntegerTyID:
        dbgs() << "i" << Val.IntVal.getBitWidth() << " "
               << Val.IntVal.toStringUnsigned(10)
               << " (0x" << Val.IntVal.toStringUnsigned(16) << ")\n";
        break;
      }
    });
#endif
  }
}