//===-- Execution.cpp - Implement code to simulate the program ------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
//  This file contains the actual instruction interpreter.
//
//===----------------------------------------------------------------------===//
#include "Interpreter.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/IntrinsicLowering.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/Instructions.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cmath>
  28. using namespace llvm;
  29. #define DEBUG_TYPE "interpreter"
  30. STATISTIC(NumDynamicInsts, "Number of dynamic instructions executed");
  31. static cl::opt<bool> PrintVolatile("interpreter-print-volatile", cl::Hidden,
  32. cl::desc("make the interpreter print every volatile load and store"));
  33. //===----------------------------------------------------------------------===//
  34. // Various Helper Functions
  35. //===----------------------------------------------------------------------===//
  36. static void SetValue(Value *V, GenericValue Val, ExecutionContext &SF) {
  37. SF.Values[V] = Val;
  38. }
  39. //===----------------------------------------------------------------------===//
  40. // Binary Instruction Implementations
  41. //===----------------------------------------------------------------------===//
  42. #define IMPLEMENT_BINARY_OPERATOR(OP, TY) \
  43. case Type::TY##TyID: \
  44. Dest.TY##Val = Src1.TY##Val OP Src2.TY##Val; \
  45. break
  46. static void executeFAddInst(GenericValue &Dest, GenericValue Src1,
  47. GenericValue Src2, Type *Ty) {
  48. switch (Ty->getTypeID()) {
  49. IMPLEMENT_BINARY_OPERATOR(+, Float);
  50. IMPLEMENT_BINARY_OPERATOR(+, Double);
  51. default:
  52. dbgs() << "Unhandled type for FAdd instruction: " << *Ty << "\n";
  53. llvm_unreachable(nullptr);
  54. }
  55. }
  56. static void executeFSubInst(GenericValue &Dest, GenericValue Src1,
  57. GenericValue Src2, Type *Ty) {
  58. switch (Ty->getTypeID()) {
  59. IMPLEMENT_BINARY_OPERATOR(-, Float);
  60. IMPLEMENT_BINARY_OPERATOR(-, Double);
  61. default:
  62. dbgs() << "Unhandled type for FSub instruction: " << *Ty << "\n";
  63. llvm_unreachable(nullptr);
  64. }
  65. }
  66. static void executeFMulInst(GenericValue &Dest, GenericValue Src1,
  67. GenericValue Src2, Type *Ty) {
  68. switch (Ty->getTypeID()) {
  69. IMPLEMENT_BINARY_OPERATOR(*, Float);
  70. IMPLEMENT_BINARY_OPERATOR(*, Double);
  71. default:
  72. dbgs() << "Unhandled type for FMul instruction: " << *Ty << "\n";
  73. llvm_unreachable(nullptr);
  74. }
  75. }
  76. static void executeFDivInst(GenericValue &Dest, GenericValue Src1,
  77. GenericValue Src2, Type *Ty) {
  78. switch (Ty->getTypeID()) {
  79. IMPLEMENT_BINARY_OPERATOR(/, Float);
  80. IMPLEMENT_BINARY_OPERATOR(/, Double);
  81. default:
  82. dbgs() << "Unhandled type for FDiv instruction: " << *Ty << "\n";
  83. llvm_unreachable(nullptr);
  84. }
  85. }
  86. static void executeFRemInst(GenericValue &Dest, GenericValue Src1,
  87. GenericValue Src2, Type *Ty) {
  88. switch (Ty->getTypeID()) {
  89. case Type::FloatTyID:
  90. Dest.FloatVal = fmod(Src1.FloatVal, Src2.FloatVal);
  91. break;
  92. case Type::DoubleTyID:
  93. Dest.DoubleVal = fmod(Src1.DoubleVal, Src2.DoubleVal);
  94. break;
  95. default:
  96. dbgs() << "Unhandled type for Rem instruction: " << *Ty << "\n";
  97. llvm_unreachable(nullptr);
  98. }
  99. }
  100. #define IMPLEMENT_INTEGER_ICMP(OP, TY) \
  101. case Type::IntegerTyID: \
  102. Dest.IntVal = APInt(1,Src1.IntVal.OP(Src2.IntVal)); \
  103. break;
  104. #define IMPLEMENT_VECTOR_INTEGER_ICMP(OP, TY) \
  105. case Type::VectorTyID: { \
  106. assert(Src1.AggregateVal.size() == Src2.AggregateVal.size()); \
  107. Dest.AggregateVal.resize( Src1.AggregateVal.size() ); \
  108. for( uint32_t _i=0;_i<Src1.AggregateVal.size();_i++) \
  109. Dest.AggregateVal[_i].IntVal = APInt(1, \
  110. Src1.AggregateVal[_i].IntVal.OP(Src2.AggregateVal[_i].IntVal));\
  111. } break;
  112. // Handle pointers specially because they must be compared with only as much
  113. // width as the host has. We _do not_ want to be comparing 64 bit values when
  114. // running on a 32-bit target, otherwise the upper 32 bits might mess up
  115. // comparisons if they contain garbage.
  116. #define IMPLEMENT_POINTER_ICMP(OP) \
  117. case Type::PointerTyID: \
  118. Dest.IntVal = APInt(1,(void*)(intptr_t)Src1.PointerVal OP \
  119. (void*)(intptr_t)Src2.PointerVal); \
  120. break;
  121. static GenericValue executeICMP_EQ(GenericValue Src1, GenericValue Src2,
  122. Type *Ty) {
  123. GenericValue Dest;
  124. switch (Ty->getTypeID()) {
  125. IMPLEMENT_INTEGER_ICMP(eq,Ty);
  126. IMPLEMENT_VECTOR_INTEGER_ICMP(eq,Ty);
  127. IMPLEMENT_POINTER_ICMP(==);
  128. default:
  129. dbgs() << "Unhandled type for ICMP_EQ predicate: " << *Ty << "\n";
  130. llvm_unreachable(nullptr);
  131. }
  132. return Dest;
  133. }
  134. static GenericValue executeICMP_NE(GenericValue Src1, GenericValue Src2,
  135. Type *Ty) {
  136. GenericValue Dest;
  137. switch (Ty->getTypeID()) {
  138. IMPLEMENT_INTEGER_ICMP(ne,Ty);
  139. IMPLEMENT_VECTOR_INTEGER_ICMP(ne,Ty);
  140. IMPLEMENT_POINTER_ICMP(!=);
  141. default:
  142. dbgs() << "Unhandled type for ICMP_NE predicate: " << *Ty << "\n";
  143. llvm_unreachable(nullptr);
  144. }
  145. return Dest;
  146. }
  147. static GenericValue executeICMP_ULT(GenericValue Src1, GenericValue Src2,
  148. Type *Ty) {
  149. GenericValue Dest;
  150. switch (Ty->getTypeID()) {
  151. IMPLEMENT_INTEGER_ICMP(ult,Ty);
  152. IMPLEMENT_VECTOR_INTEGER_ICMP(ult,Ty);
  153. IMPLEMENT_POINTER_ICMP(<);
  154. default:
  155. dbgs() << "Unhandled type for ICMP_ULT predicate: " << *Ty << "\n";
  156. llvm_unreachable(nullptr);
  157. }
  158. return Dest;
  159. }
  160. static GenericValue executeICMP_SLT(GenericValue Src1, GenericValue Src2,
  161. Type *Ty) {
  162. GenericValue Dest;
  163. switch (Ty->getTypeID()) {
  164. IMPLEMENT_INTEGER_ICMP(slt,Ty);
  165. IMPLEMENT_VECTOR_INTEGER_ICMP(slt,Ty);
  166. IMPLEMENT_POINTER_ICMP(<);
  167. default:
  168. dbgs() << "Unhandled type for ICMP_SLT predicate: " << *Ty << "\n";
  169. llvm_unreachable(nullptr);
  170. }
  171. return Dest;
  172. }
  173. static GenericValue executeICMP_UGT(GenericValue Src1, GenericValue Src2,
  174. Type *Ty) {
  175. GenericValue Dest;
  176. switch (Ty->getTypeID()) {
  177. IMPLEMENT_INTEGER_ICMP(ugt,Ty);
  178. IMPLEMENT_VECTOR_INTEGER_ICMP(ugt,Ty);
  179. IMPLEMENT_POINTER_ICMP(>);
  180. default:
  181. dbgs() << "Unhandled type for ICMP_UGT predicate: " << *Ty << "\n";
  182. llvm_unreachable(nullptr);
  183. }
  184. return Dest;
  185. }
  186. static GenericValue executeICMP_SGT(GenericValue Src1, GenericValue Src2,
  187. Type *Ty) {
  188. GenericValue Dest;
  189. switch (Ty->getTypeID()) {
  190. IMPLEMENT_INTEGER_ICMP(sgt,Ty);
  191. IMPLEMENT_VECTOR_INTEGER_ICMP(sgt,Ty);
  192. IMPLEMENT_POINTER_ICMP(>);
  193. default:
  194. dbgs() << "Unhandled type for ICMP_SGT predicate: " << *Ty << "\n";
  195. llvm_unreachable(nullptr);
  196. }
  197. return Dest;
  198. }
  199. static GenericValue executeICMP_ULE(GenericValue Src1, GenericValue Src2,
  200. Type *Ty) {
  201. GenericValue Dest;
  202. switch (Ty->getTypeID()) {
  203. IMPLEMENT_INTEGER_ICMP(ule,Ty);
  204. IMPLEMENT_VECTOR_INTEGER_ICMP(ule,Ty);
  205. IMPLEMENT_POINTER_ICMP(<=);
  206. default:
  207. dbgs() << "Unhandled type for ICMP_ULE predicate: " << *Ty << "\n";
  208. llvm_unreachable(nullptr);
  209. }
  210. return Dest;
  211. }
  212. static GenericValue executeICMP_SLE(GenericValue Src1, GenericValue Src2,
  213. Type *Ty) {
  214. GenericValue Dest;
  215. switch (Ty->getTypeID()) {
  216. IMPLEMENT_INTEGER_ICMP(sle,Ty);
  217. IMPLEMENT_VECTOR_INTEGER_ICMP(sle,Ty);
  218. IMPLEMENT_POINTER_ICMP(<=);
  219. default:
  220. dbgs() << "Unhandled type for ICMP_SLE predicate: " << *Ty << "\n";
  221. llvm_unreachable(nullptr);
  222. }
  223. return Dest;
  224. }
  225. static GenericValue executeICMP_UGE(GenericValue Src1, GenericValue Src2,
  226. Type *Ty) {
  227. GenericValue Dest;
  228. switch (Ty->getTypeID()) {
  229. IMPLEMENT_INTEGER_ICMP(uge,Ty);
  230. IMPLEMENT_VECTOR_INTEGER_ICMP(uge,Ty);
  231. IMPLEMENT_POINTER_ICMP(>=);
  232. default:
  233. dbgs() << "Unhandled type for ICMP_UGE predicate: " << *Ty << "\n";
  234. llvm_unreachable(nullptr);
  235. }
  236. return Dest;
  237. }
  238. static GenericValue executeICMP_SGE(GenericValue Src1, GenericValue Src2,
  239. Type *Ty) {
  240. GenericValue Dest;
  241. switch (Ty->getTypeID()) {
  242. IMPLEMENT_INTEGER_ICMP(sge,Ty);
  243. IMPLEMENT_VECTOR_INTEGER_ICMP(sge,Ty);
  244. IMPLEMENT_POINTER_ICMP(>=);
  245. default:
  246. dbgs() << "Unhandled type for ICMP_SGE predicate: " << *Ty << "\n";
  247. llvm_unreachable(nullptr);
  248. }
  249. return Dest;
  250. }
  251. void Interpreter::visitICmpInst(ICmpInst &I) {
  252. ExecutionContext &SF = ECStack.back();
  253. Type *Ty = I.getOperand(0)->getType();
  254. GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
  255. GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
  256. GenericValue R; // Result
  257. switch (I.getPredicate()) {
  258. case ICmpInst::ICMP_EQ: R = executeICMP_EQ(Src1, Src2, Ty); break;
  259. case ICmpInst::ICMP_NE: R = executeICMP_NE(Src1, Src2, Ty); break;
  260. case ICmpInst::ICMP_ULT: R = executeICMP_ULT(Src1, Src2, Ty); break;
  261. case ICmpInst::ICMP_SLT: R = executeICMP_SLT(Src1, Src2, Ty); break;
  262. case ICmpInst::ICMP_UGT: R = executeICMP_UGT(Src1, Src2, Ty); break;
  263. case ICmpInst::ICMP_SGT: R = executeICMP_SGT(Src1, Src2, Ty); break;
  264. case ICmpInst::ICMP_ULE: R = executeICMP_ULE(Src1, Src2, Ty); break;
  265. case ICmpInst::ICMP_SLE: R = executeICMP_SLE(Src1, Src2, Ty); break;
  266. case ICmpInst::ICMP_UGE: R = executeICMP_UGE(Src1, Src2, Ty); break;
  267. case ICmpInst::ICMP_SGE: R = executeICMP_SGE(Src1, Src2, Ty); break;
  268. default:
  269. dbgs() << "Don't know how to handle this ICmp predicate!\n-->" << I;
  270. llvm_unreachable(nullptr);
  271. }
  272. SetValue(&I, R, SF);
  273. }
  274. #define IMPLEMENT_FCMP(OP, TY) \
  275. case Type::TY##TyID: \
  276. Dest.IntVal = APInt(1,Src1.TY##Val OP Src2.TY##Val); \
  277. break
  278. #define IMPLEMENT_VECTOR_FCMP_T(OP, TY) \
  279. assert(Src1.AggregateVal.size() == Src2.AggregateVal.size()); \
  280. Dest.AggregateVal.resize( Src1.AggregateVal.size() ); \
  281. for( uint32_t _i=0;_i<Src1.AggregateVal.size();_i++) \
  282. Dest.AggregateVal[_i].IntVal = APInt(1, \
  283. Src1.AggregateVal[_i].TY##Val OP Src2.AggregateVal[_i].TY##Val);\
  284. break;
  285. #define IMPLEMENT_VECTOR_FCMP(OP) \
  286. case Type::VectorTyID: \
  287. if (cast<VectorType>(Ty)->getElementType()->isFloatTy()) { \
  288. IMPLEMENT_VECTOR_FCMP_T(OP, Float); \
  289. } else { \
  290. IMPLEMENT_VECTOR_FCMP_T(OP, Double); \
  291. }
  292. static GenericValue executeFCMP_OEQ(GenericValue Src1, GenericValue Src2,
  293. Type *Ty) {
  294. GenericValue Dest;
  295. switch (Ty->getTypeID()) {
  296. IMPLEMENT_FCMP(==, Float);
  297. IMPLEMENT_FCMP(==, Double);
  298. IMPLEMENT_VECTOR_FCMP(==);
  299. default:
  300. dbgs() << "Unhandled type for FCmp EQ instruction: " << *Ty << "\n";
  301. llvm_unreachable(nullptr);
  302. }
  303. return Dest;
  304. }
  305. #define IMPLEMENT_SCALAR_NANS(TY, X,Y) \
  306. if (TY->isFloatTy()) { \
  307. if (X.FloatVal != X.FloatVal || Y.FloatVal != Y.FloatVal) { \
  308. Dest.IntVal = APInt(1,false); \
  309. return Dest; \
  310. } \
  311. } else { \
  312. if (X.DoubleVal != X.DoubleVal || Y.DoubleVal != Y.DoubleVal) { \
  313. Dest.IntVal = APInt(1,false); \
  314. return Dest; \
  315. } \
  316. }
  317. #define MASK_VECTOR_NANS_T(X,Y, TZ, FLAG) \
  318. assert(X.AggregateVal.size() == Y.AggregateVal.size()); \
  319. Dest.AggregateVal.resize( X.AggregateVal.size() ); \
  320. for( uint32_t _i=0;_i<X.AggregateVal.size();_i++) { \
  321. if (X.AggregateVal[_i].TZ##Val != X.AggregateVal[_i].TZ##Val || \
  322. Y.AggregateVal[_i].TZ##Val != Y.AggregateVal[_i].TZ##Val) \
  323. Dest.AggregateVal[_i].IntVal = APInt(1,FLAG); \
  324. else { \
  325. Dest.AggregateVal[_i].IntVal = APInt(1,!FLAG); \
  326. } \
  327. }
  328. #define MASK_VECTOR_NANS(TY, X,Y, FLAG) \
  329. if (TY->isVectorTy()) { \
  330. if (cast<VectorType>(TY)->getElementType()->isFloatTy()) { \
  331. MASK_VECTOR_NANS_T(X, Y, Float, FLAG) \
  332. } else { \
  333. MASK_VECTOR_NANS_T(X, Y, Double, FLAG) \
  334. } \
  335. } \
  336. static GenericValue executeFCMP_ONE(GenericValue Src1, GenericValue Src2,
  337. Type *Ty)
  338. {
  339. GenericValue Dest;
  340. // if input is scalar value and Src1 or Src2 is NaN return false
  341. IMPLEMENT_SCALAR_NANS(Ty, Src1, Src2)
  342. // if vector input detect NaNs and fill mask
  343. MASK_VECTOR_NANS(Ty, Src1, Src2, false)
  344. GenericValue DestMask = Dest;
  345. switch (Ty->getTypeID()) {
  346. IMPLEMENT_FCMP(!=, Float);
  347. IMPLEMENT_FCMP(!=, Double);
  348. IMPLEMENT_VECTOR_FCMP(!=);
  349. default:
  350. dbgs() << "Unhandled type for FCmp NE instruction: " << *Ty << "\n";
  351. llvm_unreachable(nullptr);
  352. }
  353. // in vector case mask out NaN elements
  354. if (Ty->isVectorTy())
  355. for( size_t _i=0; _i<Src1.AggregateVal.size(); _i++)
  356. if (DestMask.AggregateVal[_i].IntVal == false)
  357. Dest.AggregateVal[_i].IntVal = APInt(1,false);
  358. return Dest;
  359. }
  360. static GenericValue executeFCMP_OLE(GenericValue Src1, GenericValue Src2,
  361. Type *Ty) {
  362. GenericValue Dest;
  363. switch (Ty->getTypeID()) {
  364. IMPLEMENT_FCMP(<=, Float);
  365. IMPLEMENT_FCMP(<=, Double);
  366. IMPLEMENT_VECTOR_FCMP(<=);
  367. default:
  368. dbgs() << "Unhandled type for FCmp LE instruction: " << *Ty << "\n";
  369. llvm_unreachable(nullptr);
  370. }
  371. return Dest;
  372. }
  373. static GenericValue executeFCMP_OGE(GenericValue Src1, GenericValue Src2,
  374. Type *Ty) {
  375. GenericValue Dest;
  376. switch (Ty->getTypeID()) {
  377. IMPLEMENT_FCMP(>=, Float);
  378. IMPLEMENT_FCMP(>=, Double);
  379. IMPLEMENT_VECTOR_FCMP(>=);
  380. default:
  381. dbgs() << "Unhandled type for FCmp GE instruction: " << *Ty << "\n";
  382. llvm_unreachable(nullptr);
  383. }
  384. return Dest;
  385. }
  386. static GenericValue executeFCMP_OLT(GenericValue Src1, GenericValue Src2,
  387. Type *Ty) {
  388. GenericValue Dest;
  389. switch (Ty->getTypeID()) {
  390. IMPLEMENT_FCMP(<, Float);
  391. IMPLEMENT_FCMP(<, Double);
  392. IMPLEMENT_VECTOR_FCMP(<);
  393. default:
  394. dbgs() << "Unhandled type for FCmp LT instruction: " << *Ty << "\n";
  395. llvm_unreachable(nullptr);
  396. }
  397. return Dest;
  398. }
  399. static GenericValue executeFCMP_OGT(GenericValue Src1, GenericValue Src2,
  400. Type *Ty) {
  401. GenericValue Dest;
  402. switch (Ty->getTypeID()) {
  403. IMPLEMENT_FCMP(>, Float);
  404. IMPLEMENT_FCMP(>, Double);
  405. IMPLEMENT_VECTOR_FCMP(>);
  406. default:
  407. dbgs() << "Unhandled type for FCmp GT instruction: " << *Ty << "\n";
  408. llvm_unreachable(nullptr);
  409. }
  410. return Dest;
  411. }
  412. #define IMPLEMENT_UNORDERED(TY, X,Y) \
  413. if (TY->isFloatTy()) { \
  414. if (X.FloatVal != X.FloatVal || Y.FloatVal != Y.FloatVal) { \
  415. Dest.IntVal = APInt(1,true); \
  416. return Dest; \
  417. } \
  418. } else if (X.DoubleVal != X.DoubleVal || Y.DoubleVal != Y.DoubleVal) { \
  419. Dest.IntVal = APInt(1,true); \
  420. return Dest; \
  421. }
  422. #define IMPLEMENT_VECTOR_UNORDERED(TY, X, Y, FUNC) \
  423. if (TY->isVectorTy()) { \
  424. GenericValue DestMask = Dest; \
  425. Dest = FUNC(Src1, Src2, Ty); \
  426. for (size_t _i = 0; _i < Src1.AggregateVal.size(); _i++) \
  427. if (DestMask.AggregateVal[_i].IntVal == true) \
  428. Dest.AggregateVal[_i].IntVal = APInt(1, true); \
  429. return Dest; \
  430. }
  431. static GenericValue executeFCMP_UEQ(GenericValue Src1, GenericValue Src2,
  432. Type *Ty) {
  433. GenericValue Dest;
  434. IMPLEMENT_UNORDERED(Ty, Src1, Src2)
  435. MASK_VECTOR_NANS(Ty, Src1, Src2, true)
  436. IMPLEMENT_VECTOR_UNORDERED(Ty, Src1, Src2, executeFCMP_OEQ)
  437. return executeFCMP_OEQ(Src1, Src2, Ty);
  438. }
  439. static GenericValue executeFCMP_UNE(GenericValue Src1, GenericValue Src2,
  440. Type *Ty) {
  441. GenericValue Dest;
  442. IMPLEMENT_UNORDERED(Ty, Src1, Src2)
  443. MASK_VECTOR_NANS(Ty, Src1, Src2, true)
  444. IMPLEMENT_VECTOR_UNORDERED(Ty, Src1, Src2, executeFCMP_ONE)
  445. return executeFCMP_ONE(Src1, Src2, Ty);
  446. }
  447. static GenericValue executeFCMP_ULE(GenericValue Src1, GenericValue Src2,
  448. Type *Ty) {
  449. GenericValue Dest;
  450. IMPLEMENT_UNORDERED(Ty, Src1, Src2)
  451. MASK_VECTOR_NANS(Ty, Src1, Src2, true)
  452. IMPLEMENT_VECTOR_UNORDERED(Ty, Src1, Src2, executeFCMP_OLE)
  453. return executeFCMP_OLE(Src1, Src2, Ty);
  454. }
  455. static GenericValue executeFCMP_UGE(GenericValue Src1, GenericValue Src2,
  456. Type *Ty) {
  457. GenericValue Dest;
  458. IMPLEMENT_UNORDERED(Ty, Src1, Src2)
  459. MASK_VECTOR_NANS(Ty, Src1, Src2, true)
  460. IMPLEMENT_VECTOR_UNORDERED(Ty, Src1, Src2, executeFCMP_OGE)
  461. return executeFCMP_OGE(Src1, Src2, Ty);
  462. }
  463. static GenericValue executeFCMP_ULT(GenericValue Src1, GenericValue Src2,
  464. Type *Ty) {
  465. GenericValue Dest;
  466. IMPLEMENT_UNORDERED(Ty, Src1, Src2)
  467. MASK_VECTOR_NANS(Ty, Src1, Src2, true)
  468. IMPLEMENT_VECTOR_UNORDERED(Ty, Src1, Src2, executeFCMP_OLT)
  469. return executeFCMP_OLT(Src1, Src2, Ty);
  470. }
  471. static GenericValue executeFCMP_UGT(GenericValue Src1, GenericValue Src2,
  472. Type *Ty) {
  473. GenericValue Dest;
  474. IMPLEMENT_UNORDERED(Ty, Src1, Src2)
  475. MASK_VECTOR_NANS(Ty, Src1, Src2, true)
  476. IMPLEMENT_VECTOR_UNORDERED(Ty, Src1, Src2, executeFCMP_OGT)
  477. return executeFCMP_OGT(Src1, Src2, Ty);
  478. }
  479. static GenericValue executeFCMP_ORD(GenericValue Src1, GenericValue Src2,
  480. Type *Ty) {
  481. GenericValue Dest;
  482. if(Ty->isVectorTy()) {
  483. assert(Src1.AggregateVal.size() == Src2.AggregateVal.size());
  484. Dest.AggregateVal.resize( Src1.AggregateVal.size() );
  485. if (cast<VectorType>(Ty)->getElementType()->isFloatTy()) {
  486. for( size_t _i=0;_i<Src1.AggregateVal.size();_i++)
  487. Dest.AggregateVal[_i].IntVal = APInt(1,
  488. ( (Src1.AggregateVal[_i].FloatVal ==
  489. Src1.AggregateVal[_i].FloatVal) &&
  490. (Src2.AggregateVal[_i].FloatVal ==
  491. Src2.AggregateVal[_i].FloatVal)));
  492. } else {
  493. for( size_t _i=0;_i<Src1.AggregateVal.size();_i++)
  494. Dest.AggregateVal[_i].IntVal = APInt(1,
  495. ( (Src1.AggregateVal[_i].DoubleVal ==
  496. Src1.AggregateVal[_i].DoubleVal) &&
  497. (Src2.AggregateVal[_i].DoubleVal ==
  498. Src2.AggregateVal[_i].DoubleVal)));
  499. }
  500. } else if (Ty->isFloatTy())
  501. Dest.IntVal = APInt(1,(Src1.FloatVal == Src1.FloatVal &&
  502. Src2.FloatVal == Src2.FloatVal));
  503. else {
  504. Dest.IntVal = APInt(1,(Src1.DoubleVal == Src1.DoubleVal &&
  505. Src2.DoubleVal == Src2.DoubleVal));
  506. }
  507. return Dest;
  508. }
  509. static GenericValue executeFCMP_UNO(GenericValue Src1, GenericValue Src2,
  510. Type *Ty) {
  511. GenericValue Dest;
  512. if(Ty->isVectorTy()) {
  513. assert(Src1.AggregateVal.size() == Src2.AggregateVal.size());
  514. Dest.AggregateVal.resize( Src1.AggregateVal.size() );
  515. if (cast<VectorType>(Ty)->getElementType()->isFloatTy()) {
  516. for( size_t _i=0;_i<Src1.AggregateVal.size();_i++)
  517. Dest.AggregateVal[_i].IntVal = APInt(1,
  518. ( (Src1.AggregateVal[_i].FloatVal !=
  519. Src1.AggregateVal[_i].FloatVal) ||
  520. (Src2.AggregateVal[_i].FloatVal !=
  521. Src2.AggregateVal[_i].FloatVal)));
  522. } else {
  523. for( size_t _i=0;_i<Src1.AggregateVal.size();_i++)
  524. Dest.AggregateVal[_i].IntVal = APInt(1,
  525. ( (Src1.AggregateVal[_i].DoubleVal !=
  526. Src1.AggregateVal[_i].DoubleVal) ||
  527. (Src2.AggregateVal[_i].DoubleVal !=
  528. Src2.AggregateVal[_i].DoubleVal)));
  529. }
  530. } else if (Ty->isFloatTy())
  531. Dest.IntVal = APInt(1,(Src1.FloatVal != Src1.FloatVal ||
  532. Src2.FloatVal != Src2.FloatVal));
  533. else {
  534. Dest.IntVal = APInt(1,(Src1.DoubleVal != Src1.DoubleVal ||
  535. Src2.DoubleVal != Src2.DoubleVal));
  536. }
  537. return Dest;
  538. }
  539. static GenericValue executeFCMP_BOOL(GenericValue Src1, GenericValue Src2,
  540. Type *Ty, const bool val) {
  541. GenericValue Dest;
  542. if(Ty->isVectorTy()) {
  543. assert(Src1.AggregateVal.size() == Src2.AggregateVal.size());
  544. Dest.AggregateVal.resize( Src1.AggregateVal.size() );
  545. for( size_t _i=0; _i<Src1.AggregateVal.size(); _i++)
  546. Dest.AggregateVal[_i].IntVal = APInt(1,val);
  547. } else {
  548. Dest.IntVal = APInt(1, val);
  549. }
  550. return Dest;
  551. }
  552. void Interpreter::visitFCmpInst(FCmpInst &I) {
  553. ExecutionContext &SF = ECStack.back();
  554. Type *Ty = I.getOperand(0)->getType();
  555. GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
  556. GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
  557. GenericValue R; // Result
  558. switch (I.getPredicate()) {
  559. default:
  560. dbgs() << "Don't know how to handle this FCmp predicate!\n-->" << I;
  561. llvm_unreachable(nullptr);
  562. break;
  563. case FCmpInst::FCMP_FALSE: R = executeFCMP_BOOL(Src1, Src2, Ty, false);
  564. break;
  565. case FCmpInst::FCMP_TRUE: R = executeFCMP_BOOL(Src1, Src2, Ty, true);
  566. break;
  567. case FCmpInst::FCMP_ORD: R = executeFCMP_ORD(Src1, Src2, Ty); break;
  568. case FCmpInst::FCMP_UNO: R = executeFCMP_UNO(Src1, Src2, Ty); break;
  569. case FCmpInst::FCMP_UEQ: R = executeFCMP_UEQ(Src1, Src2, Ty); break;
  570. case FCmpInst::FCMP_OEQ: R = executeFCMP_OEQ(Src1, Src2, Ty); break;
  571. case FCmpInst::FCMP_UNE: R = executeFCMP_UNE(Src1, Src2, Ty); break;
  572. case FCmpInst::FCMP_ONE: R = executeFCMP_ONE(Src1, Src2, Ty); break;
  573. case FCmpInst::FCMP_ULT: R = executeFCMP_ULT(Src1, Src2, Ty); break;
  574. case FCmpInst::FCMP_OLT: R = executeFCMP_OLT(Src1, Src2, Ty); break;
  575. case FCmpInst::FCMP_UGT: R = executeFCMP_UGT(Src1, Src2, Ty); break;
  576. case FCmpInst::FCMP_OGT: R = executeFCMP_OGT(Src1, Src2, Ty); break;
  577. case FCmpInst::FCMP_ULE: R = executeFCMP_ULE(Src1, Src2, Ty); break;
  578. case FCmpInst::FCMP_OLE: R = executeFCMP_OLE(Src1, Src2, Ty); break;
  579. case FCmpInst::FCMP_UGE: R = executeFCMP_UGE(Src1, Src2, Ty); break;
  580. case FCmpInst::FCMP_OGE: R = executeFCMP_OGE(Src1, Src2, Ty); break;
  581. }
  582. SetValue(&I, R, SF);
  583. }
  584. static GenericValue executeCmpInst(unsigned predicate, GenericValue Src1,
  585. GenericValue Src2, Type *Ty) {
  586. GenericValue Result;
  587. switch (predicate) {
  588. case ICmpInst::ICMP_EQ: return executeICMP_EQ(Src1, Src2, Ty);
  589. case ICmpInst::ICMP_NE: return executeICMP_NE(Src1, Src2, Ty);
  590. case ICmpInst::ICMP_UGT: return executeICMP_UGT(Src1, Src2, Ty);
  591. case ICmpInst::ICMP_SGT: return executeICMP_SGT(Src1, Src2, Ty);
  592. case ICmpInst::ICMP_ULT: return executeICMP_ULT(Src1, Src2, Ty);
  593. case ICmpInst::ICMP_SLT: return executeICMP_SLT(Src1, Src2, Ty);
  594. case ICmpInst::ICMP_UGE: return executeICMP_UGE(Src1, Src2, Ty);
  595. case ICmpInst::ICMP_SGE: return executeICMP_SGE(Src1, Src2, Ty);
  596. case ICmpInst::ICMP_ULE: return executeICMP_ULE(Src1, Src2, Ty);
  597. case ICmpInst::ICMP_SLE: return executeICMP_SLE(Src1, Src2, Ty);
  598. case FCmpInst::FCMP_ORD: return executeFCMP_ORD(Src1, Src2, Ty);
  599. case FCmpInst::FCMP_UNO: return executeFCMP_UNO(Src1, Src2, Ty);
  600. case FCmpInst::FCMP_OEQ: return executeFCMP_OEQ(Src1, Src2, Ty);
  601. case FCmpInst::FCMP_UEQ: return executeFCMP_UEQ(Src1, Src2, Ty);
  602. case FCmpInst::FCMP_ONE: return executeFCMP_ONE(Src1, Src2, Ty);
  603. case FCmpInst::FCMP_UNE: return executeFCMP_UNE(Src1, Src2, Ty);
  604. case FCmpInst::FCMP_OLT: return executeFCMP_OLT(Src1, Src2, Ty);
  605. case FCmpInst::FCMP_ULT: return executeFCMP_ULT(Src1, Src2, Ty);
  606. case FCmpInst::FCMP_OGT: return executeFCMP_OGT(Src1, Src2, Ty);
  607. case FCmpInst::FCMP_UGT: return executeFCMP_UGT(Src1, Src2, Ty);
  608. case FCmpInst::FCMP_OLE: return executeFCMP_OLE(Src1, Src2, Ty);
  609. case FCmpInst::FCMP_ULE: return executeFCMP_ULE(Src1, Src2, Ty);
  610. case FCmpInst::FCMP_OGE: return executeFCMP_OGE(Src1, Src2, Ty);
  611. case FCmpInst::FCMP_UGE: return executeFCMP_UGE(Src1, Src2, Ty);
  612. case FCmpInst::FCMP_FALSE: return executeFCMP_BOOL(Src1, Src2, Ty, false);
  613. case FCmpInst::FCMP_TRUE: return executeFCMP_BOOL(Src1, Src2, Ty, true);
  614. default:
  615. dbgs() << "Unhandled Cmp predicate\n";
  616. llvm_unreachable(nullptr);
  617. }
  618. }
// visitBinaryOperator - Execute an integer or floating-point binary
// instruction (add/sub/mul, the div/rem family, and the bitwise ops).
// Vector operands are processed lane-by-lane via the macros below; scalar
// operands fall through to the APInt operators / host-float helpers.  The
// result is bound to the instruction in the current stack frame.
void Interpreter::visitBinaryOperator(BinaryOperator &I) {
  ExecutionContext &SF = ECStack.back();
  Type *Ty = I.getOperand(0)->getType();
  GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
  GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
  GenericValue R; // Result

  // First process vector operation
  if (Ty->isVectorTy()) {
    // Both operands of a vector binop always have the same lane count.
    assert(Src1.AggregateVal.size() == Src2.AggregateVal.size());
    R.AggregateVal.resize(Src1.AggregateVal.size());

    // Macros to execute binary operation 'OP' over integer vectors
#define INTEGER_VECTOR_OPERATION(OP)                                \
    for (unsigned i = 0; i < R.AggregateVal.size(); ++i)            \
      R.AggregateVal[i].IntVal =                                    \
          Src1.AggregateVal[i].IntVal OP Src2.AggregateVal[i].IntVal;

    // Additional macros to execute binary operations udiv/sdiv/urem/srem since
    // they have different notation.
#define INTEGER_VECTOR_FUNCTION(OP)                                 \
    for (unsigned i = 0; i < R.AggregateVal.size(); ++i)            \
      R.AggregateVal[i].IntVal =                                    \
          Src1.AggregateVal[i].IntVal.OP(Src2.AggregateVal[i].IntVal);

    // Macros to execute binary operation 'OP' over floating point type TY
    // (float or double) vectors
#define FLOAT_VECTOR_FUNCTION(OP, TY)                               \
    for (unsigned i = 0; i < R.AggregateVal.size(); ++i)            \
      R.AggregateVal[i].TY =                                        \
          Src1.AggregateVal[i].TY OP Src2.AggregateVal[i].TY;

    // Macros to choose appropriate TY: float or double and run operation
    // execution
#define FLOAT_VECTOR_OP(OP) {                                       \
  if (cast<VectorType>(Ty)->getElementType()->isFloatTy())          \
    FLOAT_VECTOR_FUNCTION(OP, FloatVal)                             \
  else {                                                            \
    if (cast<VectorType>(Ty)->getElementType()->isDoubleTy())       \
      FLOAT_VECTOR_FUNCTION(OP, DoubleVal)                          \
    else {                                                          \
      dbgs() << "Unhandled type for OP instruction: " << *Ty << "\n"; \
      llvm_unreachable(0);                                          \
    }                                                               \
  }                                                                 \
}

    switch(I.getOpcode()){
    default:
      dbgs() << "Don't know how to handle this binary operator!\n-->" << I;
      llvm_unreachable(nullptr);
      break;
    case Instruction::Add:   INTEGER_VECTOR_OPERATION(+) break;
    case Instruction::Sub:   INTEGER_VECTOR_OPERATION(-) break;
    case Instruction::Mul:   INTEGER_VECTOR_OPERATION(*) break;
    case Instruction::UDiv:  INTEGER_VECTOR_FUNCTION(udiv) break;
    case Instruction::SDiv:  INTEGER_VECTOR_FUNCTION(sdiv) break;
    case Instruction::URem:  INTEGER_VECTOR_FUNCTION(urem) break;
    case Instruction::SRem:  INTEGER_VECTOR_FUNCTION(srem) break;
    case Instruction::And:   INTEGER_VECTOR_OPERATION(&) break;
    case Instruction::Or:    INTEGER_VECTOR_OPERATION(|) break;
    case Instruction::Xor:   INTEGER_VECTOR_OPERATION(^) break;
    case Instruction::FAdd:  FLOAT_VECTOR_OP(+) break;
    case Instruction::FSub:  FLOAT_VECTOR_OP(-) break;
    case Instruction::FMul:  FLOAT_VECTOR_OP(*) break;
    case Instruction::FDiv:  FLOAT_VECTOR_OP(/) break;
    case Instruction::FRem:
      // FRem has no infix operator; call fmod() per lane instead of using
      // the FLOAT_VECTOR_OP macro.
      if (cast<VectorType>(Ty)->getElementType()->isFloatTy())
        for (unsigned i = 0; i < R.AggregateVal.size(); ++i)
          R.AggregateVal[i].FloatVal =
              fmod(Src1.AggregateVal[i].FloatVal, Src2.AggregateVal[i].FloatVal);
      else {
        if (cast<VectorType>(Ty)->getElementType()->isDoubleTy())
          for (unsigned i = 0; i < R.AggregateVal.size(); ++i)
            R.AggregateVal[i].DoubleVal =
                fmod(Src1.AggregateVal[i].DoubleVal, Src2.AggregateVal[i].DoubleVal);
        else {
          dbgs() << "Unhandled type for Rem instruction: " << *Ty << "\n";
          llvm_unreachable(nullptr);
        }
      }
      break;
    }
  } else {
    // Scalar path: integers use APInt operators, floats use the execute*
    // helpers that pick FloatVal vs DoubleVal from Ty.
    switch (I.getOpcode()) {
    default:
      dbgs() << "Don't know how to handle this binary operator!\n-->" << I;
      llvm_unreachable(nullptr);
      break;
    case Instruction::Add:   R.IntVal = Src1.IntVal + Src2.IntVal; break;
    case Instruction::Sub:   R.IntVal = Src1.IntVal - Src2.IntVal; break;
    case Instruction::Mul:   R.IntVal = Src1.IntVal * Src2.IntVal; break;
    case Instruction::FAdd:  executeFAddInst(R, Src1, Src2, Ty); break;
    case Instruction::FSub:  executeFSubInst(R, Src1, Src2, Ty); break;
    case Instruction::FMul:  executeFMulInst(R, Src1, Src2, Ty); break;
    case Instruction::FDiv:  executeFDivInst(R, Src1, Src2, Ty); break;
    case Instruction::FRem:  executeFRemInst(R, Src1, Src2, Ty); break;
    case Instruction::UDiv:  R.IntVal = Src1.IntVal.udiv(Src2.IntVal); break;
    case Instruction::SDiv:  R.IntVal = Src1.IntVal.sdiv(Src2.IntVal); break;
    case Instruction::URem:  R.IntVal = Src1.IntVal.urem(Src2.IntVal); break;
    case Instruction::SRem:  R.IntVal = Src1.IntVal.srem(Src2.IntVal); break;
    case Instruction::And:   R.IntVal = Src1.IntVal & Src2.IntVal; break;
    case Instruction::Or:    R.IntVal = Src1.IntVal | Src2.IntVal; break;
    case Instruction::Xor:   R.IntVal = Src1.IntVal ^ Src2.IntVal; break;
    }
  }
  SetValue(&I, R, SF);
}
  721. static GenericValue executeSelectInst(GenericValue Src1, GenericValue Src2,
  722. GenericValue Src3, Type *Ty) {
  723. GenericValue Dest;
  724. if(Ty->isVectorTy()) {
  725. assert(Src1.AggregateVal.size() == Src2.AggregateVal.size());
  726. assert(Src2.AggregateVal.size() == Src3.AggregateVal.size());
  727. Dest.AggregateVal.resize( Src1.AggregateVal.size() );
  728. for (size_t i = 0; i < Src1.AggregateVal.size(); ++i)
  729. Dest.AggregateVal[i] = (Src1.AggregateVal[i].IntVal == 0) ?
  730. Src3.AggregateVal[i] : Src2.AggregateVal[i];
  731. } else {
  732. Dest = (Src1.IntVal == 0) ? Src3 : Src2;
  733. }
  734. return Dest;
  735. }
  736. void Interpreter::visitSelectInst(SelectInst &I) {
  737. ExecutionContext &SF = ECStack.back();
  738. Type * Ty = I.getOperand(0)->getType();
  739. GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
  740. GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
  741. GenericValue Src3 = getOperandValue(I.getOperand(2), SF);
  742. GenericValue R = executeSelectInst(Src1, Src2, Src3, Ty);
  743. SetValue(&I, R, SF);
  744. }
  745. //===----------------------------------------------------------------------===//
  746. // Terminator Instruction Implementations
  747. //===----------------------------------------------------------------------===//
// exitCalled - Handle a guest call to exit(): tear down the interpreter's
// call stack, run the registered atexit handlers, and terminate the host
// process with the guest's exit code (truncated to 32 bits, as on a real
// target).
void Interpreter::exitCalled(GenericValue GV) {
  // runAtExitHandlers() assumes there are no stack frames, but
  // if exit() was called, then it had a stack frame. Blow away
  // the stack before interpreting atexit handlers.
  ECStack.clear();
  runAtExitHandlers();
  exit(GV.IntVal.zextOrTrunc(32).getZExtValue());
}
/// Pop the last stack frame off of ECStack and then copy the result
/// back into the result variable if we are not returning void. The
/// result variable may be the ExitValue, or the Value of the calling
/// CallInst if there was a previous stack frame. This method may
/// invalidate any ECStack iterators you have. This method also takes
/// care of switching to the normal destination BB, if we are returning
/// from an invoke.
///
void Interpreter::popStackAndReturnValueToCaller(Type *RetTy,
                                                 GenericValue Result) {
  // Pop the current stack frame.
  ECStack.pop_back();

  if (ECStack.empty()) { // Finished main. Put result into exit code...
    if (RetTy && !RetTy->isVoidTy()) { // Nonvoid return type?
      ExitValue = Result; // Capture the exit value of the program
    } else {
      // Void return: make the recorded exit value deterministic.
      memset(&ExitValue.Untyped, 0, sizeof(ExitValue.Untyped));
    }
  } else {
    // If we have a previous stack frame, and we have a previous call,
    // fill in the return value...
    ExecutionContext &CallingSF = ECStack.back();
    if (Instruction *I = CallingSF.Caller.getInstruction()) {
      // Save result...
      if (!CallingSF.Caller.getType()->isVoidTy())
        SetValue(I, Result, CallingSF);
      // Returning from an invoke resumes at its normal destination; the
      // unwind path is never taken by this interpreter.
      if (InvokeInst *II = dyn_cast<InvokeInst> (I))
        SwitchToNewBasicBlock (II->getNormalDest (), CallingSF);
      CallingSF.Caller = CallSite(); // We returned from the call...
    }
  }
}
  788. void Interpreter::visitReturnInst(ReturnInst &I) {
  789. ExecutionContext &SF = ECStack.back();
  790. Type *RetTy = Type::getVoidTy(I.getContext());
  791. GenericValue Result;
  792. // Save away the return value... (if we are not 'ret void')
  793. if (I.getNumOperands()) {
  794. RetTy = I.getReturnValue()->getType();
  795. Result = getOperandValue(I.getReturnValue(), SF);
  796. }
  797. popStackAndReturnValueToCaller(RetTy, Result);
  798. }
// visitUnreachableInst - Reaching 'unreachable' means the program has
// undefined behavior; abort interpretation with a fatal error rather than
// continuing.
void Interpreter::visitUnreachableInst(UnreachableInst &I) {
  report_fatal_error("Program executed an 'unreachable' instruction!");
}
  802. void Interpreter::visitBranchInst(BranchInst &I) {
  803. ExecutionContext &SF = ECStack.back();
  804. BasicBlock *Dest;
  805. Dest = I.getSuccessor(0); // Uncond branches have a fixed dest...
  806. if (!I.isUnconditional()) {
  807. Value *Cond = I.getCondition();
  808. if (getOperandValue(Cond, SF).IntVal == 0) // If false cond...
  809. Dest = I.getSuccessor(1);
  810. }
  811. SwitchToNewBasicBlock(Dest, SF);
  812. }
  813. void Interpreter::visitSwitchInst(SwitchInst &I) {
  814. ExecutionContext &SF = ECStack.back();
  815. Value* Cond = I.getCondition();
  816. Type *ElTy = Cond->getType();
  817. GenericValue CondVal = getOperandValue(Cond, SF);
  818. // Check to see if any of the cases match...
  819. BasicBlock *Dest = nullptr;
  820. for (auto Case : I.cases()) {
  821. GenericValue CaseVal = getOperandValue(Case.getCaseValue(), SF);
  822. if (executeICMP_EQ(CondVal, CaseVal, ElTy).IntVal != 0) {
  823. Dest = cast<BasicBlock>(Case.getCaseSuccessor());
  824. break;
  825. }
  826. }
  827. if (!Dest) Dest = I.getDefaultDest(); // No cases matched: use default
  828. SwitchToNewBasicBlock(Dest, SF);
  829. }
  830. void Interpreter::visitIndirectBrInst(IndirectBrInst &I) {
  831. ExecutionContext &SF = ECStack.back();
  832. void *Dest = GVTOP(getOperandValue(I.getAddress(), SF));
  833. SwitchToNewBasicBlock((BasicBlock*)Dest, SF);
  834. }
// SwitchToNewBasicBlock - This method is used to jump to a new basic block.
// This function handles the actual updating of block and instruction iterators
// as well as execution of all of the PHI nodes in the destination block.
//
// This method does this because all of the PHI nodes must be executed
// atomically, reading their inputs before any of the results are updated. Not
// doing this can cause problems if the PHI nodes depend on other PHI nodes for
// their inputs. If the input PHI node is updated before it is read, incorrect
// results can happen. Thus we use a two phase approach.
//
void Interpreter::SwitchToNewBasicBlock(BasicBlock *Dest, ExecutionContext &SF){
  BasicBlock *PrevBB = SF.CurBB; // Remember where we came from...
  SF.CurBB = Dest;               // Update CurBB to branch destination
  SF.CurInst = SF.CurBB->begin(); // Update new instruction ptr...

  if (!isa<PHINode>(SF.CurInst)) return; // Nothing fancy to do

  // Phase one: loop over all of the PHI nodes in the current block, reading
  // their inputs (before any PHI result is written).
  std::vector<GenericValue> ResultValues;

  for (; PHINode *PN = dyn_cast<PHINode>(SF.CurInst); ++SF.CurInst) {
    // Search for the value corresponding to this previous bb...
    int i = PN->getBasicBlockIndex(PrevBB);
    assert(i != -1 && "PHINode doesn't contain entry for predecessor??");
    Value *IncomingValue = PN->getIncomingValue(i);

    // Save the incoming value for this PHI node...
    ResultValues.push_back(getOperandValue(IncomingValue, SF));
  }

  // Phase two: rewind to the first PHI and commit the saved values, setting
  // each PHI node's result in the current frame.
  SF.CurInst = SF.CurBB->begin();
  for (unsigned i = 0; isa<PHINode>(SF.CurInst); ++SF.CurInst, ++i) {
    PHINode *PN = cast<PHINode>(SF.CurInst);
    SetValue(PN, ResultValues[i], SF);
  }
}
  867. //===----------------------------------------------------------------------===//
  868. // Memory Instruction Implementations
  869. //===----------------------------------------------------------------------===//
  870. void Interpreter::visitAllocaInst(AllocaInst &I) {
  871. ExecutionContext &SF = ECStack.back();
  872. Type *Ty = I.getType()->getElementType(); // Type to be allocated
  873. // Get the number of elements being allocated by the array...
  874. unsigned NumElements =
  875. getOperandValue(I.getOperand(0), SF).IntVal.getZExtValue();
  876. unsigned TypeSize = (size_t)getDataLayout().getTypeAllocSize(Ty);
  877. // Avoid malloc-ing zero bytes, use max()...
  878. unsigned MemToAlloc = std::max(1U, NumElements * TypeSize);
  879. // Allocate enough memory to hold the type...
  880. void *Memory = malloc(MemToAlloc);
  881. DEBUG(dbgs() << "Allocated Type: " << *Ty << " (" << TypeSize << " bytes) x "
  882. << NumElements << " (Total: " << MemToAlloc << ") at "
  883. << uintptr_t(Memory) << '\n');
  884. GenericValue Result = PTOGV(Memory);
  885. assert(Result.PointerVal && "Null pointer returned by malloc!");
  886. SetValue(&I, Result, SF);
  887. if (I.getOpcode() == Instruction::Alloca)
  888. ECStack.back().Allocas.add(Memory);
  889. }
// getElementOffset - The workhorse for getelementptr.
//
// Walks the index list [I, E), accumulating a byte offset: struct indices are
// looked up in the DataLayout's StructLayout, while array/pointer indices are
// scaled by the allocation size of the indexed element type.  The final
// result is the base pointer advanced by that many bytes.
GenericValue Interpreter::executeGEPOperation(Value *Ptr, gep_type_iterator I,
                                              gep_type_iterator E,
                                              ExecutionContext &SF) {
  assert(Ptr->getType()->isPointerTy() &&
         "Cannot getElementOffset of a nonpointer type!");

  uint64_t Total = 0;
  for (; I != E; ++I) {
    if (StructType *STy = I.getStructTypeOrNull()) {
      // Struct index: must be a constant i32; the byte offset of that field
      // comes from the target's struct layout (including padding).
      const StructLayout *SLO = getDataLayout().getStructLayout(STy);

      const ConstantInt *CPU = cast<ConstantInt>(I.getOperand());
      unsigned Index = unsigned(CPU->getZExtValue());

      Total += SLO->getElementOffset(Index);
    } else {
      // Get the index number for the array... which must be long type...
      GenericValue IdxGV = getOperandValue(I.getOperand(), SF);

      int64_t Idx;
      unsigned BitWidth =
          cast<IntegerType>(I.getOperand()->getType())->getBitWidth();
      // Sign-extend a 32-bit index to 64 bits; GEP indices are signed, so a
      // negative i32 index must stay negative.
      if (BitWidth == 32)
        Idx = (int64_t)(int32_t)IdxGV.IntVal.getZExtValue();
      else {
        assert(BitWidth == 64 && "Invalid index type for getelementptr");
        Idx = (int64_t)IdxGV.IntVal.getZExtValue();
      }
      Total += getDataLayout().getTypeAllocSize(I.getIndexedType()) * Idx;
    }
  }

  GenericValue Result;
  Result.PointerVal = ((char*)getOperandValue(Ptr, SF).PointerVal) + Total;
  DEBUG(dbgs() << "GEP Index " << Total << " bytes.\n");
  return Result;
}
  924. void Interpreter::visitGetElementPtrInst(GetElementPtrInst &I) {
  925. ExecutionContext &SF = ECStack.back();
  926. SetValue(&I, executeGEPOperation(I.getPointerOperand(),
  927. gep_type_begin(I), gep_type_end(I), SF), SF);
  928. }
  929. void Interpreter::visitLoadInst(LoadInst &I) {
  930. ExecutionContext &SF = ECStack.back();
  931. GenericValue SRC = getOperandValue(I.getPointerOperand(), SF);
  932. GenericValue *Ptr = (GenericValue*)GVTOP(SRC);
  933. GenericValue Result;
  934. LoadValueFromMemory(Result, Ptr, I.getType());
  935. SetValue(&I, Result, SF);
  936. if (I.isVolatile() && PrintVolatile)
  937. dbgs() << "Volatile load " << I;
  938. }
  939. void Interpreter::visitStoreInst(StoreInst &I) {
  940. ExecutionContext &SF = ECStack.back();
  941. GenericValue Val = getOperandValue(I.getOperand(0), SF);
  942. GenericValue SRC = getOperandValue(I.getPointerOperand(), SF);
  943. StoreValueToMemory(Val, (GenericValue *)GVTOP(SRC),
  944. I.getOperand(0)->getType());
  945. if (I.isVolatile() && PrintVolatile)
  946. dbgs() << "Volatile store: " << I;
  947. }
  948. //===----------------------------------------------------------------------===//
  949. // Miscellaneous Instruction Implementations
  950. //===----------------------------------------------------------------------===//
  951. void Interpreter::visitCallSite(CallSite CS) {
  952. ExecutionContext &SF = ECStack.back();
  953. // Check to see if this is an intrinsic function call...
  954. Function *F = CS.getCalledFunction();
  955. if (F && F->isDeclaration())
  956. switch (F->getIntrinsicID()) {
  957. case Intrinsic::not_intrinsic:
  958. break;
  959. case Intrinsic::vastart: { // va_start
  960. GenericValue ArgIndex;
  961. ArgIndex.UIntPairVal.first = ECStack.size() - 1;
  962. ArgIndex.UIntPairVal.second = 0;
  963. SetValue(CS.getInstruction(), ArgIndex, SF);
  964. return;
  965. }
  966. case Intrinsic::vaend: // va_end is a noop for the interpreter
  967. return;
  968. case Intrinsic::vacopy: // va_copy: dest = src
  969. SetValue(CS.getInstruction(), getOperandValue(*CS.arg_begin(), SF), SF);
  970. return;
  971. default:
  972. // If it is an unknown intrinsic function, use the intrinsic lowering
  973. // class to transform it into hopefully tasty LLVM code.
  974. //
  975. BasicBlock::iterator me(CS.getInstruction());
  976. BasicBlock *Parent = CS.getInstruction()->getParent();
  977. bool atBegin(Parent->begin() == me);
  978. if (!atBegin)
  979. --me;
  980. IL->LowerIntrinsicCall(cast<CallInst>(CS.getInstruction()));
  981. // Restore the CurInst pointer to the first instruction newly inserted, if
  982. // any.
  983. if (atBegin) {
  984. SF.CurInst = Parent->begin();
  985. } else {
  986. SF.CurInst = me;
  987. ++SF.CurInst;
  988. }
  989. return;
  990. }
  991. SF.Caller = CS;
  992. std::vector<GenericValue> ArgVals;
  993. const unsigned NumArgs = SF.Caller.arg_size();
  994. ArgVals.reserve(NumArgs);
  995. uint16_t pNum = 1;
  996. for (CallSite::arg_iterator i = SF.Caller.arg_begin(),
  997. e = SF.Caller.arg_end(); i != e; ++i, ++pNum) {
  998. Value *V = *i;
  999. ArgVals.push_back(getOperandValue(V, SF));
  1000. }
  1001. // To handle indirect calls, we must get the pointer value from the argument
  1002. // and treat it as a function pointer.
  1003. GenericValue SRC = getOperandValue(SF.Caller.getCalledValue(), SF);
  1004. callFunction((Function*)GVTOP(SRC), ArgVals);
  1005. }
  1006. // auxiliary function for shift operations
  1007. static unsigned getShiftAmount(uint64_t orgShiftAmount,
  1008. llvm::APInt valueToShift) {
  1009. unsigned valueWidth = valueToShift.getBitWidth();
  1010. if (orgShiftAmount < (uint64_t)valueWidth)
  1011. return orgShiftAmount;
  1012. // according to the llvm documentation, if orgShiftAmount > valueWidth,
  1013. // the result is undfeined. but we do shift by this rule:
  1014. return (NextPowerOf2(valueWidth-1) - 1) & orgShiftAmount;
  1015. }
  1016. void Interpreter::visitShl(BinaryOperator &I) {
  1017. ExecutionContext &SF = ECStack.back();
  1018. GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
  1019. GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
  1020. GenericValue Dest;
  1021. Type *Ty = I.getType();
  1022. if (Ty->isVectorTy()) {
  1023. uint32_t src1Size = uint32_t(Src1.AggregateVal.size());
  1024. assert(src1Size == Src2.AggregateVal.size());
  1025. for (unsigned i = 0; i < src1Size; i++) {
  1026. GenericValue Result;
  1027. uint64_t shiftAmount = Src2.AggregateVal[i].IntVal.getZExtValue();
  1028. llvm::APInt valueToShift = Src1.AggregateVal[i].IntVal;
  1029. Result.IntVal = valueToShift.shl(getShiftAmount(shiftAmount, valueToShift));
  1030. Dest.AggregateVal.push_back(Result);
  1031. }
  1032. } else {
  1033. // scalar
  1034. uint64_t shiftAmount = Src2.IntVal.getZExtValue();
  1035. llvm::APInt valueToShift = Src1.IntVal;
  1036. Dest.IntVal = valueToShift.shl(getShiftAmount(shiftAmount, valueToShift));
  1037. }
  1038. SetValue(&I, Dest, SF);
  1039. }
  1040. void Interpreter::visitLShr(BinaryOperator &I) {
  1041. ExecutionContext &SF = ECStack.back();
  1042. GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
  1043. GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
  1044. GenericValue Dest;
  1045. Type *Ty = I.getType();
  1046. if (Ty->isVectorTy()) {
  1047. uint32_t src1Size = uint32_t(Src1.AggregateVal.size());
  1048. assert(src1Size == Src2.AggregateVal.size());
  1049. for (unsigned i = 0; i < src1Size; i++) {
  1050. GenericValue Result;
  1051. uint64_t shiftAmount = Src2.AggregateVal[i].IntVal.getZExtValue();
  1052. llvm::APInt valueToShift = Src1.AggregateVal[i].IntVal;
  1053. Result.IntVal = valueToShift.lshr(getShiftAmount(shiftAmount, valueToShift));
  1054. Dest.AggregateVal.push_back(Result);
  1055. }
  1056. } else {
  1057. // scalar
  1058. uint64_t shiftAmount = Src2.IntVal.getZExtValue();
  1059. llvm::APInt valueToShift = Src1.IntVal;
  1060. Dest.IntVal = valueToShift.lshr(getShiftAmount(shiftAmount, valueToShift));
  1061. }
  1062. SetValue(&I, Dest, SF);
  1063. }
  1064. void Interpreter::visitAShr(BinaryOperator &I) {
  1065. ExecutionContext &SF = ECStack.back();
  1066. GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
  1067. GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
  1068. GenericValue Dest;
  1069. Type *Ty = I.getType();
  1070. if (Ty->isVectorTy()) {
  1071. size_t src1Size = Src1.AggregateVal.size();
  1072. assert(src1Size == Src2.AggregateVal.size());
  1073. for (unsigned i = 0; i < src1Size; i++) {
  1074. GenericValue Result;
  1075. uint64_t shiftAmount = Src2.AggregateVal[i].IntVal.getZExtValue();
  1076. llvm::APInt valueToShift = Src1.AggregateVal[i].IntVal;
  1077. Result.IntVal = valueToShift.ashr(getShiftAmount(shiftAmount, valueToShift));
  1078. Dest.AggregateVal.push_back(Result);
  1079. }
  1080. } else {
  1081. // scalar
  1082. uint64_t shiftAmount = Src2.IntVal.getZExtValue();
  1083. llvm::APInt valueToShift = Src1.IntVal;
  1084. Dest.IntVal = valueToShift.ashr(getShiftAmount(shiftAmount, valueToShift));
  1085. }
  1086. SetValue(&I, Dest, SF);
  1087. }
  1088. GenericValue Interpreter::executeTruncInst(Value *SrcVal, Type *DstTy,
  1089. ExecutionContext &SF) {
  1090. GenericValue Dest, Src = getOperandValue(SrcVal, SF);
  1091. Type *SrcTy = SrcVal->getType();
  1092. if (SrcTy->isVectorTy()) {
  1093. Type *DstVecTy = DstTy->getScalarType();
  1094. unsigned DBitWidth = cast<IntegerType>(DstVecTy)->getBitWidth();
  1095. unsigned NumElts = Src.AggregateVal.size();
  1096. // the sizes of src and dst vectors must be equal
  1097. Dest.AggregateVal.resize(NumElts);
  1098. for (unsigned i = 0; i < NumElts; i++)
  1099. Dest.AggregateVal[i].IntVal = Src.AggregateVal[i].IntVal.trunc(DBitWidth);
  1100. } else {
  1101. IntegerType *DITy = cast<IntegerType>(DstTy);
  1102. unsigned DBitWidth = DITy->getBitWidth();
  1103. Dest.IntVal = Src.IntVal.trunc(DBitWidth);
  1104. }
  1105. return Dest;
  1106. }
  1107. GenericValue Interpreter::executeSExtInst(Value *SrcVal, Type *DstTy,
  1108. ExecutionContext &SF) {
  1109. Type *SrcTy = SrcVal->getType();
  1110. GenericValue Dest, Src = getOperandValue(SrcVal, SF);
  1111. if (SrcTy->isVectorTy()) {
  1112. Type *DstVecTy = DstTy->getScalarType();
  1113. unsigned DBitWidth = cast<IntegerType>(DstVecTy)->getBitWidth();
  1114. unsigned size = Src.AggregateVal.size();
  1115. // the sizes of src and dst vectors must be equal.
  1116. Dest.AggregateVal.resize(size);
  1117. for (unsigned i = 0; i < size; i++)
  1118. Dest.AggregateVal[i].IntVal = Src.AggregateVal[i].IntVal.sext(DBitWidth);
  1119. } else {
  1120. auto *DITy = cast<IntegerType>(DstTy);
  1121. unsigned DBitWidth = DITy->getBitWidth();
  1122. Dest.IntVal = Src.IntVal.sext(DBitWidth);
  1123. }
  1124. return Dest;
  1125. }
  1126. GenericValue Interpreter::executeZExtInst(Value *SrcVal, Type *DstTy,
  1127. ExecutionContext &SF) {
  1128. Type *SrcTy = SrcVal->getType();
  1129. GenericValue Dest, Src = getOperandValue(SrcVal, SF);
  1130. if (SrcTy->isVectorTy()) {
  1131. Type *DstVecTy = DstTy->getScalarType();
  1132. unsigned DBitWidth = cast<IntegerType>(DstVecTy)->getBitWidth();
  1133. unsigned size = Src.AggregateVal.size();
  1134. // the sizes of src and dst vectors must be equal.
  1135. Dest.AggregateVal.resize(size);
  1136. for (unsigned i = 0; i < size; i++)
  1137. Dest.AggregateVal[i].IntVal = Src.AggregateVal[i].IntVal.zext(DBitWidth);
  1138. } else {
  1139. auto *DITy = cast<IntegerType>(DstTy);
  1140. unsigned DBitWidth = DITy->getBitWidth();
  1141. Dest.IntVal = Src.IntVal.zext(DBitWidth);
  1142. }
  1143. return Dest;
  1144. }
  1145. GenericValue Interpreter::executeFPTruncInst(Value *SrcVal, Type *DstTy,
  1146. ExecutionContext &SF) {
  1147. GenericValue Dest, Src = getOperandValue(SrcVal, SF);
  1148. if (SrcVal->getType()->getTypeID() == Type::VectorTyID) {
  1149. assert(SrcVal->getType()->getScalarType()->isDoubleTy() &&
  1150. DstTy->getScalarType()->isFloatTy() &&
  1151. "Invalid FPTrunc instruction");
  1152. unsigned size = Src.AggregateVal.size();
  1153. // the sizes of src and dst vectors must be equal.
  1154. Dest.AggregateVal.resize(size);
  1155. for (unsigned i = 0; i < size; i++)
  1156. Dest.AggregateVal[i].FloatVal = (float)Src.AggregateVal[i].DoubleVal;
  1157. } else {
  1158. assert(SrcVal->getType()->isDoubleTy() && DstTy->isFloatTy() &&
  1159. "Invalid FPTrunc instruction");
  1160. Dest.FloatVal = (float)Src.DoubleVal;
  1161. }
  1162. return Dest;
  1163. }
  1164. GenericValue Interpreter::executeFPExtInst(Value *SrcVal, Type *DstTy,
  1165. ExecutionContext &SF) {
  1166. GenericValue Dest, Src = getOperandValue(SrcVal, SF);
  1167. if (SrcVal->getType()->getTypeID() == Type::VectorTyID) {
  1168. assert(SrcVal->getType()->getScalarType()->isFloatTy() &&
  1169. DstTy->getScalarType()->isDoubleTy() && "Invalid FPExt instruction");
  1170. unsigned size = Src.AggregateVal.size();
  1171. // the sizes of src and dst vectors must be equal.
  1172. Dest.AggregateVal.resize(size);
  1173. for (unsigned i = 0; i < size; i++)
  1174. Dest.AggregateVal[i].DoubleVal = (double)Src.AggregateVal[i].FloatVal;
  1175. } else {
  1176. assert(SrcVal->getType()->isFloatTy() && DstTy->isDoubleTy() &&
  1177. "Invalid FPExt instruction");
  1178. Dest.DoubleVal = (double)Src.FloatVal;
  1179. }
  1180. return Dest;
  1181. }
  1182. GenericValue Interpreter::executeFPToUIInst(Value *SrcVal, Type *DstTy,
  1183. ExecutionContext &SF) {
  1184. Type *SrcTy = SrcVal->getType();
  1185. GenericValue Dest, Src = getOperandValue(SrcVal, SF);
  1186. if (SrcTy->getTypeID() == Type::VectorTyID) {
  1187. Type *DstVecTy = DstTy->getScalarType();
  1188. Type *SrcVecTy = SrcTy->getScalarType();
  1189. uint32_t DBitWidth = cast<IntegerType>(DstVecTy)->getBitWidth();
  1190. unsigned size = Src.AggregateVal.size();
  1191. // the sizes of src and dst vectors must be equal.
  1192. Dest.AggregateVal.resize(size);
  1193. if (SrcVecTy->getTypeID() == Type::FloatTyID) {
  1194. assert(SrcVecTy->isFloatingPointTy() && "Invalid FPToUI instruction");
  1195. for (unsigned i = 0; i < size; i++)
  1196. Dest.AggregateVal[i].IntVal = APIntOps::RoundFloatToAPInt(
  1197. Src.AggregateVal[i].FloatVal, DBitWidth);
  1198. } else {
  1199. for (unsigned i = 0; i < size; i++)
  1200. Dest.AggregateVal[i].IntVal = APIntOps::RoundDoubleToAPInt(
  1201. Src.AggregateVal[i].DoubleVal, DBitWidth);
  1202. }
  1203. } else {
  1204. // scalar
  1205. uint32_t DBitWidth = cast<IntegerType>(DstTy)->getBitWidth();
  1206. assert(SrcTy->isFloatingPointTy() && "Invalid FPToUI instruction");
  1207. if (SrcTy->getTypeID() == Type::FloatTyID)
  1208. Dest.IntVal = APIntOps::RoundFloatToAPInt(Src.FloatVal, DBitWidth);
  1209. else {
  1210. Dest.IntVal = APIntOps::RoundDoubleToAPInt(Src.DoubleVal, DBitWidth);
  1211. }
  1212. }
  1213. return Dest;
  1214. }
  1215. GenericValue Interpreter::executeFPToSIInst(Value *SrcVal, Type *DstTy,
  1216. ExecutionContext &SF) {
  1217. Type *SrcTy = SrcVal->getType();
  1218. GenericValue Dest, Src = getOperandValue(SrcVal, SF);
  1219. if (SrcTy->getTypeID() == Type::VectorTyID) {
  1220. Type *DstVecTy = DstTy->getScalarType();
  1221. Type *SrcVecTy = SrcTy->getScalarType();
  1222. uint32_t DBitWidth = cast<IntegerType>(DstVecTy)->getBitWidth();
  1223. unsigned size = Src.AggregateVal.size();
  1224. // the sizes of src and dst vectors must be equal
  1225. Dest.AggregateVal.resize(size);
  1226. if (SrcVecTy->getTypeID() == Type::FloatTyID) {
  1227. assert(SrcVecTy->isFloatingPointTy() && "Invalid FPToSI instruction");
  1228. for (unsigned i = 0; i < size; i++)
  1229. Dest.AggregateVal[i].IntVal = APIntOps::RoundFloatToAPInt(
  1230. Src.AggregateVal[i].FloatVal, DBitWidth);
  1231. } else {
  1232. for (unsigned i = 0; i < size; i++)
  1233. Dest.AggregateVal[i].IntVal = APIntOps::RoundDoubleToAPInt(
  1234. Src.AggregateVal[i].DoubleVal, DBitWidth);
  1235. }
  1236. } else {
  1237. // scalar
  1238. unsigned DBitWidth = cast<IntegerType>(DstTy)->getBitWidth();
  1239. assert(SrcTy->isFloatingPointTy() && "Invalid FPToSI instruction");
  1240. if (SrcTy->getTypeID() == Type::FloatTyID)
  1241. Dest.IntVal = APIntOps::RoundFloatToAPInt(Src.FloatVal, DBitWidth);
  1242. else {
  1243. Dest.IntVal = APIntOps::RoundDoubleToAPInt(Src.DoubleVal, DBitWidth);
  1244. }
  1245. }
  1246. return Dest;
  1247. }
  1248. GenericValue Interpreter::executeUIToFPInst(Value *SrcVal, Type *DstTy,
  1249. ExecutionContext &SF) {
  1250. GenericValue Dest, Src = getOperandValue(SrcVal, SF);
  1251. if (SrcVal->getType()->getTypeID() == Type::VectorTyID) {
  1252. Type *DstVecTy = DstTy->getScalarType();
  1253. unsigned size = Src.AggregateVal.size();
  1254. // the sizes of src and dst vectors must be equal
  1255. Dest.AggregateVal.resize(size);
  1256. if (DstVecTy->getTypeID() == Type::FloatTyID) {
  1257. assert(DstVecTy->isFloatingPointTy() && "Invalid UIToFP instruction");
  1258. for (unsigned i = 0; i < size; i++)
  1259. Dest.AggregateVal[i].FloatVal =
  1260. APIntOps::RoundAPIntToFloat(Src.AggregateVal[i].IntVal);
  1261. } else {
  1262. for (unsigned i = 0; i < size; i++)
  1263. Dest.AggregateVal[i].DoubleVal =
  1264. APIntOps::RoundAPIntToDouble(Src.AggregateVal[i].IntVal);
  1265. }
  1266. } else {
  1267. // scalar
  1268. assert(DstTy->isFloatingPointTy() && "Invalid UIToFP instruction");
  1269. if (DstTy->getTypeID() == Type::FloatTyID)
  1270. Dest.FloatVal = APIntOps::RoundAPIntToFloat(Src.IntVal);
  1271. else {
  1272. Dest.DoubleVal = APIntOps::RoundAPIntToDouble(Src.IntVal);
  1273. }
  1274. }
  1275. return Dest;
  1276. }
  1277. GenericValue Interpreter::executeSIToFPInst(Value *SrcVal, Type *DstTy,
  1278. ExecutionContext &SF) {
  1279. GenericValue Dest, Src = getOperandValue(SrcVal, SF);
  1280. if (SrcVal->getType()->getTypeID() == Type::VectorTyID) {
  1281. Type *DstVecTy = DstTy->getScalarType();
  1282. unsigned size = Src.AggregateVal.size();
  1283. // the sizes of src and dst vectors must be equal
  1284. Dest.AggregateVal.resize(size);
  1285. if (DstVecTy->getTypeID() == Type::FloatTyID) {
  1286. assert(DstVecTy->isFloatingPointTy() && "Invalid SIToFP instruction");
  1287. for (unsigned i = 0; i < size; i++)
  1288. Dest.AggregateVal[i].FloatVal =
  1289. APIntOps::RoundSignedAPIntToFloat(Src.AggregateVal[i].IntVal);
  1290. } else {
  1291. for (unsigned i = 0; i < size; i++)
  1292. Dest.AggregateVal[i].DoubleVal =
  1293. APIntOps::RoundSignedAPIntToDouble(Src.AggregateVal[i].IntVal);
  1294. }
  1295. } else {
  1296. // scalar
  1297. assert(DstTy->isFloatingPointTy() && "Invalid SIToFP instruction");
  1298. if (DstTy->getTypeID() == Type::FloatTyID)
  1299. Dest.FloatVal = APIntOps::RoundSignedAPIntToFloat(Src.IntVal);
  1300. else {
  1301. Dest.DoubleVal = APIntOps::RoundSignedAPIntToDouble(Src.IntVal);
  1302. }
  1303. }
  1304. return Dest;
  1305. }
  1306. GenericValue Interpreter::executePtrToIntInst(Value *SrcVal, Type *DstTy,
  1307. ExecutionContext &SF) {
  1308. uint32_t DBitWidth = cast<IntegerType>(DstTy)->getBitWidth();
  1309. GenericValue Dest, Src = getOperandValue(SrcVal, SF);
  1310. assert(SrcVal->getType()->isPointerTy() && "Invalid PtrToInt instruction");
  1311. Dest.IntVal = APInt(DBitWidth, (intptr_t) Src.PointerVal);
  1312. return Dest;
  1313. }
  1314. GenericValue Interpreter::executeIntToPtrInst(Value *SrcVal, Type *DstTy,
  1315. ExecutionContext &SF) {
  1316. GenericValue Dest, Src = getOperandValue(SrcVal, SF);
  1317. assert(DstTy->isPointerTy() && "Invalid PtrToInt instruction");
  1318. uint32_t PtrSize = getDataLayout().getPointerSizeInBits();
  1319. if (PtrSize != Src.IntVal.getBitWidth())
  1320. Src.IntVal = Src.IntVal.zextOrTrunc(PtrSize);
  1321. Dest.PointerVal = PointerTy(intptr_t(Src.IntVal.getZExtValue()));
  1322. return Dest;
  1323. }
// Execute a 'bitcast': reinterpret the bits of Src as DstTy without changing
// them. Scalar<->scalar casts are handled directly; any cast involving a
// vector goes through an integer-vector intermediate representation.
GenericValue Interpreter::executeBitCastInst(Value *SrcVal, Type *DstTy,
                                             ExecutionContext &SF) {

  // This instruction supports bitwise conversion of vectors to integers and
  // to vectors of other types (as long as they have the same size)
  Type *SrcTy = SrcVal->getType();
  GenericValue Dest, Src = getOperandValue(SrcVal, SF);
  if ((SrcTy->getTypeID() == Type::VectorTyID) ||
      (DstTy->getTypeID() == Type::VectorTyID)) {
    // vector src bitcast to vector dst or vector src bitcast to scalar dst or
    // scalar src bitcast to vector dst
    // Byte order decides how elements are packed/unpacked below.
    bool isLittleEndian = getDataLayout().isLittleEndian();
    GenericValue TempDst, TempSrc, SrcVec;
    Type *SrcElemTy;
    Type *DstElemTy;
    unsigned SrcBitSize;
    unsigned DstBitSize;
    unsigned SrcNum;
    unsigned DstNum;

    if (SrcTy->getTypeID() == Type::VectorTyID) {
      SrcElemTy = SrcTy->getScalarType();
      SrcBitSize = SrcTy->getScalarSizeInBits();
      SrcNum = Src.AggregateVal.size();
      SrcVec = Src;
    } else {
      // if src is scalar value, make it vector <1 x type>
      SrcElemTy = SrcTy;
      SrcBitSize = SrcTy->getPrimitiveSizeInBits();
      SrcNum = 1;
      SrcVec.AggregateVal.push_back(Src);
    }

    if (DstTy->getTypeID() == Type::VectorTyID) {
      DstElemTy = DstTy->getScalarType();
      DstBitSize = DstTy->getScalarSizeInBits();
      // Element count implied by equal total bit width.
      DstNum = (SrcNum * SrcBitSize) / DstBitSize;
    } else {
      DstElemTy = DstTy;
      DstBitSize = DstTy->getPrimitiveSizeInBits();
      DstNum = 1;
    }

    // A bitcast never changes the total number of bits.
    if (SrcNum * SrcBitSize != DstNum * DstBitSize)
      llvm_unreachable("Invalid BitCast");

    // If src is floating point, cast to integer first.
    TempSrc.AggregateVal.resize(SrcNum);
    if (SrcElemTy->isFloatTy()) {
      for (unsigned i = 0; i < SrcNum; i++)
        TempSrc.AggregateVal[i].IntVal =
            APInt::floatToBits(SrcVec.AggregateVal[i].FloatVal);

    } else if (SrcElemTy->isDoubleTy()) {
      for (unsigned i = 0; i < SrcNum; i++)
        TempSrc.AggregateVal[i].IntVal =
            APInt::doubleToBits(SrcVec.AggregateVal[i].DoubleVal);
    } else if (SrcElemTy->isIntegerTy()) {
      for (unsigned i = 0; i < SrcNum; i++)
        TempSrc.AggregateVal[i].IntVal = SrcVec.AggregateVal[i].IntVal;
    } else {
      // Pointers are not allowed as the element type of vector.
      llvm_unreachable("Invalid Bitcast");
    }

    // now TempSrc is integer type vector
    if (DstNum < SrcNum) {
      // Narrow-to-wide: pack Ratio source elements into each dest element.
      // Example: bitcast <4 x i32> <i32 0, i32 1, i32 2, i32 3> to <2 x i64>
      unsigned Ratio = SrcNum / DstNum;
      unsigned SrcElt = 0;
      for (unsigned i = 0; i < DstNum; i++) {
        GenericValue Elt;
        Elt.IntVal = 0;
        Elt.IntVal = Elt.IntVal.zext(DstBitSize);
        // On big-endian targets the first source element lands in the
        // most-significant slice; on little-endian, in the least-significant.
        unsigned ShiftAmt = isLittleEndian ? 0 : SrcBitSize * (Ratio - 1);
        for (unsigned j = 0; j < Ratio; j++) {
          APInt Tmp;
          Tmp = Tmp.zext(SrcBitSize);
          Tmp = TempSrc.AggregateVal[SrcElt++].IntVal;
          Tmp = Tmp.zext(DstBitSize);
          Tmp <<= ShiftAmt;
          ShiftAmt += isLittleEndian ? SrcBitSize : -SrcBitSize;
          Elt.IntVal |= Tmp;
        }
        TempDst.AggregateVal.push_back(Elt);
      }
    } else {
      // Wide-to-narrow: split each source element into Ratio dest elements.
      // Example: bitcast <2 x i64> <i64 0, i64 1> to <4 x i32>
      unsigned Ratio = DstNum / SrcNum;
      for (unsigned i = 0; i < SrcNum; i++) {
        unsigned ShiftAmt = isLittleEndian ? 0 : DstBitSize * (Ratio - 1);
        for (unsigned j = 0; j < Ratio; j++) {
          GenericValue Elt;
          Elt.IntVal = Elt.IntVal.zext(SrcBitSize);
          Elt.IntVal = TempSrc.AggregateVal[i].IntVal;
          Elt.IntVal.lshrInPlace(ShiftAmt);
          // it could be DstBitSize == SrcBitSize, so check it
          if (DstBitSize < SrcBitSize)
            Elt.IntVal = Elt.IntVal.trunc(DstBitSize);
          ShiftAmt += isLittleEndian ? DstBitSize : -DstBitSize;
          TempDst.AggregateVal.push_back(Elt);
        }
      }
    }

    // convert result from integer to specified type
    if (DstTy->getTypeID() == Type::VectorTyID) {
      if (DstElemTy->isDoubleTy()) {
        Dest.AggregateVal.resize(DstNum);
        for (unsigned i = 0; i < DstNum; i++)
          Dest.AggregateVal[i].DoubleVal =
              TempDst.AggregateVal[i].IntVal.bitsToDouble();
      } else if (DstElemTy->isFloatTy()) {
        Dest.AggregateVal.resize(DstNum);
        for (unsigned i = 0; i < DstNum; i++)
          Dest.AggregateVal[i].FloatVal =
              TempDst.AggregateVal[i].IntVal.bitsToFloat();
      } else {
        Dest = TempDst;
      }
    } else {
      if (DstElemTy->isDoubleTy())
        Dest.DoubleVal = TempDst.AggregateVal[0].IntVal.bitsToDouble();
      else if (DstElemTy->isFloatTy()) {
        Dest.FloatVal = TempDst.AggregateVal[0].IntVal.bitsToFloat();
      } else {
        Dest.IntVal = TempDst.AggregateVal[0].IntVal;
      }
    }
  } else { //  if ((SrcTy->getTypeID() == Type::VectorTyID) ||
           //     (DstTy->getTypeID() == Type::VectorTyID))

    // scalar src bitcast to scalar dst
    if (DstTy->isPointerTy()) {
      assert(SrcTy->isPointerTy() && "Invalid BitCast");
      Dest.PointerVal = Src.PointerVal;
    } else if (DstTy->isIntegerTy()) {
      if (SrcTy->isFloatTy())
        Dest.IntVal = APInt::floatToBits(Src.FloatVal);
      else if (SrcTy->isDoubleTy()) {
        Dest.IntVal = APInt::doubleToBits(Src.DoubleVal);
      } else if (SrcTy->isIntegerTy()) {
        Dest.IntVal = Src.IntVal;
      } else {
        llvm_unreachable("Invalid BitCast");
      }
    } else if (DstTy->isFloatTy()) {
      if (SrcTy->isIntegerTy())
        Dest.FloatVal = Src.IntVal.bitsToFloat();
      else {
        Dest.FloatVal = Src.FloatVal;
      }
    } else if (DstTy->isDoubleTy()) {
      if (SrcTy->isIntegerTy())
        Dest.DoubleVal = Src.IntVal.bitsToDouble();
      else {
        Dest.DoubleVal = Src.DoubleVal;
      }
    } else {
      llvm_unreachable("Invalid Bitcast");
    }
  }

  return Dest;
}
  1479. void Interpreter::visitTruncInst(TruncInst &I) {
  1480. ExecutionContext &SF = ECStack.back();
  1481. SetValue(&I, executeTruncInst(I.getOperand(0), I.getType(), SF), SF);
  1482. }
  1483. void Interpreter::visitSExtInst(SExtInst &I) {
  1484. ExecutionContext &SF = ECStack.back();
  1485. SetValue(&I, executeSExtInst(I.getOperand(0), I.getType(), SF), SF);
  1486. }
  1487. void Interpreter::visitZExtInst(ZExtInst &I) {
  1488. ExecutionContext &SF = ECStack.back();
  1489. SetValue(&I, executeZExtInst(I.getOperand(0), I.getType(), SF), SF);
  1490. }
  1491. void Interpreter::visitFPTruncInst(FPTruncInst &I) {
  1492. ExecutionContext &SF = ECStack.back();
  1493. SetValue(&I, executeFPTruncInst(I.getOperand(0), I.getType(), SF), SF);
  1494. }
  1495. void Interpreter::visitFPExtInst(FPExtInst &I) {
  1496. ExecutionContext &SF = ECStack.back();
  1497. SetValue(&I, executeFPExtInst(I.getOperand(0), I.getType(), SF), SF);
  1498. }
  1499. void Interpreter::visitUIToFPInst(UIToFPInst &I) {
  1500. ExecutionContext &SF = ECStack.back();
  1501. SetValue(&I, executeUIToFPInst(I.getOperand(0), I.getType(), SF), SF);
  1502. }
  1503. void Interpreter::visitSIToFPInst(SIToFPInst &I) {
  1504. ExecutionContext &SF = ECStack.back();
  1505. SetValue(&I, executeSIToFPInst(I.getOperand(0), I.getType(), SF), SF);
  1506. }
  1507. void Interpreter::visitFPToUIInst(FPToUIInst &I) {
  1508. ExecutionContext &SF = ECStack.back();
  1509. SetValue(&I, executeFPToUIInst(I.getOperand(0), I.getType(), SF), SF);
  1510. }
  1511. void Interpreter::visitFPToSIInst(FPToSIInst &I) {
  1512. ExecutionContext &SF = ECStack.back();
  1513. SetValue(&I, executeFPToSIInst(I.getOperand(0), I.getType(), SF), SF);
  1514. }
  1515. void Interpreter::visitPtrToIntInst(PtrToIntInst &I) {
  1516. ExecutionContext &SF = ECStack.back();
  1517. SetValue(&I, executePtrToIntInst(I.getOperand(0), I.getType(), SF), SF);
  1518. }
  1519. void Interpreter::visitIntToPtrInst(IntToPtrInst &I) {
  1520. ExecutionContext &SF = ECStack.back();
  1521. SetValue(&I, executeIntToPtrInst(I.getOperand(0), I.getType(), SF), SF);
  1522. }
  1523. void Interpreter::visitBitCastInst(BitCastInst &I) {
  1524. ExecutionContext &SF = ECStack.back();
  1525. SetValue(&I, executeBitCastInst(I.getOperand(0), I.getType(), SF), SF);
  1526. }
// Copy the union member matching type TY from Src to Dest.
#define IMPLEMENT_VAARG(TY) \
   case Type::TY##TyID: Dest.TY##Val = Src.TY##Val; break

// Execute a 'va_arg' instruction: fetch the next variadic argument of the
// requested type from the originating frame's VarArgs list.
void Interpreter::visitVAArgInst(VAArgInst &I) {
  ExecutionContext &SF = ECStack.back();

  // Get the incoming valist parameter.  LLI treats the valist as a
  // (ec-stack-depth var-arg-index) pair.
  GenericValue VAList = getOperandValue(I.getOperand(0), SF);
  GenericValue Dest;
  // Look the argument up in the frame that owns the va_list.
  GenericValue Src = ECStack[VAList.UIntPairVal.first]
                      .VarArgs[VAList.UIntPairVal.second];
  Type *Ty = I.getType();
  switch (Ty->getTypeID()) {
  case Type::IntegerTyID:
    Dest.IntVal = Src.IntVal;
    break;
  IMPLEMENT_VAARG(Pointer);
  IMPLEMENT_VAARG(Float);
  IMPLEMENT_VAARG(Double);
  default:
    dbgs() << "Unhandled dest type for vaarg instruction: " << *Ty << "\n";
    llvm_unreachable(nullptr);
  }

  // Set the Value of this Instruction.
  SetValue(&I, Dest, SF);

  // Move the pointer to the next vararg.
  // NOTE(review): VAList is a local copy of the operand's value; this
  // increment is not written back anywhere visible in this block — verify
  // whether the advance is persisted elsewhere before relying on it.
  ++VAList.UIntPairVal.second;
}
  1554. void Interpreter::visitExtractElementInst(ExtractElementInst &I) {
  1555. ExecutionContext &SF = ECStack.back();
  1556. GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
  1557. GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
  1558. GenericValue Dest;
  1559. Type *Ty = I.getType();
  1560. const unsigned indx = unsigned(Src2.IntVal.getZExtValue());
  1561. if(Src1.AggregateVal.size() > indx) {
  1562. switch (Ty->getTypeID()) {
  1563. default:
  1564. dbgs() << "Unhandled destination type for extractelement instruction: "
  1565. << *Ty << "\n";
  1566. llvm_unreachable(nullptr);
  1567. break;
  1568. case Type::IntegerTyID:
  1569. Dest.IntVal = Src1.AggregateVal[indx].IntVal;
  1570. break;
  1571. case Type::FloatTyID:
  1572. Dest.FloatVal = Src1.AggregateVal[indx].FloatVal;
  1573. break;
  1574. case Type::DoubleTyID:
  1575. Dest.DoubleVal = Src1.AggregateVal[indx].DoubleVal;
  1576. break;
  1577. }
  1578. } else {
  1579. dbgs() << "Invalid index in extractelement instruction\n";
  1580. }
  1581. SetValue(&I, Dest, SF);
  1582. }
  1583. void Interpreter::visitInsertElementInst(InsertElementInst &I) {
  1584. ExecutionContext &SF = ECStack.back();
  1585. Type *Ty = I.getType();
  1586. if(!(Ty->isVectorTy()) )
  1587. llvm_unreachable("Unhandled dest type for insertelement instruction");
  1588. GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
  1589. GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
  1590. GenericValue Src3 = getOperandValue(I.getOperand(2), SF);
  1591. GenericValue Dest;
  1592. Type *TyContained = Ty->getContainedType(0);
  1593. const unsigned indx = unsigned(Src3.IntVal.getZExtValue());
  1594. Dest.AggregateVal = Src1.AggregateVal;
  1595. if(Src1.AggregateVal.size() <= indx)
  1596. llvm_unreachable("Invalid index in insertelement instruction");
  1597. switch (TyContained->getTypeID()) {
  1598. default:
  1599. llvm_unreachable("Unhandled dest type for insertelement instruction");
  1600. case Type::IntegerTyID:
  1601. Dest.AggregateVal[indx].IntVal = Src2.IntVal;
  1602. break;
  1603. case Type::FloatTyID:
  1604. Dest.AggregateVal[indx].FloatVal = Src2.FloatVal;
  1605. break;
  1606. case Type::DoubleTyID:
  1607. Dest.AggregateVal[indx].DoubleVal = Src2.DoubleVal;
  1608. break;
  1609. }
  1610. SetValue(&I, Dest, SF);
  1611. }
  1612. void Interpreter::visitShuffleVectorInst(ShuffleVectorInst &I){
  1613. ExecutionContext &SF = ECStack.back();
  1614. Type *Ty = I.getType();
  1615. if(!(Ty->isVectorTy()))
  1616. llvm_unreachable("Unhandled dest type for shufflevector instruction");
  1617. GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
  1618. GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
  1619. GenericValue Src3 = getOperandValue(I.getOperand(2), SF);
  1620. GenericValue Dest;
  1621. // There is no need to check types of src1 and src2, because the compiled
  1622. // bytecode can't contain different types for src1 and src2 for a
  1623. // shufflevector instruction.
  1624. Type *TyContained = Ty->getContainedType(0);
  1625. unsigned src1Size = (unsigned)Src1.AggregateVal.size();
  1626. unsigned src2Size = (unsigned)Src2.AggregateVal.size();
  1627. unsigned src3Size = (unsigned)Src3.AggregateVal.size();
  1628. Dest.AggregateVal.resize(src3Size);
  1629. switch (TyContained->getTypeID()) {
  1630. default:
  1631. llvm_unreachable("Unhandled dest type for insertelement instruction");
  1632. break;
  1633. case Type::IntegerTyID:
  1634. for( unsigned i=0; i<src3Size; i++) {
  1635. unsigned j = Src3.AggregateVal[i].IntVal.getZExtValue();
  1636. if(j < src1Size)
  1637. Dest.AggregateVal[i].IntVal = Src1.AggregateVal[j].IntVal;
  1638. else if(j < src1Size + src2Size)
  1639. Dest.AggregateVal[i].IntVal = Src2.AggregateVal[j-src1Size].IntVal;
  1640. else
  1641. // The selector may not be greater than sum of lengths of first and
  1642. // second operands and llasm should not allow situation like
  1643. // %tmp = shufflevector <2 x i32> <i32 3, i32 4>, <2 x i32> undef,
  1644. // <2 x i32> < i32 0, i32 5 >,
  1645. // where i32 5 is invalid, but let it be additional check here:
  1646. llvm_unreachable("Invalid mask in shufflevector instruction");
  1647. }
  1648. break;
  1649. case Type::FloatTyID:
  1650. for( unsigned i=0; i<src3Size; i++) {
  1651. unsigned j = Src3.AggregateVal[i].IntVal.getZExtValue();
  1652. if(j < src1Size)
  1653. Dest.AggregateVal[i].FloatVal = Src1.AggregateVal[j].FloatVal;
  1654. else if(j < src1Size + src2Size)
  1655. Dest.AggregateVal[i].FloatVal = Src2.AggregateVal[j-src1Size].FloatVal;
  1656. else
  1657. llvm_unreachable("Invalid mask in shufflevector instruction");
  1658. }
  1659. break;
  1660. case Type::DoubleTyID:
  1661. for( unsigned i=0; i<src3Size; i++) {
  1662. unsigned j = Src3.AggregateVal[i].IntVal.getZExtValue();
  1663. if(j < src1Size)
  1664. Dest.AggregateVal[i].DoubleVal = Src1.AggregateVal[j].DoubleVal;
  1665. else if(j < src1Size + src2Size)
  1666. Dest.AggregateVal[i].DoubleVal =
  1667. Src2.AggregateVal[j-src1Size].DoubleVal;
  1668. else
  1669. llvm_unreachable("Invalid mask in shufflevector instruction");
  1670. }
  1671. break;
  1672. }
  1673. SetValue(&I, Dest, SF);
  1674. }
  1675. void Interpreter::visitExtractValueInst(ExtractValueInst &I) {
  1676. ExecutionContext &SF = ECStack.back();
  1677. Value *Agg = I.getAggregateOperand();
  1678. GenericValue Dest;
  1679. GenericValue Src = getOperandValue(Agg, SF);
  1680. ExtractValueInst::idx_iterator IdxBegin = I.idx_begin();
  1681. unsigned Num = I.getNumIndices();
  1682. GenericValue *pSrc = &Src;
  1683. for (unsigned i = 0 ; i < Num; ++i) {
  1684. pSrc = &pSrc->AggregateVal[*IdxBegin];
  1685. ++IdxBegin;
  1686. }
  1687. Type *IndexedType = ExtractValueInst::getIndexedType(Agg->getType(), I.getIndices());
  1688. switch (IndexedType->getTypeID()) {
  1689. default:
  1690. llvm_unreachable("Unhandled dest type for extractelement instruction");
  1691. break;
  1692. case Type::IntegerTyID:
  1693. Dest.IntVal = pSrc->IntVal;
  1694. break;
  1695. case Type::FloatTyID:
  1696. Dest.FloatVal = pSrc->FloatVal;
  1697. break;
  1698. case Type::DoubleTyID:
  1699. Dest.DoubleVal = pSrc->DoubleVal;
  1700. break;
  1701. case Type::ArrayTyID:
  1702. case Type::StructTyID:
  1703. case Type::VectorTyID:
  1704. Dest.AggregateVal = pSrc->AggregateVal;
  1705. break;
  1706. case Type::PointerTyID:
  1707. Dest.PointerVal = pSrc->PointerVal;
  1708. break;
  1709. }
  1710. SetValue(&I, Dest, SF);
  1711. }
  1712. void Interpreter::visitInsertValueInst(InsertValueInst &I) {
  1713. ExecutionContext &SF = ECStack.back();
  1714. Value *Agg = I.getAggregateOperand();
  1715. GenericValue Src1 = getOperandValue(Agg, SF);
  1716. GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
  1717. GenericValue Dest = Src1; // Dest is a slightly changed Src1
  1718. ExtractValueInst::idx_iterator IdxBegin = I.idx_begin();
  1719. unsigned Num = I.getNumIndices();
  1720. GenericValue *pDest = &Dest;
  1721. for (unsigned i = 0 ; i < Num; ++i) {
  1722. pDest = &pDest->AggregateVal[*IdxBegin];
  1723. ++IdxBegin;
  1724. }
  1725. // pDest points to the target value in the Dest now
  1726. Type *IndexedType = ExtractValueInst::getIndexedType(Agg->getType(), I.getIndices());
  1727. switch (IndexedType->getTypeID()) {
  1728. default:
  1729. llvm_unreachable("Unhandled dest type for insertelement instruction");
  1730. break;
  1731. case Type::IntegerTyID:
  1732. pDest->IntVal = Src2.IntVal;
  1733. break;
  1734. case Type::FloatTyID:
  1735. pDest->FloatVal = Src2.FloatVal;
  1736. break;
  1737. case Type::DoubleTyID:
  1738. pDest->DoubleVal = Src2.DoubleVal;
  1739. break;
  1740. case Type::ArrayTyID:
  1741. case Type::StructTyID:
  1742. case Type::VectorTyID:
  1743. pDest->AggregateVal = Src2.AggregateVal;
  1744. break;
  1745. case Type::PointerTyID:
  1746. pDest->PointerVal = Src2.PointerVal;
  1747. break;
  1748. }
  1749. SetValue(&I, Dest, SF);
  1750. }
// Evaluate a constant expression by dispatching to the same execute* helpers
// used for the corresponding instructions. Cast/GEP/compare/select opcodes
// return directly; the remaining binary opcodes fall through to the second
// switch, which computes into a local Dest.
GenericValue Interpreter::getConstantExprValue (ConstantExpr *CE,
                                                ExecutionContext &SF) {
  switch (CE->getOpcode()) {
  case Instruction::Trunc:
      return executeTruncInst(CE->getOperand(0), CE->getType(), SF);
  case Instruction::ZExt:
      return executeZExtInst(CE->getOperand(0), CE->getType(), SF);
  case Instruction::SExt:
      return executeSExtInst(CE->getOperand(0), CE->getType(), SF);
  case Instruction::FPTrunc:
      return executeFPTruncInst(CE->getOperand(0), CE->getType(), SF);
  case Instruction::FPExt:
      return executeFPExtInst(CE->getOperand(0), CE->getType(), SF);
  case Instruction::UIToFP:
      return executeUIToFPInst(CE->getOperand(0), CE->getType(), SF);
  case Instruction::SIToFP:
      return executeSIToFPInst(CE->getOperand(0), CE->getType(), SF);
  case Instruction::FPToUI:
      return executeFPToUIInst(CE->getOperand(0), CE->getType(), SF);
  case Instruction::FPToSI:
      return executeFPToSIInst(CE->getOperand(0), CE->getType(), SF);
  case Instruction::PtrToInt:
      return executePtrToIntInst(CE->getOperand(0), CE->getType(), SF);
  case Instruction::IntToPtr:
      return executeIntToPtrInst(CE->getOperand(0), CE->getType(), SF);
  case Instruction::BitCast:
      return executeBitCastInst(CE->getOperand(0), CE->getType(), SF);
  case Instruction::GetElementPtr:
    return executeGEPOperation(CE->getOperand(0), gep_type_begin(CE),
                               gep_type_end(CE), SF);
  case Instruction::FCmp:
  case Instruction::ICmp:
    return executeCmpInst(CE->getPredicate(),
                          getOperandValue(CE->getOperand(0), SF),
                          getOperandValue(CE->getOperand(1), SF),
                          CE->getOperand(0)->getType());
  case Instruction::Select:
    return executeSelectInst(getOperandValue(CE->getOperand(0), SF),
                             getOperandValue(CE->getOperand(1), SF),
                             getOperandValue(CE->getOperand(2), SF),
                             CE->getOperand(0)->getType());
  default :
    break;
  }

  // The cases below here require a GenericValue parameter for the result
  // so we initialize one, compute it and then return it.
  GenericValue Op0 = getOperandValue(CE->getOperand(0), SF);
  GenericValue Op1 = getOperandValue(CE->getOperand(1), SF);
  GenericValue Dest;
  // Ty is only consulted by the floating-point helpers below.
  Type * Ty = CE->getOperand(0)->getType();
  switch (CE->getOpcode()) {
  case Instruction::Add:  Dest.IntVal = Op0.IntVal + Op1.IntVal; break;
  case Instruction::Sub:  Dest.IntVal = Op0.IntVal - Op1.IntVal; break;
  case Instruction::Mul:  Dest.IntVal = Op0.IntVal * Op1.IntVal; break;
  case Instruction::FAdd: executeFAddInst(Dest, Op0, Op1, Ty); break;
  case Instruction::FSub: executeFSubInst(Dest, Op0, Op1, Ty); break;
  case Instruction::FMul: executeFMulInst(Dest, Op0, Op1, Ty); break;
  case Instruction::FDiv: executeFDivInst(Dest, Op0, Op1, Ty); break;
  case Instruction::FRem: executeFRemInst(Dest, Op0, Op1, Ty); break;
  case Instruction::SDiv: Dest.IntVal = Op0.IntVal.sdiv(Op1.IntVal); break;
  case Instruction::UDiv: Dest.IntVal = Op0.IntVal.udiv(Op1.IntVal); break;
  case Instruction::URem: Dest.IntVal = Op0.IntVal.urem(Op1.IntVal); break;
  case Instruction::SRem: Dest.IntVal = Op0.IntVal.srem(Op1.IntVal); break;
  case Instruction::And:  Dest.IntVal = Op0.IntVal & Op1.IntVal; break;
  case Instruction::Or:   Dest.IntVal = Op0.IntVal | Op1.IntVal; break;
  case Instruction::Xor:  Dest.IntVal = Op0.IntVal ^ Op1.IntVal; break;
  case Instruction::Shl:
    // Shift amounts are taken as the raw 64-bit value of the RHS.
    Dest.IntVal = Op0.IntVal.shl(Op1.IntVal.getZExtValue());
    break;
  case Instruction::LShr:
    Dest.IntVal = Op0.IntVal.lshr(Op1.IntVal.getZExtValue());
    break;
  case Instruction::AShr:
    Dest.IntVal = Op0.IntVal.ashr(Op1.IntVal.getZExtValue());
    break;
  default:
    dbgs() << "Unhandled ConstantExpr: " << *CE << "\n";
    llvm_unreachable("Unhandled ConstantExpr");
  }
  return Dest;
}
// Resolve the runtime value of an operand: constant expressions are
// evaluated, other constants converted, and ordinary SSA values looked up in
// the current stack frame's value map.
GenericValue Interpreter::getOperandValue(Value *V, ExecutionContext &SF) {
  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) {
    return getConstantExprValue(CE, SF);
  } else if (Constant *CPV = dyn_cast<Constant>(V)) {
    return getConstantValue(CPV);
  } else if (GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
    // NOTE(review): GlobalValue derives from Constant, so the branch above
    // already captures globals and this case looks unreachable — confirm
    // before relying on or removing it.
    return PTOGV(getPointerToGlobal(GV));
  } else {
    return SF.Values[V];
  }
}
  1843. //===----------------------------------------------------------------------===//
  1844. // Dispatch and Execution Code
  1845. //===----------------------------------------------------------------------===//
  1846. //===----------------------------------------------------------------------===//
  1847. // callFunction - Execute the specified function...
  1848. //
  1849. void Interpreter::callFunction(Function *F, ArrayRef<GenericValue> ArgVals) {
  1850. assert((ECStack.empty() || !ECStack.back().Caller.getInstruction() ||
  1851. ECStack.back().Caller.arg_size() == ArgVals.size()) &&
  1852. "Incorrect number of arguments passed into function call!");
  1853. // Make a new stack frame... and fill it in.
  1854. ECStack.emplace_back();
  1855. ExecutionContext &StackFrame = ECStack.back();
  1856. StackFrame.CurFunction = F;
  1857. // Special handling for external functions.
  1858. if (F->isDeclaration()) {
  1859. GenericValue Result = callExternalFunction (F, ArgVals);
  1860. // Simulate a 'ret' instruction of the appropriate type.
  1861. popStackAndReturnValueToCaller (F->getReturnType (), Result);
  1862. return;
  1863. }
  1864. // Get pointers to first LLVM BB & Instruction in function.
  1865. StackFrame.CurBB = &F->front();
  1866. StackFrame.CurInst = StackFrame.CurBB->begin();
  1867. // Run through the function arguments and initialize their values...
  1868. assert((ArgVals.size() == F->arg_size() ||
  1869. (ArgVals.size() > F->arg_size() && F->getFunctionType()->isVarArg()))&&
  1870. "Invalid number of values passed to function invocation!");
  1871. // Handle non-varargs arguments...
  1872. unsigned i = 0;
  1873. for (Function::arg_iterator AI = F->arg_begin(), E = F->arg_end();
  1874. AI != E; ++AI, ++i)
  1875. SetValue(&*AI, ArgVals[i], StackFrame);
  1876. // Handle varargs arguments...
  1877. StackFrame.VarArgs.assign(ArgVals.begin()+i, ArgVals.end());
  1878. }
  1879. void Interpreter::run() {
  1880. while (!ECStack.empty()) {
  1881. // Interpret a single instruction & increment the "PC".
  1882. ExecutionContext &SF = ECStack.back(); // Current stack frame
  1883. Instruction &I = *SF.CurInst++; // Increment before execute
  1884. // Track the number of dynamic instructions executed.
  1885. ++NumDynamicInsts;
  1886. DEBUG(dbgs() << "About to interpret: " << I);
  1887. visit(I); // Dispatch to one of the visit* methods...
  1888. }
  1889. }