//===-- Execution.cpp - Implement code to simulate the program ------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the actual instruction interpreter.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "interpreter"
#include "Interpreter.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/IntrinsicLowering.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Instructions.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/MathExtras.h"
#include <algorithm>
#include <cmath>
using namespace llvm;

STATISTIC(NumDynamicInsts, "Number of dynamic instructions executed");

static cl::opt<bool> PrintVolatile("interpreter-print-volatile", cl::Hidden,
          cl::desc("make the interpreter print every volatile load and store"));

//===----------------------------------------------------------------------===//
//                     Various Helper Functions
//===----------------------------------------------------------------------===//

static void SetValue(Value *V, GenericValue Val, ExecutionContext &SF) {
  SF.Values[V] = Val;
}
//===----------------------------------------------------------------------===//
//                    Binary Instruction Implementations
//===----------------------------------------------------------------------===//

#define IMPLEMENT_BINARY_OPERATOR(OP, TY) \
  case Type::TY##TyID: \
    Dest.TY##Val = Src1.TY##Val OP Src2.TY##Val; \
    break

static void executeFAddInst(GenericValue &Dest, GenericValue Src1,
                            GenericValue Src2, Type *Ty) {
  switch (Ty->getTypeID()) {
    IMPLEMENT_BINARY_OPERATOR(+, Float);
    IMPLEMENT_BINARY_OPERATOR(+, Double);
  default:
    dbgs() << "Unhandled type for FAdd instruction: " << *Ty << "\n";
    llvm_unreachable(0);
  }
}

static void executeFSubInst(GenericValue &Dest, GenericValue Src1,
                            GenericValue Src2, Type *Ty) {
  switch (Ty->getTypeID()) {
    IMPLEMENT_BINARY_OPERATOR(-, Float);
    IMPLEMENT_BINARY_OPERATOR(-, Double);
  default:
    dbgs() << "Unhandled type for FSub instruction: " << *Ty << "\n";
    llvm_unreachable(0);
  }
}

static void executeFMulInst(GenericValue &Dest, GenericValue Src1,
                            GenericValue Src2, Type *Ty) {
  switch (Ty->getTypeID()) {
    IMPLEMENT_BINARY_OPERATOR(*, Float);
    IMPLEMENT_BINARY_OPERATOR(*, Double);
  default:
    dbgs() << "Unhandled type for FMul instruction: " << *Ty << "\n";
    llvm_unreachable(0);
  }
}

static void executeFDivInst(GenericValue &Dest, GenericValue Src1,
                            GenericValue Src2, Type *Ty) {
  switch (Ty->getTypeID()) {
    IMPLEMENT_BINARY_OPERATOR(/, Float);
    IMPLEMENT_BINARY_OPERATOR(/, Double);
  default:
    dbgs() << "Unhandled type for FDiv instruction: " << *Ty << "\n";
    llvm_unreachable(0);
  }
}

static void executeFRemInst(GenericValue &Dest, GenericValue Src1,
                            GenericValue Src2, Type *Ty) {
  switch (Ty->getTypeID()) {
  case Type::FloatTyID:
    Dest.FloatVal = fmod(Src1.FloatVal, Src2.FloatVal);
    break;
  case Type::DoubleTyID:
    Dest.DoubleVal = fmod(Src1.DoubleVal, Src2.DoubleVal);
    break;
  default:
    dbgs() << "Unhandled type for Rem instruction: " << *Ty << "\n";
    llvm_unreachable(0);
  }
}
#define IMPLEMENT_INTEGER_ICMP(OP, TY) \
  case Type::IntegerTyID: \
    Dest.IntVal = APInt(1,Src1.IntVal.OP(Src2.IntVal)); \
    break;

#define IMPLEMENT_VECTOR_INTEGER_ICMP(OP, TY) \
  case Type::VectorTyID: { \
    assert(Src1.AggregateVal.size() == Src2.AggregateVal.size()); \
    Dest.AggregateVal.resize( Src1.AggregateVal.size() ); \
    for( uint32_t _i=0;_i<Src1.AggregateVal.size();_i++) \
      Dest.AggregateVal[_i].IntVal = APInt(1, \
        Src1.AggregateVal[_i].IntVal.OP(Src2.AggregateVal[_i].IntVal)); \
  } break;

// Handle pointers specially because they must be compared with only as much
// width as the host has.  We _do not_ want to be comparing 64 bit values when
// running on a 32-bit target, otherwise the upper 32 bits might mess up
// comparisons if they contain garbage.
#define IMPLEMENT_POINTER_ICMP(OP) \
  case Type::PointerTyID: \
    Dest.IntVal = APInt(1,(void*)(intptr_t)Src1.PointerVal OP \
                          (void*)(intptr_t)Src2.PointerVal); \
    break;
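
// Editor's sketch (illustrative, not part of the original source): for the
// '==' predicate, IMPLEMENT_POINTER_ICMP expands to roughly the case below.
// Both pointers are narrowed through intptr_t so the comparison uses only
// host pointer width, per the comment above:
//
//   case Type::PointerTyID:
//     Dest.IntVal = APInt(1, (void*)(intptr_t)Src1.PointerVal ==
//                            (void*)(intptr_t)Src2.PointerVal);
//     break;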
static GenericValue executeICMP_EQ(GenericValue Src1, GenericValue Src2,
                                   Type *Ty) {
  GenericValue Dest;
  switch (Ty->getTypeID()) {
    IMPLEMENT_INTEGER_ICMP(eq,Ty);
    IMPLEMENT_VECTOR_INTEGER_ICMP(eq,Ty);
    IMPLEMENT_POINTER_ICMP(==);
  default:
    dbgs() << "Unhandled type for ICMP_EQ predicate: " << *Ty << "\n";
    llvm_unreachable(0);
  }
  return Dest;
}

static GenericValue executeICMP_NE(GenericValue Src1, GenericValue Src2,
                                   Type *Ty) {
  GenericValue Dest;
  switch (Ty->getTypeID()) {
    IMPLEMENT_INTEGER_ICMP(ne,Ty);
    IMPLEMENT_VECTOR_INTEGER_ICMP(ne,Ty);
    IMPLEMENT_POINTER_ICMP(!=);
  default:
    dbgs() << "Unhandled type for ICMP_NE predicate: " << *Ty << "\n";
    llvm_unreachable(0);
  }
  return Dest;
}

static GenericValue executeICMP_ULT(GenericValue Src1, GenericValue Src2,
                                    Type *Ty) {
  GenericValue Dest;
  switch (Ty->getTypeID()) {
    IMPLEMENT_INTEGER_ICMP(ult,Ty);
    IMPLEMENT_VECTOR_INTEGER_ICMP(ult,Ty);
    IMPLEMENT_POINTER_ICMP(<);
  default:
    dbgs() << "Unhandled type for ICMP_ULT predicate: " << *Ty << "\n";
    llvm_unreachable(0);
  }
  return Dest;
}

static GenericValue executeICMP_SLT(GenericValue Src1, GenericValue Src2,
                                    Type *Ty) {
  GenericValue Dest;
  switch (Ty->getTypeID()) {
    IMPLEMENT_INTEGER_ICMP(slt,Ty);
    IMPLEMENT_VECTOR_INTEGER_ICMP(slt,Ty);
    IMPLEMENT_POINTER_ICMP(<);
  default:
    dbgs() << "Unhandled type for ICMP_SLT predicate: " << *Ty << "\n";
    llvm_unreachable(0);
  }
  return Dest;
}

static GenericValue executeICMP_UGT(GenericValue Src1, GenericValue Src2,
                                    Type *Ty) {
  GenericValue Dest;
  switch (Ty->getTypeID()) {
    IMPLEMENT_INTEGER_ICMP(ugt,Ty);
    IMPLEMENT_VECTOR_INTEGER_ICMP(ugt,Ty);
    IMPLEMENT_POINTER_ICMP(>);
  default:
    dbgs() << "Unhandled type for ICMP_UGT predicate: " << *Ty << "\n";
    llvm_unreachable(0);
  }
  return Dest;
}

static GenericValue executeICMP_SGT(GenericValue Src1, GenericValue Src2,
                                    Type *Ty) {
  GenericValue Dest;
  switch (Ty->getTypeID()) {
    IMPLEMENT_INTEGER_ICMP(sgt,Ty);
    IMPLEMENT_VECTOR_INTEGER_ICMP(sgt,Ty);
    IMPLEMENT_POINTER_ICMP(>);
  default:
    dbgs() << "Unhandled type for ICMP_SGT predicate: " << *Ty << "\n";
    llvm_unreachable(0);
  }
  return Dest;
}

static GenericValue executeICMP_ULE(GenericValue Src1, GenericValue Src2,
                                    Type *Ty) {
  GenericValue Dest;
  switch (Ty->getTypeID()) {
    IMPLEMENT_INTEGER_ICMP(ule,Ty);
    IMPLEMENT_VECTOR_INTEGER_ICMP(ule,Ty);
    IMPLEMENT_POINTER_ICMP(<=);
  default:
    dbgs() << "Unhandled type for ICMP_ULE predicate: " << *Ty << "\n";
    llvm_unreachable(0);
  }
  return Dest;
}

static GenericValue executeICMP_SLE(GenericValue Src1, GenericValue Src2,
                                    Type *Ty) {
  GenericValue Dest;
  switch (Ty->getTypeID()) {
    IMPLEMENT_INTEGER_ICMP(sle,Ty);
    IMPLEMENT_VECTOR_INTEGER_ICMP(sle,Ty);
    IMPLEMENT_POINTER_ICMP(<=);
  default:
    dbgs() << "Unhandled type for ICMP_SLE predicate: " << *Ty << "\n";
    llvm_unreachable(0);
  }
  return Dest;
}

static GenericValue executeICMP_UGE(GenericValue Src1, GenericValue Src2,
                                    Type *Ty) {
  GenericValue Dest;
  switch (Ty->getTypeID()) {
    IMPLEMENT_INTEGER_ICMP(uge,Ty);
    IMPLEMENT_VECTOR_INTEGER_ICMP(uge,Ty);
    IMPLEMENT_POINTER_ICMP(>=);
  default:
    dbgs() << "Unhandled type for ICMP_UGE predicate: " << *Ty << "\n";
    llvm_unreachable(0);
  }
  return Dest;
}

static GenericValue executeICMP_SGE(GenericValue Src1, GenericValue Src2,
                                    Type *Ty) {
  GenericValue Dest;
  switch (Ty->getTypeID()) {
    IMPLEMENT_INTEGER_ICMP(sge,Ty);
    IMPLEMENT_VECTOR_INTEGER_ICMP(sge,Ty);
    IMPLEMENT_POINTER_ICMP(>=);
  default:
    dbgs() << "Unhandled type for ICMP_SGE predicate: " << *Ty << "\n";
    llvm_unreachable(0);
  }
  return Dest;
}
void Interpreter::visitICmpInst(ICmpInst &I) {
  ExecutionContext &SF = ECStack.back();
  Type *Ty = I.getOperand(0)->getType();
  GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
  GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
  GenericValue R;   // Result

  switch (I.getPredicate()) {
  case ICmpInst::ICMP_EQ:  R = executeICMP_EQ(Src1, Src2, Ty);  break;
  case ICmpInst::ICMP_NE:  R = executeICMP_NE(Src1, Src2, Ty);  break;
  case ICmpInst::ICMP_ULT: R = executeICMP_ULT(Src1, Src2, Ty); break;
  case ICmpInst::ICMP_SLT: R = executeICMP_SLT(Src1, Src2, Ty); break;
  case ICmpInst::ICMP_UGT: R = executeICMP_UGT(Src1, Src2, Ty); break;
  case ICmpInst::ICMP_SGT: R = executeICMP_SGT(Src1, Src2, Ty); break;
  case ICmpInst::ICMP_ULE: R = executeICMP_ULE(Src1, Src2, Ty); break;
  case ICmpInst::ICMP_SLE: R = executeICMP_SLE(Src1, Src2, Ty); break;
  case ICmpInst::ICMP_UGE: R = executeICMP_UGE(Src1, Src2, Ty); break;
  case ICmpInst::ICMP_SGE: R = executeICMP_SGE(Src1, Src2, Ty); break;
  default:
    dbgs() << "Don't know how to handle this ICmp predicate!\n-->" << I;
    llvm_unreachable(0);
  }

  SetValue(&I, R, SF);
}
#define IMPLEMENT_FCMP(OP, TY) \
  case Type::TY##TyID: \
    Dest.IntVal = APInt(1,Src1.TY##Val OP Src2.TY##Val); \
    break

#define IMPLEMENT_VECTOR_FCMP_T(OP, TY) \
  assert(Src1.AggregateVal.size() == Src2.AggregateVal.size()); \
  Dest.AggregateVal.resize( Src1.AggregateVal.size() ); \
  for( uint32_t _i=0;_i<Src1.AggregateVal.size();_i++) \
    Dest.AggregateVal[_i].IntVal = APInt(1, \
      Src1.AggregateVal[_i].TY##Val OP Src2.AggregateVal[_i].TY##Val); \
  break;

#define IMPLEMENT_VECTOR_FCMP(OP) \
  case Type::VectorTyID: \
    if(dyn_cast<VectorType>(Ty)->getElementType()->isFloatTy()) { \
      IMPLEMENT_VECTOR_FCMP_T(OP, Float); \
    } else { \
      IMPLEMENT_VECTOR_FCMP_T(OP, Double); \
    }
static GenericValue executeFCMP_OEQ(GenericValue Src1, GenericValue Src2,
                                    Type *Ty) {
  GenericValue Dest;
  switch (Ty->getTypeID()) {
    IMPLEMENT_FCMP(==, Float);
    IMPLEMENT_FCMP(==, Double);
    IMPLEMENT_VECTOR_FCMP(==);
  default:
    dbgs() << "Unhandled type for FCmp EQ instruction: " << *Ty << "\n";
    llvm_unreachable(0);
  }
  return Dest;
}

#define IMPLEMENT_SCALAR_NANS(TY, X,Y) \
  if (TY->isFloatTy()) { \
    if (X.FloatVal != X.FloatVal || Y.FloatVal != Y.FloatVal) { \
      Dest.IntVal = APInt(1,false); \
      return Dest; \
    } \
  } else { \
    if (X.DoubleVal != X.DoubleVal || Y.DoubleVal != Y.DoubleVal) { \
      Dest.IntVal = APInt(1,false); \
      return Dest; \
    } \
  }
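
// Editor's note (illustrative, not part of the original source): the X != X
// tests above are the standard NaN check, since an IEEE-754 NaN is the only
// value that compares unequal to itself:
//
//   #include <limits>
//   double d = std::numeric_limits<double>::quiet_NaN();
//   bool isNaN = (d != d);   // true only when d is NaN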
#define MASK_VECTOR_NANS_T(X,Y, TZ, FLAG) \
  assert(X.AggregateVal.size() == Y.AggregateVal.size()); \
  Dest.AggregateVal.resize( X.AggregateVal.size() ); \
  for( uint32_t _i=0;_i<X.AggregateVal.size();_i++) { \
    if (X.AggregateVal[_i].TZ##Val != X.AggregateVal[_i].TZ##Val || \
        Y.AggregateVal[_i].TZ##Val != Y.AggregateVal[_i].TZ##Val) \
      Dest.AggregateVal[_i].IntVal = APInt(1,FLAG); \
    else { \
      Dest.AggregateVal[_i].IntVal = APInt(1,!FLAG); \
    } \
  }

#define MASK_VECTOR_NANS(TY, X,Y, FLAG) \
  if (TY->isVectorTy()) { \
    if (dyn_cast<VectorType>(TY)->getElementType()->isFloatTy()) { \
      MASK_VECTOR_NANS_T(X, Y, Float, FLAG) \
    } else { \
      MASK_VECTOR_NANS_T(X, Y, Double, FLAG) \
    } \
  } \

static GenericValue executeFCMP_ONE(GenericValue Src1, GenericValue Src2,
                                    Type *Ty)
{
  GenericValue Dest;
  // if input is scalar value and Src1 or Src2 is NaN return false
  IMPLEMENT_SCALAR_NANS(Ty, Src1, Src2)
  // if vector input detect NaNs and fill mask
  MASK_VECTOR_NANS(Ty, Src1, Src2, false)
  GenericValue DestMask = Dest;
  switch (Ty->getTypeID()) {
    IMPLEMENT_FCMP(!=, Float);
    IMPLEMENT_FCMP(!=, Double);
    IMPLEMENT_VECTOR_FCMP(!=);
  default:
    dbgs() << "Unhandled type for FCmp NE instruction: " << *Ty << "\n";
    llvm_unreachable(0);
  }
  // in vector case mask out NaN elements
  if (Ty->isVectorTy())
    for( size_t _i=0; _i<Src1.AggregateVal.size(); _i++)
      if (DestMask.AggregateVal[_i].IntVal == false)
        Dest.AggregateVal[_i].IntVal = APInt(1,false);

  return Dest;
}
static GenericValue executeFCMP_OLE(GenericValue Src1, GenericValue Src2,
                                    Type *Ty) {
  GenericValue Dest;
  switch (Ty->getTypeID()) {
    IMPLEMENT_FCMP(<=, Float);
    IMPLEMENT_FCMP(<=, Double);
    IMPLEMENT_VECTOR_FCMP(<=);
  default:
    dbgs() << "Unhandled type for FCmp LE instruction: " << *Ty << "\n";
    llvm_unreachable(0);
  }
  return Dest;
}

static GenericValue executeFCMP_OGE(GenericValue Src1, GenericValue Src2,
                                    Type *Ty) {
  GenericValue Dest;
  switch (Ty->getTypeID()) {
    IMPLEMENT_FCMP(>=, Float);
    IMPLEMENT_FCMP(>=, Double);
    IMPLEMENT_VECTOR_FCMP(>=);
  default:
    dbgs() << "Unhandled type for FCmp GE instruction: " << *Ty << "\n";
    llvm_unreachable(0);
  }
  return Dest;
}

static GenericValue executeFCMP_OLT(GenericValue Src1, GenericValue Src2,
                                    Type *Ty) {
  GenericValue Dest;
  switch (Ty->getTypeID()) {
    IMPLEMENT_FCMP(<, Float);
    IMPLEMENT_FCMP(<, Double);
    IMPLEMENT_VECTOR_FCMP(<);
  default:
    dbgs() << "Unhandled type for FCmp LT instruction: " << *Ty << "\n";
    llvm_unreachable(0);
  }
  return Dest;
}

static GenericValue executeFCMP_OGT(GenericValue Src1, GenericValue Src2,
                                    Type *Ty) {
  GenericValue Dest;
  switch (Ty->getTypeID()) {
    IMPLEMENT_FCMP(>, Float);
    IMPLEMENT_FCMP(>, Double);
    IMPLEMENT_VECTOR_FCMP(>);
  default:
    dbgs() << "Unhandled type for FCmp GT instruction: " << *Ty << "\n";
    llvm_unreachable(0);
  }
  return Dest;
}

#define IMPLEMENT_UNORDERED(TY, X,Y) \
  if (TY->isFloatTy()) { \
    if (X.FloatVal != X.FloatVal || Y.FloatVal != Y.FloatVal) { \
      Dest.IntVal = APInt(1,true); \
      return Dest; \
    } \
  } else if (X.DoubleVal != X.DoubleVal || Y.DoubleVal != Y.DoubleVal) { \
    Dest.IntVal = APInt(1,true); \
    return Dest; \
  }

#define IMPLEMENT_VECTOR_UNORDERED(TY, X,Y, _FUNC) \
  if (TY->isVectorTy()) { \
    GenericValue DestMask = Dest; \
    Dest = _FUNC(Src1, Src2, Ty); \
    for( size_t _i=0; _i<Src1.AggregateVal.size(); _i++) \
      if (DestMask.AggregateVal[_i].IntVal == true) \
        Dest.AggregateVal[_i].IntVal = APInt(1,true); \
    return Dest; \
  }
static GenericValue executeFCMP_UEQ(GenericValue Src1, GenericValue Src2,
                                    Type *Ty) {
  GenericValue Dest;
  IMPLEMENT_UNORDERED(Ty, Src1, Src2)
  MASK_VECTOR_NANS(Ty, Src1, Src2, true)
  IMPLEMENT_VECTOR_UNORDERED(Ty, Src1, Src2, executeFCMP_OEQ)
  return executeFCMP_OEQ(Src1, Src2, Ty);
}

static GenericValue executeFCMP_UNE(GenericValue Src1, GenericValue Src2,
                                    Type *Ty) {
  GenericValue Dest;
  IMPLEMENT_UNORDERED(Ty, Src1, Src2)
  MASK_VECTOR_NANS(Ty, Src1, Src2, true)
  IMPLEMENT_VECTOR_UNORDERED(Ty, Src1, Src2, executeFCMP_ONE)
  return executeFCMP_ONE(Src1, Src2, Ty);
}

static GenericValue executeFCMP_ULE(GenericValue Src1, GenericValue Src2,
                                    Type *Ty) {
  GenericValue Dest;
  IMPLEMENT_UNORDERED(Ty, Src1, Src2)
  MASK_VECTOR_NANS(Ty, Src1, Src2, true)
  IMPLEMENT_VECTOR_UNORDERED(Ty, Src1, Src2, executeFCMP_OLE)
  return executeFCMP_OLE(Src1, Src2, Ty);
}

static GenericValue executeFCMP_UGE(GenericValue Src1, GenericValue Src2,
                                    Type *Ty) {
  GenericValue Dest;
  IMPLEMENT_UNORDERED(Ty, Src1, Src2)
  MASK_VECTOR_NANS(Ty, Src1, Src2, true)
  IMPLEMENT_VECTOR_UNORDERED(Ty, Src1, Src2, executeFCMP_OGE)
  return executeFCMP_OGE(Src1, Src2, Ty);
}

static GenericValue executeFCMP_ULT(GenericValue Src1, GenericValue Src2,
                                    Type *Ty) {
  GenericValue Dest;
  IMPLEMENT_UNORDERED(Ty, Src1, Src2)
  MASK_VECTOR_NANS(Ty, Src1, Src2, true)
  IMPLEMENT_VECTOR_UNORDERED(Ty, Src1, Src2, executeFCMP_OLT)
  return executeFCMP_OLT(Src1, Src2, Ty);
}

static GenericValue executeFCMP_UGT(GenericValue Src1, GenericValue Src2,
                                    Type *Ty) {
  GenericValue Dest;
  IMPLEMENT_UNORDERED(Ty, Src1, Src2)
  MASK_VECTOR_NANS(Ty, Src1, Src2, true)
  IMPLEMENT_VECTOR_UNORDERED(Ty, Src1, Src2, executeFCMP_OGT)
  return executeFCMP_OGT(Src1, Src2, Ty);
}
static GenericValue executeFCMP_ORD(GenericValue Src1, GenericValue Src2,
                                    Type *Ty) {
  GenericValue Dest;
  if(Ty->isVectorTy()) {
    assert(Src1.AggregateVal.size() == Src2.AggregateVal.size());
    Dest.AggregateVal.resize( Src1.AggregateVal.size() );
    if(dyn_cast<VectorType>(Ty)->getElementType()->isFloatTy()) {
      for( size_t _i=0;_i<Src1.AggregateVal.size();_i++)
        Dest.AggregateVal[_i].IntVal = APInt(1,
          ( (Src1.AggregateVal[_i].FloatVal ==
             Src1.AggregateVal[_i].FloatVal) &&
            (Src2.AggregateVal[_i].FloatVal ==
             Src2.AggregateVal[_i].FloatVal)));
    } else {
      for( size_t _i=0;_i<Src1.AggregateVal.size();_i++)
        Dest.AggregateVal[_i].IntVal = APInt(1,
          ( (Src1.AggregateVal[_i].DoubleVal ==
             Src1.AggregateVal[_i].DoubleVal) &&
            (Src2.AggregateVal[_i].DoubleVal ==
             Src2.AggregateVal[_i].DoubleVal)));
    }
  } else if (Ty->isFloatTy())
    Dest.IntVal = APInt(1,(Src1.FloatVal == Src1.FloatVal &&
                           Src2.FloatVal == Src2.FloatVal));
  else {
    Dest.IntVal = APInt(1,(Src1.DoubleVal == Src1.DoubleVal &&
                           Src2.DoubleVal == Src2.DoubleVal));
  }
  return Dest;
}

static GenericValue executeFCMP_UNO(GenericValue Src1, GenericValue Src2,
                                    Type *Ty) {
  GenericValue Dest;
  if(Ty->isVectorTy()) {
    assert(Src1.AggregateVal.size() == Src2.AggregateVal.size());
    Dest.AggregateVal.resize( Src1.AggregateVal.size() );
    if(dyn_cast<VectorType>(Ty)->getElementType()->isFloatTy()) {
      for( size_t _i=0;_i<Src1.AggregateVal.size();_i++)
        Dest.AggregateVal[_i].IntVal = APInt(1,
          ( (Src1.AggregateVal[_i].FloatVal !=
             Src1.AggregateVal[_i].FloatVal) ||
            (Src2.AggregateVal[_i].FloatVal !=
             Src2.AggregateVal[_i].FloatVal)));
    } else {
      for( size_t _i=0;_i<Src1.AggregateVal.size();_i++)
        Dest.AggregateVal[_i].IntVal = APInt(1,
          ( (Src1.AggregateVal[_i].DoubleVal !=
             Src1.AggregateVal[_i].DoubleVal) ||
            (Src2.AggregateVal[_i].DoubleVal !=
             Src2.AggregateVal[_i].DoubleVal)));
    }
  } else if (Ty->isFloatTy())
    Dest.IntVal = APInt(1,(Src1.FloatVal != Src1.FloatVal ||
                           Src2.FloatVal != Src2.FloatVal));
  else {
    Dest.IntVal = APInt(1,(Src1.DoubleVal != Src1.DoubleVal ||
                           Src2.DoubleVal != Src2.DoubleVal));
  }
  return Dest;
}
static GenericValue executeFCMP_BOOL(GenericValue Src1, GenericValue Src2,
                                     const Type *Ty, const bool val) {
  GenericValue Dest;
  if(Ty->isVectorTy()) {
    assert(Src1.AggregateVal.size() == Src2.AggregateVal.size());
    Dest.AggregateVal.resize( Src1.AggregateVal.size() );
    for( size_t _i=0; _i<Src1.AggregateVal.size(); _i++)
      Dest.AggregateVal[_i].IntVal = APInt(1,val);
  } else {
    Dest.IntVal = APInt(1, val);
  }

  return Dest;
}

void Interpreter::visitFCmpInst(FCmpInst &I) {
  ExecutionContext &SF = ECStack.back();
  Type *Ty = I.getOperand(0)->getType();
  GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
  GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
  GenericValue R;   // Result

  switch (I.getPredicate()) {
  default:
    dbgs() << "Don't know how to handle this FCmp predicate!\n-->" << I;
    llvm_unreachable(0);
    break;
  case FCmpInst::FCMP_FALSE: R = executeFCMP_BOOL(Src1, Src2, Ty, false);
                             break;
  case FCmpInst::FCMP_TRUE:  R = executeFCMP_BOOL(Src1, Src2, Ty, true);
                             break;
  case FCmpInst::FCMP_ORD:   R = executeFCMP_ORD(Src1, Src2, Ty); break;
  case FCmpInst::FCMP_UNO:   R = executeFCMP_UNO(Src1, Src2, Ty); break;
  case FCmpInst::FCMP_UEQ:   R = executeFCMP_UEQ(Src1, Src2, Ty); break;
  case FCmpInst::FCMP_OEQ:   R = executeFCMP_OEQ(Src1, Src2, Ty); break;
  case FCmpInst::FCMP_UNE:   R = executeFCMP_UNE(Src1, Src2, Ty); break;
  case FCmpInst::FCMP_ONE:   R = executeFCMP_ONE(Src1, Src2, Ty); break;
  case FCmpInst::FCMP_ULT:   R = executeFCMP_ULT(Src1, Src2, Ty); break;
  case FCmpInst::FCMP_OLT:   R = executeFCMP_OLT(Src1, Src2, Ty); break;
  case FCmpInst::FCMP_UGT:   R = executeFCMP_UGT(Src1, Src2, Ty); break;
  case FCmpInst::FCMP_OGT:   R = executeFCMP_OGT(Src1, Src2, Ty); break;
  case FCmpInst::FCMP_ULE:   R = executeFCMP_ULE(Src1, Src2, Ty); break;
  case FCmpInst::FCMP_OLE:   R = executeFCMP_OLE(Src1, Src2, Ty); break;
  case FCmpInst::FCMP_UGE:   R = executeFCMP_UGE(Src1, Src2, Ty); break;
  case FCmpInst::FCMP_OGE:   R = executeFCMP_OGE(Src1, Src2, Ty); break;
  }

  SetValue(&I, R, SF);
}
static GenericValue executeCmpInst(unsigned predicate, GenericValue Src1,
                                   GenericValue Src2, Type *Ty) {
  GenericValue Result;
  switch (predicate) {
  case ICmpInst::ICMP_EQ:    return executeICMP_EQ(Src1, Src2, Ty);
  case ICmpInst::ICMP_NE:    return executeICMP_NE(Src1, Src2, Ty);
  case ICmpInst::ICMP_UGT:   return executeICMP_UGT(Src1, Src2, Ty);
  case ICmpInst::ICMP_SGT:   return executeICMP_SGT(Src1, Src2, Ty);
  case ICmpInst::ICMP_ULT:   return executeICMP_ULT(Src1, Src2, Ty);
  case ICmpInst::ICMP_SLT:   return executeICMP_SLT(Src1, Src2, Ty);
  case ICmpInst::ICMP_UGE:   return executeICMP_UGE(Src1, Src2, Ty);
  case ICmpInst::ICMP_SGE:   return executeICMP_SGE(Src1, Src2, Ty);
  case ICmpInst::ICMP_ULE:   return executeICMP_ULE(Src1, Src2, Ty);
  case ICmpInst::ICMP_SLE:   return executeICMP_SLE(Src1, Src2, Ty);
  case FCmpInst::FCMP_ORD:   return executeFCMP_ORD(Src1, Src2, Ty);
  case FCmpInst::FCMP_UNO:   return executeFCMP_UNO(Src1, Src2, Ty);
  case FCmpInst::FCMP_OEQ:   return executeFCMP_OEQ(Src1, Src2, Ty);
  case FCmpInst::FCMP_UEQ:   return executeFCMP_UEQ(Src1, Src2, Ty);
  case FCmpInst::FCMP_ONE:   return executeFCMP_ONE(Src1, Src2, Ty);
  case FCmpInst::FCMP_UNE:   return executeFCMP_UNE(Src1, Src2, Ty);
  case FCmpInst::FCMP_OLT:   return executeFCMP_OLT(Src1, Src2, Ty);
  case FCmpInst::FCMP_ULT:   return executeFCMP_ULT(Src1, Src2, Ty);
  case FCmpInst::FCMP_OGT:   return executeFCMP_OGT(Src1, Src2, Ty);
  case FCmpInst::FCMP_UGT:   return executeFCMP_UGT(Src1, Src2, Ty);
  case FCmpInst::FCMP_OLE:   return executeFCMP_OLE(Src1, Src2, Ty);
  case FCmpInst::FCMP_ULE:   return executeFCMP_ULE(Src1, Src2, Ty);
  case FCmpInst::FCMP_OGE:   return executeFCMP_OGE(Src1, Src2, Ty);
  case FCmpInst::FCMP_UGE:   return executeFCMP_UGE(Src1, Src2, Ty);
  case FCmpInst::FCMP_FALSE: return executeFCMP_BOOL(Src1, Src2, Ty, false);
  case FCmpInst::FCMP_TRUE:  return executeFCMP_BOOL(Src1, Src2, Ty, true);
  default:
    dbgs() << "Unhandled Cmp predicate\n";
    llvm_unreachable(0);
  }
}
void Interpreter::visitBinaryOperator(BinaryOperator &I) {
  ExecutionContext &SF = ECStack.back();
  Type *Ty = I.getOperand(0)->getType();
  GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
  GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
  GenericValue R;   // Result

  // First process vector operation
  if (Ty->isVectorTy()) {
    assert(Src1.AggregateVal.size() == Src2.AggregateVal.size());
    R.AggregateVal.resize(Src1.AggregateVal.size());

    // Macros to execute binary operation 'OP' over integer vectors
#define INTEGER_VECTOR_OPERATION(OP) \
    for (unsigned i = 0; i < R.AggregateVal.size(); ++i) \
      R.AggregateVal[i].IntVal = \
        Src1.AggregateVal[i].IntVal OP Src2.AggregateVal[i].IntVal;

    // Additional macros to execute binary operations udiv/sdiv/urem/srem since
    // they have different notation.
#define INTEGER_VECTOR_FUNCTION(OP) \
    for (unsigned i = 0; i < R.AggregateVal.size(); ++i) \
      R.AggregateVal[i].IntVal = \
        Src1.AggregateVal[i].IntVal.OP(Src2.AggregateVal[i].IntVal);

    // Macros to execute binary operation 'OP' over floating point type TY
    // (float or double) vectors
#define FLOAT_VECTOR_FUNCTION(OP, TY) \
    for (unsigned i = 0; i < R.AggregateVal.size(); ++i) \
      R.AggregateVal[i].TY = \
        Src1.AggregateVal[i].TY OP Src2.AggregateVal[i].TY;

    // Macros to choose appropriate TY: float or double and run operation
    // execution
#define FLOAT_VECTOR_OP(OP) { \
    if (dyn_cast<VectorType>(Ty)->getElementType()->isFloatTy()) \
      FLOAT_VECTOR_FUNCTION(OP, FloatVal) \
    else { \
      if (dyn_cast<VectorType>(Ty)->getElementType()->isDoubleTy()) \
        FLOAT_VECTOR_FUNCTION(OP, DoubleVal) \
      else { \
        dbgs() << "Unhandled type for OP instruction: " << *Ty << "\n"; \
        llvm_unreachable(0); \
      } \
    } \
  }
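
    // Editor's sketch (illustrative, not part of the original source): for a
    // <N x float> operand, FLOAT_VECTOR_OP(+) reduces to the element-wise
    // loop below; the double case is identical with DoubleVal:
    //
    //   for (unsigned i = 0; i < R.AggregateVal.size(); ++i)
    //     R.AggregateVal[i].FloatVal =
    //       Src1.AggregateVal[i].FloatVal + Src2.AggregateVal[i].FloatVal;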
    switch (I.getOpcode()) {
    default:
      dbgs() << "Don't know how to handle this binary operator!\n-->" << I;
      llvm_unreachable(0);
      break;
    case Instruction::Add:  INTEGER_VECTOR_OPERATION(+) break;
    case Instruction::Sub:  INTEGER_VECTOR_OPERATION(-) break;
    case Instruction::Mul:  INTEGER_VECTOR_OPERATION(*) break;
    case Instruction::UDiv: INTEGER_VECTOR_FUNCTION(udiv) break;
    case Instruction::SDiv: INTEGER_VECTOR_FUNCTION(sdiv) break;
    case Instruction::URem: INTEGER_VECTOR_FUNCTION(urem) break;
    case Instruction::SRem: INTEGER_VECTOR_FUNCTION(srem) break;
    case Instruction::And:  INTEGER_VECTOR_OPERATION(&) break;
    case Instruction::Or:   INTEGER_VECTOR_OPERATION(|) break;
    case Instruction::Xor:  INTEGER_VECTOR_OPERATION(^) break;
    case Instruction::FAdd: FLOAT_VECTOR_OP(+) break;
    case Instruction::FSub: FLOAT_VECTOR_OP(-) break;
    case Instruction::FMul: FLOAT_VECTOR_OP(*) break;
    case Instruction::FDiv: FLOAT_VECTOR_OP(/) break;
    case Instruction::FRem:
      if (dyn_cast<VectorType>(Ty)->getElementType()->isFloatTy())
        for (unsigned i = 0; i < R.AggregateVal.size(); ++i)
          R.AggregateVal[i].FloatVal =
            fmod(Src1.AggregateVal[i].FloatVal, Src2.AggregateVal[i].FloatVal);
      else {
        if (dyn_cast<VectorType>(Ty)->getElementType()->isDoubleTy())
          for (unsigned i = 0; i < R.AggregateVal.size(); ++i)
            R.AggregateVal[i].DoubleVal =
              fmod(Src1.AggregateVal[i].DoubleVal,
                   Src2.AggregateVal[i].DoubleVal);
        else {
          dbgs() << "Unhandled type for Rem instruction: " << *Ty << "\n";
          llvm_unreachable(0);
        }
      }
      break;
    }
  } else {
    switch (I.getOpcode()) {
    default:
      dbgs() << "Don't know how to handle this binary operator!\n-->" << I;
      llvm_unreachable(0);
      break;
    case Instruction::Add:  R.IntVal = Src1.IntVal + Src2.IntVal; break;
    case Instruction::Sub:  R.IntVal = Src1.IntVal - Src2.IntVal; break;
    case Instruction::Mul:  R.IntVal = Src1.IntVal * Src2.IntVal; break;
    case Instruction::FAdd: executeFAddInst(R, Src1, Src2, Ty); break;
    case Instruction::FSub: executeFSubInst(R, Src1, Src2, Ty); break;
    case Instruction::FMul: executeFMulInst(R, Src1, Src2, Ty); break;
    case Instruction::FDiv: executeFDivInst(R, Src1, Src2, Ty); break;
    case Instruction::FRem: executeFRemInst(R, Src1, Src2, Ty); break;
    case Instruction::UDiv: R.IntVal = Src1.IntVal.udiv(Src2.IntVal); break;
    case Instruction::SDiv: R.IntVal = Src1.IntVal.sdiv(Src2.IntVal); break;
    case Instruction::URem: R.IntVal = Src1.IntVal.urem(Src2.IntVal); break;
    case Instruction::SRem: R.IntVal = Src1.IntVal.srem(Src2.IntVal); break;
    case Instruction::And:  R.IntVal = Src1.IntVal & Src2.IntVal; break;
    case Instruction::Or:   R.IntVal = Src1.IntVal | Src2.IntVal; break;
    case Instruction::Xor:  R.IntVal = Src1.IntVal ^ Src2.IntVal; break;
    }
  }
  SetValue(&I, R, SF);
}
static GenericValue executeSelectInst(GenericValue Src1, GenericValue Src2,
                                      GenericValue Src3, const Type *Ty) {
  GenericValue Dest;
  if(Ty->isVectorTy()) {
    assert(Src1.AggregateVal.size() == Src2.AggregateVal.size());
    assert(Src2.AggregateVal.size() == Src3.AggregateVal.size());
    Dest.AggregateVal.resize( Src1.AggregateVal.size() );
    for (size_t i = 0; i < Src1.AggregateVal.size(); ++i)
      Dest.AggregateVal[i] = (Src1.AggregateVal[i].IntVal == 0) ?
        Src3.AggregateVal[i] : Src2.AggregateVal[i];
  } else {
    Dest = (Src1.IntVal == 0) ? Src3 : Src2;
  }
  return Dest;
}

void Interpreter::visitSelectInst(SelectInst &I) {
  ExecutionContext &SF = ECStack.back();
  const Type * Ty = I.getOperand(0)->getType();
  GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
  GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
  GenericValue Src3 = getOperandValue(I.getOperand(2), SF);
  GenericValue R = executeSelectInst(Src1, Src2, Src3, Ty);
  SetValue(&I, R, SF);
}
//===----------------------------------------------------------------------===//
//                     Terminator Instruction Implementations
//===----------------------------------------------------------------------===//

void Interpreter::exitCalled(GenericValue GV) {
  // runAtExitHandlers() assumes there are no stack frames, but
  // if exit() was called, then it had a stack frame. Blow away
  // the stack before interpreting atexit handlers.
  ECStack.clear();
  runAtExitHandlers();
  exit(GV.IntVal.zextOrTrunc(32).getZExtValue());
}

/// Pop the last stack frame off of ECStack and then copy the result
/// back into the result variable if we are not returning void. The
/// result variable may be the ExitValue, or the Value of the calling
/// CallInst if there was a previous stack frame. This method may
/// invalidate any ECStack iterators you have. This method also takes
/// care of switching to the normal destination BB, if we are returning
/// from an invoke.
///
void Interpreter::popStackAndReturnValueToCaller(Type *RetTy,
                                                 GenericValue Result) {
  // Pop the current stack frame.
  ECStack.pop_back();

  if (ECStack.empty()) {  // Finished main.  Put result into exit code...
    if (RetTy && !RetTy->isVoidTy()) {  // Nonvoid return type?
      ExitValue = Result;  // Capture the exit value of the program
    } else {
      memset(&ExitValue.Untyped, 0, sizeof(ExitValue.Untyped));
    }
  } else {
    // If we have a previous stack frame, and we have a previous call,
    // fill in the return value...
    ExecutionContext &CallingSF = ECStack.back();
    if (Instruction *I = CallingSF.Caller.getInstruction()) {
      // Save result...
      if (!CallingSF.Caller.getType()->isVoidTy())
        SetValue(I, Result, CallingSF);
      if (InvokeInst *II = dyn_cast<InvokeInst>(I))
        SwitchToNewBasicBlock(II->getNormalDest(), CallingSF);
      CallingSF.Caller = CallSite();  // We returned from the call...
    }
  }
}
void Interpreter::visitReturnInst(ReturnInst &I) {
  ExecutionContext &SF = ECStack.back();
  Type *RetTy = Type::getVoidTy(I.getContext());
  GenericValue Result;

  // Save away the return value... (if we are not 'ret void')
  if (I.getNumOperands()) {
    RetTy  = I.getReturnValue()->getType();
    Result = getOperandValue(I.getReturnValue(), SF);
  }

  popStackAndReturnValueToCaller(RetTy, Result);
}

void Interpreter::visitUnreachableInst(UnreachableInst &I) {
  report_fatal_error("Program executed an 'unreachable' instruction!");
}

void Interpreter::visitBranchInst(BranchInst &I) {
  ExecutionContext &SF = ECStack.back();
  BasicBlock *Dest;

  Dest = I.getSuccessor(0);  // Uncond branches have a fixed dest...
  if (!I.isUnconditional()) {
    Value *Cond = I.getCondition();
    if (getOperandValue(Cond, SF).IntVal == 0)  // If false cond...
      Dest = I.getSuccessor(1);
  }
  SwitchToNewBasicBlock(Dest, SF);
}
void Interpreter::visitSwitchInst(SwitchInst &I) {
  ExecutionContext &SF = ECStack.back();
  Value* Cond = I.getCondition();
  Type *ElTy = Cond->getType();
  GenericValue CondVal = getOperandValue(Cond, SF);

  // Check to see if any of the cases match...
  BasicBlock *Dest = 0;
  for (SwitchInst::CaseIt i = I.case_begin(), e = I.case_end(); i != e; ++i) {
    IntegersSubset& Case = i.getCaseValueEx();
    if (Case.isSingleNumber()) {
      // FIXME: Currently work with ConstantInt based numbers.
      const ConstantInt *CI = Case.getSingleNumber(0).toConstantInt();
      GenericValue Val = getOperandValue(const_cast<ConstantInt*>(CI), SF);
      if (executeICMP_EQ(Val, CondVal, ElTy).IntVal != 0) {
        Dest = cast<BasicBlock>(i.getCaseSuccessor());
        break;
      }
    }
    if (Case.isSingleNumbersOnly()) {
      for (unsigned n = 0, en = Case.getNumItems(); n != en; ++n) {
        // FIXME: Currently work with ConstantInt based numbers.
        const ConstantInt *CI = Case.getSingleNumber(n).toConstantInt();
        GenericValue Val = getOperandValue(const_cast<ConstantInt*>(CI), SF);
        if (executeICMP_EQ(Val, CondVal, ElTy).IntVal != 0) {
          Dest = cast<BasicBlock>(i.getCaseSuccessor());
          break;
        }
      }
    } else
      for (unsigned n = 0, en = Case.getNumItems(); n != en; ++n) {
        IntegersSubset::Range r = Case.getItem(n);
        // FIXME: Currently work with ConstantInt based numbers.
        const ConstantInt *LowCI = r.getLow().toConstantInt();
        const ConstantInt *HighCI = r.getHigh().toConstantInt();
        GenericValue Low = getOperandValue(const_cast<ConstantInt*>(LowCI), SF);
        GenericValue High = getOperandValue(const_cast<ConstantInt*>(HighCI),
                                            SF);
        if (executeICMP_ULE(Low, CondVal, ElTy).IntVal != 0 &&
            executeICMP_ULE(CondVal, High, ElTy).IntVal != 0) {
          Dest = cast<BasicBlock>(i.getCaseSuccessor());
          break;
        }
      }
  }
  if (!Dest) Dest = I.getDefaultDest();  // No cases matched: use default
  SwitchToNewBasicBlock(Dest, SF);
}
void Interpreter::visitIndirectBrInst(IndirectBrInst &I) {
  ExecutionContext &SF = ECStack.back();
  void *Dest = GVTOP(getOperandValue(I.getAddress(), SF));
  SwitchToNewBasicBlock((BasicBlock*)Dest, SF);
}

// SwitchToNewBasicBlock - This method is used to jump to a new basic block.
// This function handles the actual updating of block and instruction iterators
// as well as execution of all of the PHI nodes in the destination block.
//
// This method does this because all of the PHI nodes must be executed
// atomically, reading their inputs before any of the results are updated.  Not
// doing this can cause problems if the PHI nodes depend on other PHI nodes for
// their inputs.  If the input PHI node is updated before it is read, incorrect
// results can happen.  Thus we use a two phase approach.
//
void Interpreter::SwitchToNewBasicBlock(BasicBlock *Dest, ExecutionContext &SF){
  BasicBlock *PrevBB = SF.CurBB;      // Remember where we came from...
  SF.CurBB   = Dest;                  // Update CurBB to branch destination
  SF.CurInst = SF.CurBB->begin();     // Update new instruction ptr...
  if (!isa<PHINode>(SF.CurInst)) return;  // Nothing fancy to do

  // Loop over all of the PHI nodes in the current block, reading their inputs.
  std::vector<GenericValue> ResultValues;

  for (; PHINode *PN = dyn_cast<PHINode>(SF.CurInst); ++SF.CurInst) {
    // Search for the value corresponding to this previous bb...
    int i = PN->getBasicBlockIndex(PrevBB);
    assert(i != -1 && "PHINode doesn't contain entry for predecessor??");
    Value *IncomingValue = PN->getIncomingValue(i);

    // Save the incoming value for this PHI node...
    ResultValues.push_back(getOperandValue(IncomingValue, SF));
  }

  // Now loop over all of the PHI nodes setting their values...
  SF.CurInst = SF.CurBB->begin();
  for (unsigned i = 0; isa<PHINode>(SF.CurInst); ++SF.CurInst, ++i) {
    PHINode *PN = cast<PHINode>(SF.CurInst);
    SetValue(PN, ResultValues[i], SF);
  }
}
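
// Editor's example (illustrative, not part of the original source): consider
// a destination block whose PHI nodes reference each other, e.g. a value swap:
//
//   %a = phi i32 [ %b, %pred ], ...
//   %b = phi i32 [ %a, %pred ], ...
//
// The second PHI must see the value %a held in the predecessor, not the value
// the first PHI just wrote.  Reading every incoming value into ResultValues
// before any SetValue call is what makes the swap come out right.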
//===----------------------------------------------------------------------===//
//                     Memory Instruction Implementations
//===----------------------------------------------------------------------===//

void Interpreter::visitAllocaInst(AllocaInst &I) {
  ExecutionContext &SF = ECStack.back();

  Type *Ty = I.getType()->getElementType();  // Type to be allocated

  // Get the number of elements being allocated by the array...
  unsigned NumElements =
    getOperandValue(I.getOperand(0), SF).IntVal.getZExtValue();

  unsigned TypeSize = (size_t)TD.getTypeAllocSize(Ty);

  // Avoid malloc-ing zero bytes, use max()...
  unsigned MemToAlloc = std::max(1U, NumElements * TypeSize);

  // Allocate enough memory to hold the type...
  void *Memory = malloc(MemToAlloc);

  DEBUG(dbgs() << "Allocated Type: " << *Ty << " (" << TypeSize << " bytes) x "
               << NumElements << " (Total: " << MemToAlloc << ") at "
               << uintptr_t(Memory) << '\n');

  GenericValue Result = PTOGV(Memory);
  assert(Result.PointerVal != 0 && "Null pointer returned by malloc!");
  SetValue(&I, Result, SF);

  if (I.getOpcode() == Instruction::Alloca)
    ECStack.back().Allocas.add(Memory);
}
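
// Editor's worked example (illustrative, not part of the original source):
// for "%p = alloca i32, i32 4" with a 4-byte i32 alloc size, NumElements is 4
// and TypeSize is 4, so MemToAlloc = max(1u, 4 * 4) = 16 bytes.  The max()
// guard only matters when the product is zero, avoiding malloc(0).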
// getElementOffset - The workhorse for getelementptr.
//
GenericValue Interpreter::executeGEPOperation(Value *Ptr, gep_type_iterator I,
                                              gep_type_iterator E,
                                              ExecutionContext &SF) {
  assert(Ptr->getType()->isPointerTy() &&
         "Cannot getElementOffset of a nonpointer type!");

  uint64_t Total = 0;
  for (; I != E; ++I) {
    if (StructType *STy = dyn_cast<StructType>(*I)) {
      const StructLayout *SLO = TD.getStructLayout(STy);

      const ConstantInt *CPU = cast<ConstantInt>(I.getOperand());
      unsigned Index = unsigned(CPU->getZExtValue());

      Total += SLO->getElementOffset(Index);
    } else {
      SequentialType *ST = cast<SequentialType>(*I);
      // Get the index number for the array... which must be long type...
      GenericValue IdxGV = getOperandValue(I.getOperand(), SF);

      int64_t Idx;
      unsigned BitWidth =
        cast<IntegerType>(I.getOperand()->getType())->getBitWidth();
      if (BitWidth == 32)
        Idx = (int64_t)(int32_t)IdxGV.IntVal.getZExtValue();
      else {
        assert(BitWidth == 64 && "Invalid index type for getelementptr");
        Idx = (int64_t)IdxGV.IntVal.getZExtValue();
      }
      Total += TD.getTypeAllocSize(ST->getElementType())*Idx;
    }
  }

  GenericValue Result;
  Result.PointerVal = ((char*)getOperandValue(Ptr, SF).PointerVal) + Total;
  DEBUG(dbgs() << "GEP Index " << Total << " bytes.\n");
  return Result;
}
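
// Editor's worked example (illustrative, not part of the original source):
// assuming a typical layout where { i32, double } occupies 16 bytes with
// field 1 at offset 8, "getelementptr { i32, double }* %p, i64 1, i32 1"
// accumulates Total = 1 * 16 (array step over the struct) + 8 (field offset),
// so the result is the base pointer plus 24 bytes.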
void Interpreter::visitGetElementPtrInst(GetElementPtrInst &I) {
  ExecutionContext &SF = ECStack.back();
  SetValue(&I, executeGEPOperation(I.getPointerOperand(),
                                   gep_type_begin(I), gep_type_end(I), SF), SF);
}

void Interpreter::visitLoadInst(LoadInst &I) {
  ExecutionContext &SF = ECStack.back();
  GenericValue SRC = getOperandValue(I.getPointerOperand(), SF);
  GenericValue *Ptr = (GenericValue*)GVTOP(SRC);
  GenericValue Result;
  LoadValueFromMemory(Result, Ptr, I.getType());
  SetValue(&I, Result, SF);
  if (I.isVolatile() && PrintVolatile)
    dbgs() << "Volatile load " << I;
}

void Interpreter::visitStoreInst(StoreInst &I) {
  ExecutionContext &SF = ECStack.back();
  GenericValue Val = getOperandValue(I.getOperand(0), SF);
  GenericValue SRC = getOperandValue(I.getPointerOperand(), SF);
  StoreValueToMemory(Val, (GenericValue *)GVTOP(SRC),
                     I.getOperand(0)->getType());
  if (I.isVolatile() && PrintVolatile)
    dbgs() << "Volatile store: " << I;
}
//===----------------------------------------------------------------------===//
//                 Miscellaneous Instruction Implementations
//===----------------------------------------------------------------------===//

void Interpreter::visitCallSite(CallSite CS) {
  ExecutionContext &SF = ECStack.back();

  // Check to see if this is an intrinsic function call...
  Function *F = CS.getCalledFunction();
  if (F && F->isDeclaration())
    switch (F->getIntrinsicID()) {
    case Intrinsic::not_intrinsic:
      break;
    case Intrinsic::vastart: { // va_start
      GenericValue ArgIndex;
      ArgIndex.UIntPairVal.first = ECStack.size() - 1;
      ArgIndex.UIntPairVal.second = 0;
      SetValue(CS.getInstruction(), ArgIndex, SF);
      return;
    }
    case Intrinsic::vaend:    // va_end is a noop for the interpreter
      return;
    case Intrinsic::vacopy:   // va_copy: dest = src
      SetValue(CS.getInstruction(), getOperandValue(*CS.arg_begin(), SF), SF);
      return;
    default:
      // If it is an unknown intrinsic function, use the intrinsic lowering
      // class to transform it into hopefully tasty LLVM code.
      //
      BasicBlock::iterator me(CS.getInstruction());
      BasicBlock *Parent = CS.getInstruction()->getParent();
      bool atBegin(Parent->begin() == me);
      if (!atBegin)
        --me;
      IL->LowerIntrinsicCall(cast<CallInst>(CS.getInstruction()));

      // Restore the CurInst pointer to the first instruction newly inserted,
      // if any.
      if (atBegin) {
        SF.CurInst = Parent->begin();
      } else {
        SF.CurInst = me;
        ++SF.CurInst;
      }
      return;
    }

  SF.Caller = CS;
  std::vector<GenericValue> ArgVals;
  const unsigned NumArgs = SF.Caller.arg_size();
  ArgVals.reserve(NumArgs);
  uint16_t pNum = 1;
  for (CallSite::arg_iterator i = SF.Caller.arg_begin(),
         e = SF.Caller.arg_end(); i != e; ++i, ++pNum) {
    Value *V = *i;
    ArgVals.push_back(getOperandValue(V, SF));
  }

  // To handle indirect calls, we must get the pointer value from the argument
  // and treat it as a function pointer.
  GenericValue SRC = getOperandValue(SF.Caller.getCalledValue(), SF);
  callFunction((Function*)GVTOP(SRC), ArgVals);
}
// Auxiliary function for shift operations.
static unsigned getShiftAmount(uint64_t orgShiftAmount,
                               llvm::APInt valueToShift) {
  unsigned valueWidth = valueToShift.getBitWidth();
  if (orgShiftAmount < (uint64_t)valueWidth)
    return orgShiftAmount;
  // According to the LLVM documentation, if orgShiftAmount >= valueWidth the
  // result is undefined; we instead shift by this rule:
  return (NextPowerOf2(valueWidth-1) - 1) & orgShiftAmount;
}
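
// Editor's worked example (illustrative, not part of the original source):
// for a 32-bit value, NextPowerOf2(31) is 32, so the mask is 31 and an
// out-of-range shift amount wraps modulo the bit width:
//
//   APInt V(32, 1);
//   unsigned Amt = getShiftAmount(35, V);  // 35 & 31 == 3
//   APInt R = V.shl(Amt);                  // yields 8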
void Interpreter::visitShl(BinaryOperator &I) {
  ExecutionContext &SF = ECStack.back();
  GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
  GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
  GenericValue Dest;
  const Type *Ty = I.getType();

  if (Ty->isVectorTy()) {
    uint32_t src1Size = uint32_t(Src1.AggregateVal.size());
    assert(src1Size == Src2.AggregateVal.size());
    for (unsigned i = 0; i < src1Size; i++) {
      GenericValue Result;
      uint64_t shiftAmount = Src2.AggregateVal[i].IntVal.getZExtValue();
      llvm::APInt valueToShift = Src1.AggregateVal[i].IntVal;
      Result.IntVal = valueToShift.shl(getShiftAmount(shiftAmount,
                                                      valueToShift));
      Dest.AggregateVal.push_back(Result);
    }
  } else {
    // scalar
    uint64_t shiftAmount = Src2.IntVal.getZExtValue();
    llvm::APInt valueToShift = Src1.IntVal;
    Dest.IntVal = valueToShift.shl(getShiftAmount(shiftAmount, valueToShift));
  }

  SetValue(&I, Dest, SF);
}

void Interpreter::visitLShr(BinaryOperator &I) {
  ExecutionContext &SF = ECStack.back();
  GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
  GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
  GenericValue Dest;
  const Type *Ty = I.getType();

  if (Ty->isVectorTy()) {
    uint32_t src1Size = uint32_t(Src1.AggregateVal.size());
    assert(src1Size == Src2.AggregateVal.size());
    for (unsigned i = 0; i < src1Size; i++) {
      GenericValue Result;
      uint64_t shiftAmount = Src2.AggregateVal[i].IntVal.getZExtValue();
      llvm::APInt valueToShift = Src1.AggregateVal[i].IntVal;
      Result.IntVal = valueToShift.lshr(getShiftAmount(shiftAmount,
                                                       valueToShift));
      Dest.AggregateVal.push_back(Result);
    }
  } else {
    // scalar
    uint64_t shiftAmount = Src2.IntVal.getZExtValue();
    llvm::APInt valueToShift = Src1.IntVal;
    Dest.IntVal = valueToShift.lshr(getShiftAmount(shiftAmount, valueToShift));
  }

  SetValue(&I, Dest, SF);
}

void Interpreter::visitAShr(BinaryOperator &I) {
  ExecutionContext &SF = ECStack.back();
  GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
  GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
  GenericValue Dest;
  const Type *Ty = I.getType();

  if (Ty->isVectorTy()) {
    size_t src1Size = Src1.AggregateVal.size();
    assert(src1Size == Src2.AggregateVal.size());
    for (unsigned i = 0; i < src1Size; i++) {
      GenericValue Result;
      uint64_t shiftAmount = Src2.AggregateVal[i].IntVal.getZExtValue();
      llvm::APInt valueToShift = Src1.AggregateVal[i].IntVal;
      Result.IntVal = valueToShift.ashr(getShiftAmount(shiftAmount,
                                                       valueToShift));
      Dest.AggregateVal.push_back(Result);
    }
  } else {
    // scalar
    uint64_t shiftAmount = Src2.IntVal.getZExtValue();
    llvm::APInt valueToShift = Src1.IntVal;
    Dest.IntVal = valueToShift.ashr(getShiftAmount(shiftAmount, valueToShift));
  }

  SetValue(&I, Dest, SF);
}
GenericValue Interpreter::executeTruncInst(Value *SrcVal, Type *DstTy,
                                           ExecutionContext &SF) {
  GenericValue Dest, Src = getOperandValue(SrcVal, SF);
  Type *SrcTy = SrcVal->getType();
  if (SrcTy->isVectorTy()) {
    Type *DstVecTy = DstTy->getScalarType();
    unsigned DBitWidth = cast<IntegerType>(DstVecTy)->getBitWidth();
    unsigned NumElts = Src.AggregateVal.size();
    // the sizes of src and dst vectors must be equal
    Dest.AggregateVal.resize(NumElts);
    for (unsigned i = 0; i < NumElts; i++)
      Dest.AggregateVal[i].IntVal = Src.AggregateVal[i].IntVal.trunc(DBitWidth);
  } else {
    IntegerType *DITy = cast<IntegerType>(DstTy);
    unsigned DBitWidth = DITy->getBitWidth();
    Dest.IntVal = Src.IntVal.trunc(DBitWidth);
  }
  return Dest;
}

GenericValue Interpreter::executeSExtInst(Value *SrcVal, Type *DstTy,
                                          ExecutionContext &SF) {
  const Type *SrcTy = SrcVal->getType();
  GenericValue Dest, Src = getOperandValue(SrcVal, SF);
  if (SrcTy->isVectorTy()) {
    const Type *DstVecTy = DstTy->getScalarType();
    unsigned DBitWidth = cast<IntegerType>(DstVecTy)->getBitWidth();
    unsigned size = Src.AggregateVal.size();
    // the sizes of src and dst vectors must be equal.
    Dest.AggregateVal.resize(size);
    for (unsigned i = 0; i < size; i++)
      Dest.AggregateVal[i].IntVal = Src.AggregateVal[i].IntVal.sext(DBitWidth);
  } else {
    const IntegerType *DITy = cast<IntegerType>(DstTy);
    unsigned DBitWidth = DITy->getBitWidth();
    Dest.IntVal = Src.IntVal.sext(DBitWidth);
  }
  return Dest;
}

GenericValue Interpreter::executeZExtInst(Value *SrcVal, Type *DstTy,
                                          ExecutionContext &SF) {
  const Type *SrcTy = SrcVal->getType();
  GenericValue Dest, Src = getOperandValue(SrcVal, SF);
  if (SrcTy->isVectorTy()) {
    const Type *DstVecTy = DstTy->getScalarType();
    unsigned DBitWidth = cast<IntegerType>(DstVecTy)->getBitWidth();
    unsigned size = Src.AggregateVal.size();
    // the sizes of src and dst vectors must be equal.
    Dest.AggregateVal.resize(size);
    for (unsigned i = 0; i < size; i++)
      Dest.AggregateVal[i].IntVal = Src.AggregateVal[i].IntVal.zext(DBitWidth);
  } else {
    const IntegerType *DITy = cast<IntegerType>(DstTy);
    unsigned DBitWidth = DITy->getBitWidth();
    Dest.IntVal = Src.IntVal.zext(DBitWidth);
  }
  return Dest;
}
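
// A minimal illustrative sketch (not part of the original source): the three
// integer casts above only resize the APInt payload, so the destination bit
// width must be recovered from DstTy because GenericValue itself is untyped.
//
//   APInt V(32, 0xFFFFFF80);   // i32 -128
//   V.trunc(8);                // i8 0x80
//   APInt W(8, 0x80);
//   W.zext(32);                // i32 0x00000080 (+128)
//   W.sext(32);                // i32 0xFFFFFF80 (-128)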

GenericValue Interpreter::executeFPTruncInst(Value *SrcVal, Type *DstTy,
                                             ExecutionContext &SF) {
  GenericValue Dest, Src = getOperandValue(SrcVal, SF);

  if (SrcVal->getType()->getTypeID() == Type::VectorTyID) {
    assert(SrcVal->getType()->getScalarType()->isDoubleTy() &&
           DstTy->getScalarType()->isFloatTy() &&
           "Invalid FPTrunc instruction");

    unsigned size = Src.AggregateVal.size();
    // the sizes of src and dst vectors must be equal.
    Dest.AggregateVal.resize(size);
    for (unsigned i = 0; i < size; i++)
      Dest.AggregateVal[i].FloatVal = (float)Src.AggregateVal[i].DoubleVal;
  } else {
    assert(SrcVal->getType()->isDoubleTy() && DstTy->isFloatTy() &&
           "Invalid FPTrunc instruction");
    Dest.FloatVal = (float)Src.DoubleVal;
  }

  return Dest;
}

GenericValue Interpreter::executeFPExtInst(Value *SrcVal, Type *DstTy,
                                           ExecutionContext &SF) {
  GenericValue Dest, Src = getOperandValue(SrcVal, SF);

  if (SrcVal->getType()->getTypeID() == Type::VectorTyID) {
    assert(SrcVal->getType()->getScalarType()->isFloatTy() &&
           DstTy->getScalarType()->isDoubleTy() && "Invalid FPExt instruction");

    unsigned size = Src.AggregateVal.size();
    // the sizes of src and dst vectors must be equal.
    Dest.AggregateVal.resize(size);
    for (unsigned i = 0; i < size; i++)
      Dest.AggregateVal[i].DoubleVal = (double)Src.AggregateVal[i].FloatVal;
  } else {
    assert(SrcVal->getType()->isFloatTy() && DstTy->isDoubleTy() &&
           "Invalid FPExt instruction");
    Dest.DoubleVal = (double)Src.FloatVal;
  }

  return Dest;
}

GenericValue Interpreter::executeFPToUIInst(Value *SrcVal, Type *DstTy,
                                            ExecutionContext &SF) {
  Type *SrcTy = SrcVal->getType();
  GenericValue Dest, Src = getOperandValue(SrcVal, SF);

  if (SrcTy->getTypeID() == Type::VectorTyID) {
    const Type *DstVecTy = DstTy->getScalarType();
    const Type *SrcVecTy = SrcTy->getScalarType();
    uint32_t DBitWidth = cast<IntegerType>(DstVecTy)->getBitWidth();
    unsigned size = Src.AggregateVal.size();
    // the sizes of src and dst vectors must be equal.
    Dest.AggregateVal.resize(size);

    if (SrcVecTy->getTypeID() == Type::FloatTyID) {
      assert(SrcVecTy->isFloatingPointTy() && "Invalid FPToUI instruction");
      for (unsigned i = 0; i < size; i++)
        Dest.AggregateVal[i].IntVal = APIntOps::RoundFloatToAPInt(
            Src.AggregateVal[i].FloatVal, DBitWidth);
    } else {
      for (unsigned i = 0; i < size; i++)
        Dest.AggregateVal[i].IntVal = APIntOps::RoundDoubleToAPInt(
            Src.AggregateVal[i].DoubleVal, DBitWidth);
    }
  } else {
    // scalar
    uint32_t DBitWidth = cast<IntegerType>(DstTy)->getBitWidth();
    assert(SrcTy->isFloatingPointTy() && "Invalid FPToUI instruction");

    if (SrcTy->getTypeID() == Type::FloatTyID)
      Dest.IntVal = APIntOps::RoundFloatToAPInt(Src.FloatVal, DBitWidth);
    else {
      Dest.IntVal = APIntOps::RoundDoubleToAPInt(Src.DoubleVal, DBitWidth);
    }
  }

  return Dest;
}

GenericValue Interpreter::executeFPToSIInst(Value *SrcVal, Type *DstTy,
                                            ExecutionContext &SF) {
  Type *SrcTy = SrcVal->getType();
  GenericValue Dest, Src = getOperandValue(SrcVal, SF);

  if (SrcTy->getTypeID() == Type::VectorTyID) {
    const Type *DstVecTy = DstTy->getScalarType();
    const Type *SrcVecTy = SrcTy->getScalarType();
    uint32_t DBitWidth = cast<IntegerType>(DstVecTy)->getBitWidth();
    unsigned size = Src.AggregateVal.size();
    // the sizes of src and dst vectors must be equal
    Dest.AggregateVal.resize(size);

    if (SrcVecTy->getTypeID() == Type::FloatTyID) {
      assert(SrcVecTy->isFloatingPointTy() && "Invalid FPToSI instruction");
      for (unsigned i = 0; i < size; i++)
        Dest.AggregateVal[i].IntVal = APIntOps::RoundFloatToAPInt(
            Src.AggregateVal[i].FloatVal, DBitWidth);
    } else {
      for (unsigned i = 0; i < size; i++)
        Dest.AggregateVal[i].IntVal = APIntOps::RoundDoubleToAPInt(
            Src.AggregateVal[i].DoubleVal, DBitWidth);
    }
  } else {
    // scalar
    unsigned DBitWidth = cast<IntegerType>(DstTy)->getBitWidth();
    assert(SrcTy->isFloatingPointTy() && "Invalid FPToSI instruction");

    if (SrcTy->getTypeID() == Type::FloatTyID)
      Dest.IntVal = APIntOps::RoundFloatToAPInt(Src.FloatVal, DBitWidth);
    else {
      Dest.IntVal = APIntOps::RoundDoubleToAPInt(Src.DoubleVal, DBitWidth);
    }
  }

  return Dest;
}

GenericValue Interpreter::executeUIToFPInst(Value *SrcVal, Type *DstTy,
                                            ExecutionContext &SF) {
  GenericValue Dest, Src = getOperandValue(SrcVal, SF);

  if (SrcVal->getType()->getTypeID() == Type::VectorTyID) {
    const Type *DstVecTy = DstTy->getScalarType();
    unsigned size = Src.AggregateVal.size();
    // the sizes of src and dst vectors must be equal
    Dest.AggregateVal.resize(size);

    if (DstVecTy->getTypeID() == Type::FloatTyID) {
      assert(DstVecTy->isFloatingPointTy() && "Invalid UIToFP instruction");
      for (unsigned i = 0; i < size; i++)
        Dest.AggregateVal[i].FloatVal =
            APIntOps::RoundAPIntToFloat(Src.AggregateVal[i].IntVal);
    } else {
      for (unsigned i = 0; i < size; i++)
        Dest.AggregateVal[i].DoubleVal =
            APIntOps::RoundAPIntToDouble(Src.AggregateVal[i].IntVal);
    }
  } else {
    // scalar
    assert(DstTy->isFloatingPointTy() && "Invalid UIToFP instruction");
    if (DstTy->getTypeID() == Type::FloatTyID)
      Dest.FloatVal = APIntOps::RoundAPIntToFloat(Src.IntVal);
    else {
      Dest.DoubleVal = APIntOps::RoundAPIntToDouble(Src.IntVal);
    }
  }

  return Dest;
}

GenericValue Interpreter::executeSIToFPInst(Value *SrcVal, Type *DstTy,
                                            ExecutionContext &SF) {
  GenericValue Dest, Src = getOperandValue(SrcVal, SF);

  if (SrcVal->getType()->getTypeID() == Type::VectorTyID) {
    const Type *DstVecTy = DstTy->getScalarType();
    unsigned size = Src.AggregateVal.size();
    // the sizes of src and dst vectors must be equal
    Dest.AggregateVal.resize(size);

    if (DstVecTy->getTypeID() == Type::FloatTyID) {
      assert(DstVecTy->isFloatingPointTy() && "Invalid SIToFP instruction");
      for (unsigned i = 0; i < size; i++)
        Dest.AggregateVal[i].FloatVal =
            APIntOps::RoundSignedAPIntToFloat(Src.AggregateVal[i].IntVal);
    } else {
      for (unsigned i = 0; i < size; i++)
        Dest.AggregateVal[i].DoubleVal =
            APIntOps::RoundSignedAPIntToDouble(Src.AggregateVal[i].IntVal);
    }
  } else {
    // scalar
    assert(DstTy->isFloatingPointTy() && "Invalid SIToFP instruction");
    if (DstTy->getTypeID() == Type::FloatTyID)
      Dest.FloatVal = APIntOps::RoundSignedAPIntToFloat(Src.IntVal);
    else {
      Dest.DoubleVal = APIntOps::RoundSignedAPIntToDouble(Src.IntVal);
    }
  }

  return Dest;
}
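
// A minimal illustrative sketch (not part of the original source): UIToFP and
// SIToFP above differ only in which APIntOps rounding helper reads the bits,
// which matters exactly when the sign bit is set.
//
//   APInt V(8, 0xFF);
//   APIntOps::RoundAPIntToFloat(V);        // 255.0f (unsigned reading)
//   APIntOps::RoundSignedAPIntToFloat(V);  // -1.0f  (signed reading)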

GenericValue Interpreter::executePtrToIntInst(Value *SrcVal, Type *DstTy,
                                              ExecutionContext &SF) {
  uint32_t DBitWidth = cast<IntegerType>(DstTy)->getBitWidth();
  GenericValue Dest, Src = getOperandValue(SrcVal, SF);
  assert(SrcVal->getType()->isPointerTy() && "Invalid PtrToInt instruction");

  Dest.IntVal = APInt(DBitWidth, (intptr_t) Src.PointerVal);
  return Dest;
}

GenericValue Interpreter::executeIntToPtrInst(Value *SrcVal, Type *DstTy,
                                              ExecutionContext &SF) {
  GenericValue Dest, Src = getOperandValue(SrcVal, SF);
  assert(DstTy->isPointerTy() && "Invalid IntToPtr instruction");

  uint32_t PtrSize = TD.getPointerSizeInBits();
  if (PtrSize != Src.IntVal.getBitWidth())
    Src.IntVal = Src.IntVal.zextOrTrunc(PtrSize);

  Dest.PointerVal = PointerTy(intptr_t(Src.IntVal.getZExtValue()));
  return Dest;
}
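
// An illustrative example (not part of the original source): zextOrTrunc
// first widens or narrows the integer to the target's pointer width, so on a
// hypothetical 64-bit target
//
//   %p = inttoptr i32 305419896 to i8*
//
// zero-extends 0x12345678 to 64 bits before the raw value is stored into
// Dest.PointerVal.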

GenericValue Interpreter::executeBitCastInst(Value *SrcVal, Type *DstTy,
                                             ExecutionContext &SF) {
  // This instruction supports bitwise conversion of vectors to integers and
  // to vectors of other types (as long as they have the same size).
  Type *SrcTy = SrcVal->getType();
  GenericValue Dest, Src = getOperandValue(SrcVal, SF);

  if ((SrcTy->getTypeID() == Type::VectorTyID) ||
      (DstTy->getTypeID() == Type::VectorTyID)) {
    // Vector src bitcast to vector dst, vector src bitcast to scalar dst, or
    // scalar src bitcast to vector dst.
    bool isLittleEndian = TD.isLittleEndian();
    GenericValue TempDst, TempSrc, SrcVec;
    const Type *SrcElemTy;
    const Type *DstElemTy;
    unsigned SrcBitSize;
    unsigned DstBitSize;
    unsigned SrcNum;
    unsigned DstNum;

    if (SrcTy->getTypeID() == Type::VectorTyID) {
      SrcElemTy = SrcTy->getScalarType();
      SrcBitSize = SrcTy->getScalarSizeInBits();
      SrcNum = Src.AggregateVal.size();
      SrcVec = Src;
    } else {
      // If src is a scalar value, make it a vector <1 x type>.
      SrcElemTy = SrcTy;
      SrcBitSize = SrcTy->getPrimitiveSizeInBits();
      SrcNum = 1;
      SrcVec.AggregateVal.push_back(Src);
    }

    if (DstTy->getTypeID() == Type::VectorTyID) {
      DstElemTy = DstTy->getScalarType();
      DstBitSize = DstTy->getScalarSizeInBits();
      DstNum = (SrcNum * SrcBitSize) / DstBitSize;
    } else {
      DstElemTy = DstTy;
      DstBitSize = DstTy->getPrimitiveSizeInBits();
      DstNum = 1;
    }

    if (SrcNum * SrcBitSize != DstNum * DstBitSize)
      llvm_unreachable("Invalid BitCast");

    // If src is floating point, cast to integer first.
    TempSrc.AggregateVal.resize(SrcNum);
    if (SrcElemTy->isFloatTy()) {
      for (unsigned i = 0; i < SrcNum; i++)
        TempSrc.AggregateVal[i].IntVal =
            APInt::floatToBits(SrcVec.AggregateVal[i].FloatVal);
    } else if (SrcElemTy->isDoubleTy()) {
      for (unsigned i = 0; i < SrcNum; i++)
        TempSrc.AggregateVal[i].IntVal =
            APInt::doubleToBits(SrcVec.AggregateVal[i].DoubleVal);
    } else if (SrcElemTy->isIntegerTy()) {
      for (unsigned i = 0; i < SrcNum; i++)
        TempSrc.AggregateVal[i].IntVal = SrcVec.AggregateVal[i].IntVal;
    } else {
      // Pointers are not allowed as the element type of a vector.
      llvm_unreachable("Invalid BitCast");
    }

    // Now TempSrc is a vector of integers.
    if (DstNum < SrcNum) {
      // Example: bitcast <4 x i32> <i32 0, i32 1, i32 2, i32 3> to <2 x i64>
      unsigned Ratio = SrcNum / DstNum;
      unsigned SrcElt = 0;
      for (unsigned i = 0; i < DstNum; i++) {
        GenericValue Elt;
        Elt.IntVal = 0;
        Elt.IntVal = Elt.IntVal.zext(DstBitSize);
        unsigned ShiftAmt = isLittleEndian ? 0 : SrcBitSize * (Ratio - 1);
        for (unsigned j = 0; j < Ratio; j++) {
          APInt Tmp;
          Tmp = Tmp.zext(SrcBitSize);
          Tmp = TempSrc.AggregateVal[SrcElt++].IntVal;
          Tmp = Tmp.zext(DstBitSize);
          Tmp = Tmp.shl(ShiftAmt);
          ShiftAmt += isLittleEndian ? SrcBitSize : -SrcBitSize;
          Elt.IntVal |= Tmp;
        }
        TempDst.AggregateVal.push_back(Elt);
      }
    } else {
      // Example: bitcast <2 x i64> <i64 0, i64 1> to <4 x i32>
      unsigned Ratio = DstNum / SrcNum;
      for (unsigned i = 0; i < SrcNum; i++) {
        unsigned ShiftAmt = isLittleEndian ? 0 : DstBitSize * (Ratio - 1);
        for (unsigned j = 0; j < Ratio; j++) {
          GenericValue Elt;
          Elt.IntVal = Elt.IntVal.zext(SrcBitSize);
          Elt.IntVal = TempSrc.AggregateVal[i].IntVal;
          Elt.IntVal = Elt.IntVal.lshr(ShiftAmt);
          // DstBitSize could equal SrcBitSize, so check before truncating.
          if (DstBitSize < SrcBitSize)
            Elt.IntVal = Elt.IntVal.trunc(DstBitSize);
          ShiftAmt += isLittleEndian ? DstBitSize : -DstBitSize;
          TempDst.AggregateVal.push_back(Elt);
        }
      }
    }

    // Convert the result from integer to the specified type.
    if (DstTy->getTypeID() == Type::VectorTyID) {
      if (DstElemTy->isDoubleTy()) {
        Dest.AggregateVal.resize(DstNum);
        for (unsigned i = 0; i < DstNum; i++)
          Dest.AggregateVal[i].DoubleVal =
              TempDst.AggregateVal[i].IntVal.bitsToDouble();
      } else if (DstElemTy->isFloatTy()) {
        Dest.AggregateVal.resize(DstNum);
        for (unsigned i = 0; i < DstNum; i++)
          Dest.AggregateVal[i].FloatVal =
              TempDst.AggregateVal[i].IntVal.bitsToFloat();
      } else {
        Dest = TempDst;
      }
    } else {
      if (DstElemTy->isDoubleTy())
        Dest.DoubleVal = TempDst.AggregateVal[0].IntVal.bitsToDouble();
      else if (DstElemTy->isFloatTy()) {
        Dest.FloatVal = TempDst.AggregateVal[0].IntVal.bitsToFloat();
      } else {
        Dest.IntVal = TempDst.AggregateVal[0].IntVal;
      }
    }
  } else { // scalar src bitcast to scalar dst
    if (DstTy->isPointerTy()) {
      assert(SrcTy->isPointerTy() && "Invalid BitCast");
      Dest.PointerVal = Src.PointerVal;
    } else if (DstTy->isIntegerTy()) {
      if (SrcTy->isFloatTy())
        Dest.IntVal = APInt::floatToBits(Src.FloatVal);
      else if (SrcTy->isDoubleTy()) {
        Dest.IntVal = APInt::doubleToBits(Src.DoubleVal);
      } else if (SrcTy->isIntegerTy()) {
        Dest.IntVal = Src.IntVal;
      } else {
        llvm_unreachable("Invalid BitCast");
      }
    } else if (DstTy->isFloatTy()) {
      if (SrcTy->isIntegerTy())
        Dest.FloatVal = Src.IntVal.bitsToFloat();
      else {
        Dest.FloatVal = Src.FloatVal;
      }
    } else if (DstTy->isDoubleTy()) {
      if (SrcTy->isIntegerTy())
        Dest.DoubleVal = Src.IntVal.bitsToDouble();
      else {
        Dest.DoubleVal = Src.DoubleVal;
      }
    } else {
      llvm_unreachable("Invalid BitCast");
    }
  }

  return Dest;
}
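
// A worked example (not part of the original source) of the DstNum > SrcNum
// path above, assuming a little-endian target:
//
//   bitcast <1 x i32> <i32 0x11223344> to <4 x i8>
//
// Here Ratio = 4 and ShiftAmt advances by DstBitSize = 8 each step, so
// successive destination elements take successively higher source bytes:
// <i8 0x44, i8 0x33, i8 0x22, i8 0x11>.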

void Interpreter::visitTruncInst(TruncInst &I) {
  ExecutionContext &SF = ECStack.back();
  SetValue(&I, executeTruncInst(I.getOperand(0), I.getType(), SF), SF);
}

void Interpreter::visitSExtInst(SExtInst &I) {
  ExecutionContext &SF = ECStack.back();
  SetValue(&I, executeSExtInst(I.getOperand(0), I.getType(), SF), SF);
}

void Interpreter::visitZExtInst(ZExtInst &I) {
  ExecutionContext &SF = ECStack.back();
  SetValue(&I, executeZExtInst(I.getOperand(0), I.getType(), SF), SF);
}

void Interpreter::visitFPTruncInst(FPTruncInst &I) {
  ExecutionContext &SF = ECStack.back();
  SetValue(&I, executeFPTruncInst(I.getOperand(0), I.getType(), SF), SF);
}

void Interpreter::visitFPExtInst(FPExtInst &I) {
  ExecutionContext &SF = ECStack.back();
  SetValue(&I, executeFPExtInst(I.getOperand(0), I.getType(), SF), SF);
}

void Interpreter::visitUIToFPInst(UIToFPInst &I) {
  ExecutionContext &SF = ECStack.back();
  SetValue(&I, executeUIToFPInst(I.getOperand(0), I.getType(), SF), SF);
}

void Interpreter::visitSIToFPInst(SIToFPInst &I) {
  ExecutionContext &SF = ECStack.back();
  SetValue(&I, executeSIToFPInst(I.getOperand(0), I.getType(), SF), SF);
}

void Interpreter::visitFPToUIInst(FPToUIInst &I) {
  ExecutionContext &SF = ECStack.back();
  SetValue(&I, executeFPToUIInst(I.getOperand(0), I.getType(), SF), SF);
}

void Interpreter::visitFPToSIInst(FPToSIInst &I) {
  ExecutionContext &SF = ECStack.back();
  SetValue(&I, executeFPToSIInst(I.getOperand(0), I.getType(), SF), SF);
}

void Interpreter::visitPtrToIntInst(PtrToIntInst &I) {
  ExecutionContext &SF = ECStack.back();
  SetValue(&I, executePtrToIntInst(I.getOperand(0), I.getType(), SF), SF);
}

void Interpreter::visitIntToPtrInst(IntToPtrInst &I) {
  ExecutionContext &SF = ECStack.back();
  SetValue(&I, executeIntToPtrInst(I.getOperand(0), I.getType(), SF), SF);
}

void Interpreter::visitBitCastInst(BitCastInst &I) {
  ExecutionContext &SF = ECStack.back();
  SetValue(&I, executeBitCastInst(I.getOperand(0), I.getType(), SF), SF);
}

#define IMPLEMENT_VAARG(TY) \
   case Type::TY##TyID: Dest.TY##Val = Src.TY##Val; break

void Interpreter::visitVAArgInst(VAArgInst &I) {
  ExecutionContext &SF = ECStack.back();

  // Get the incoming valist parameter. LLI treats the valist as an
  // (ec-stack-depth, var-arg-index) pair.
  GenericValue VAList = getOperandValue(I.getOperand(0), SF);
  GenericValue Dest;
  GenericValue Src = ECStack[VAList.UIntPairVal.first]
                         .VarArgs[VAList.UIntPairVal.second];
  Type *Ty = I.getType();
  switch (Ty->getTypeID()) {
  case Type::IntegerTyID:
    Dest.IntVal = Src.IntVal;
    break;
  IMPLEMENT_VAARG(Pointer);
  IMPLEMENT_VAARG(Float);
  IMPLEMENT_VAARG(Double);
  default:
    dbgs() << "Unhandled dest type for vaarg instruction: " << *Ty << "\n";
    llvm_unreachable(0);
  }

  // Set the Value of this Instruction.
  SetValue(&I, Dest, SF);

  // Move the pointer to the next vararg.
  ++VAList.UIntPairVal.second;
}
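
// Illustrative note (not part of the original source): the valist here is not
// a pointer into memory. UIntPairVal.first names the frame that owns the
// variadic actuals, and UIntPairVal.second indexes that frame's VarArgs
// vector, which callFunction (below) fills with the trailing call operands:
//
//   %v = va_arg i8** %ap, i32   ; reads ECStack[first].VarArgs[second]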

void Interpreter::visitExtractElementInst(ExtractElementInst &I) {
  ExecutionContext &SF = ECStack.back();
  GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
  GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
  GenericValue Dest;

  Type *Ty = I.getType();
  const unsigned indx = unsigned(Src2.IntVal.getZExtValue());

  if (Src1.AggregateVal.size() > indx) {
    switch (Ty->getTypeID()) {
    default:
      dbgs() << "Unhandled destination type for extractelement instruction: "
             << *Ty << "\n";
      llvm_unreachable(0);
      break;
    case Type::IntegerTyID:
      Dest.IntVal = Src1.AggregateVal[indx].IntVal;
      break;
    case Type::FloatTyID:
      Dest.FloatVal = Src1.AggregateVal[indx].FloatVal;
      break;
    case Type::DoubleTyID:
      Dest.DoubleVal = Src1.AggregateVal[indx].DoubleVal;
      break;
    }
  } else {
    dbgs() << "Invalid index in extractelement instruction\n";
  }

  SetValue(&I, Dest, SF);
}

void Interpreter::visitInsertElementInst(InsertElementInst &I) {
  ExecutionContext &SF = ECStack.back();
  Type *Ty = I.getType();

  if (!(Ty->isVectorTy()))
    llvm_unreachable("Unhandled dest type for insertelement instruction");

  GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
  GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
  GenericValue Src3 = getOperandValue(I.getOperand(2), SF);
  GenericValue Dest;

  Type *TyContained = Ty->getContainedType(0);

  const unsigned indx = unsigned(Src3.IntVal.getZExtValue());
  Dest.AggregateVal = Src1.AggregateVal;

  if (Src1.AggregateVal.size() <= indx)
    llvm_unreachable("Invalid index in insertelement instruction");

  switch (TyContained->getTypeID()) {
  default:
    llvm_unreachable("Unhandled dest type for insertelement instruction");
  case Type::IntegerTyID:
    Dest.AggregateVal[indx].IntVal = Src2.IntVal;
    break;
  case Type::FloatTyID:
    Dest.AggregateVal[indx].FloatVal = Src2.FloatVal;
    break;
  case Type::DoubleTyID:
    Dest.AggregateVal[indx].DoubleVal = Src2.DoubleVal;
    break;
  }
  SetValue(&I, Dest, SF);
}

void Interpreter::visitShuffleVectorInst(ShuffleVectorInst &I) {
  ExecutionContext &SF = ECStack.back();

  Type *Ty = I.getType();
  if (!(Ty->isVectorTy()))
    llvm_unreachable("Unhandled dest type for shufflevector instruction");

  GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
  GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
  GenericValue Src3 = getOperandValue(I.getOperand(2), SF);
  GenericValue Dest;

  // There is no need to check the types of src1 and src2, because the
  // compiled bytecode can't contain different types for src1 and src2 of a
  // shufflevector instruction.

  Type *TyContained = Ty->getContainedType(0);
  unsigned src1Size = (unsigned)Src1.AggregateVal.size();
  unsigned src2Size = (unsigned)Src2.AggregateVal.size();
  unsigned src3Size = (unsigned)Src3.AggregateVal.size();

  Dest.AggregateVal.resize(src3Size);

  switch (TyContained->getTypeID()) {
  default:
    llvm_unreachable("Unhandled dest type for shufflevector instruction");
    break;
  case Type::IntegerTyID:
    for (unsigned i = 0; i < src3Size; i++) {
      unsigned j = Src3.AggregateVal[i].IntVal.getZExtValue();
      if (j < src1Size)
        Dest.AggregateVal[i].IntVal = Src1.AggregateVal[j].IntVal;
      else if (j < src1Size + src2Size)
        Dest.AggregateVal[i].IntVal = Src2.AggregateVal[j - src1Size].IntVal;
      else
        // A mask element may not exceed the combined length of the first and
        // second operands, and llvm-as should reject a situation like
        //   %tmp = shufflevector <2 x i32> <i32 3, i32 4>, <2 x i32> undef,
        //                        <2 x i32> <i32 0, i32 5>,
        // where i32 5 is invalid, but keep an additional check here:
        llvm_unreachable("Invalid mask in shufflevector instruction");
    }
    break;
  case Type::FloatTyID:
    for (unsigned i = 0; i < src3Size; i++) {
      unsigned j = Src3.AggregateVal[i].IntVal.getZExtValue();
      if (j < src1Size)
        Dest.AggregateVal[i].FloatVal = Src1.AggregateVal[j].FloatVal;
      else if (j < src1Size + src2Size)
        Dest.AggregateVal[i].FloatVal = Src2.AggregateVal[j - src1Size].FloatVal;
      else
        llvm_unreachable("Invalid mask in shufflevector instruction");
    }
    break;
  case Type::DoubleTyID:
    for (unsigned i = 0; i < src3Size; i++) {
      unsigned j = Src3.AggregateVal[i].IntVal.getZExtValue();
      if (j < src1Size)
        Dest.AggregateVal[i].DoubleVal = Src1.AggregateVal[j].DoubleVal;
      else if (j < src1Size + src2Size)
        Dest.AggregateVal[i].DoubleVal =
            Src2.AggregateVal[j - src1Size].DoubleVal;
      else
        llvm_unreachable("Invalid mask in shufflevector instruction");
    }
    break;
  }
  SetValue(&I, Dest, SF);
}

GenericValue Interpreter::getConstantExprValue(ConstantExpr *CE,
                                               ExecutionContext &SF) {
  switch (CE->getOpcode()) {
  case Instruction::Trunc:
    return executeTruncInst(CE->getOperand(0), CE->getType(), SF);
  case Instruction::ZExt:
    return executeZExtInst(CE->getOperand(0), CE->getType(), SF);
  case Instruction::SExt:
    return executeSExtInst(CE->getOperand(0), CE->getType(), SF);
  case Instruction::FPTrunc:
    return executeFPTruncInst(CE->getOperand(0), CE->getType(), SF);
  case Instruction::FPExt:
    return executeFPExtInst(CE->getOperand(0), CE->getType(), SF);
  case Instruction::UIToFP:
    return executeUIToFPInst(CE->getOperand(0), CE->getType(), SF);
  case Instruction::SIToFP:
    return executeSIToFPInst(CE->getOperand(0), CE->getType(), SF);
  case Instruction::FPToUI:
    return executeFPToUIInst(CE->getOperand(0), CE->getType(), SF);
  case Instruction::FPToSI:
    return executeFPToSIInst(CE->getOperand(0), CE->getType(), SF);
  case Instruction::PtrToInt:
    return executePtrToIntInst(CE->getOperand(0), CE->getType(), SF);
  case Instruction::IntToPtr:
    return executeIntToPtrInst(CE->getOperand(0), CE->getType(), SF);
  case Instruction::BitCast:
    return executeBitCastInst(CE->getOperand(0), CE->getType(), SF);
  case Instruction::GetElementPtr:
    return executeGEPOperation(CE->getOperand(0), gep_type_begin(CE),
                               gep_type_end(CE), SF);
  case Instruction::FCmp:
  case Instruction::ICmp:
    return executeCmpInst(CE->getPredicate(),
                          getOperandValue(CE->getOperand(0), SF),
                          getOperandValue(CE->getOperand(1), SF),
                          CE->getOperand(0)->getType());
  case Instruction::Select:
    return executeSelectInst(getOperandValue(CE->getOperand(0), SF),
                             getOperandValue(CE->getOperand(1), SF),
                             getOperandValue(CE->getOperand(2), SF),
                             CE->getOperand(0)->getType());
  default:
    break;
  }

  // The cases below here require a GenericValue parameter for the result
  // so we initialize one, compute it and then return it.
  GenericValue Op0 = getOperandValue(CE->getOperand(0), SF);
  GenericValue Op1 = getOperandValue(CE->getOperand(1), SF);
  GenericValue Dest;
  Type *Ty = CE->getOperand(0)->getType();
  switch (CE->getOpcode()) {
  case Instruction::Add:  Dest.IntVal = Op0.IntVal + Op1.IntVal; break;
  case Instruction::Sub:  Dest.IntVal = Op0.IntVal - Op1.IntVal; break;
  case Instruction::Mul:  Dest.IntVal = Op0.IntVal * Op1.IntVal; break;
  case Instruction::FAdd: executeFAddInst(Dest, Op0, Op1, Ty); break;
  case Instruction::FSub: executeFSubInst(Dest, Op0, Op1, Ty); break;
  case Instruction::FMul: executeFMulInst(Dest, Op0, Op1, Ty); break;
  case Instruction::FDiv: executeFDivInst(Dest, Op0, Op1, Ty); break;
  case Instruction::FRem: executeFRemInst(Dest, Op0, Op1, Ty); break;
  case Instruction::SDiv: Dest.IntVal = Op0.IntVal.sdiv(Op1.IntVal); break;
  case Instruction::UDiv: Dest.IntVal = Op0.IntVal.udiv(Op1.IntVal); break;
  case Instruction::URem: Dest.IntVal = Op0.IntVal.urem(Op1.IntVal); break;
  case Instruction::SRem: Dest.IntVal = Op0.IntVal.srem(Op1.IntVal); break;
  case Instruction::And:  Dest.IntVal = Op0.IntVal & Op1.IntVal; break;
  case Instruction::Or:   Dest.IntVal = Op0.IntVal | Op1.IntVal; break;
  case Instruction::Xor:  Dest.IntVal = Op0.IntVal ^ Op1.IntVal; break;
  case Instruction::Shl:
    Dest.IntVal = Op0.IntVal.shl(Op1.IntVal.getZExtValue());
    break;
  case Instruction::LShr:
    Dest.IntVal = Op0.IntVal.lshr(Op1.IntVal.getZExtValue());
    break;
  case Instruction::AShr:
    Dest.IntVal = Op0.IntVal.ashr(Op1.IntVal.getZExtValue());
    break;
  default:
    dbgs() << "Unhandled ConstantExpr: " << *CE << "\n";
    llvm_unreachable("Unhandled ConstantExpr");
  }
  return Dest;
}
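
// An illustrative example (not part of the original source): constant
// expressions are folded on demand through the same execute* helpers as
// ordinary instructions, so an operand such as
//
//   i64 ptrtoint (i32* @G to i64)
//
// reaches the PtrToInt case above and is evaluated against the interpreter's
// own address for @G.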

GenericValue Interpreter::getOperandValue(Value *V, ExecutionContext &SF) {
  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) {
    return getConstantExprValue(CE, SF);
  } else if (Constant *CPV = dyn_cast<Constant>(V)) {
    return getConstantValue(CPV);
  } else if (GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
    return PTOGV(getPointerToGlobal(GV));
  } else {
    return SF.Values[V];
  }
}

//===----------------------------------------------------------------------===//
//                        Dispatch and Execution Code
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// callFunction - Execute the specified function...
//
void Interpreter::callFunction(Function *F,
                               const std::vector<GenericValue> &ArgVals) {
  assert((ECStack.empty() || ECStack.back().Caller.getInstruction() == 0 ||
          ECStack.back().Caller.arg_size() == ArgVals.size()) &&
         "Incorrect number of arguments passed into function call!");
  // Make a new stack frame... and fill it in.
  ECStack.push_back(ExecutionContext());
  ExecutionContext &StackFrame = ECStack.back();
  StackFrame.CurFunction = F;

  // Special handling for external functions.
  if (F->isDeclaration()) {
    GenericValue Result = callExternalFunction(F, ArgVals);
    // Simulate a 'ret' instruction of the appropriate type.
    popStackAndReturnValueToCaller(F->getReturnType(), Result);
    return;
  }

  // Get pointers to first LLVM BB & Instruction in function.
  StackFrame.CurBB = F->begin();
  StackFrame.CurInst = StackFrame.CurBB->begin();

  // Run through the function arguments and initialize their values...
  assert((ArgVals.size() == F->arg_size() ||
          (ArgVals.size() > F->arg_size() && F->getFunctionType()->isVarArg())) &&
         "Invalid number of values passed to function invocation!");

  // Handle non-varargs arguments...
  unsigned i = 0;
  for (Function::arg_iterator AI = F->arg_begin(), E = F->arg_end();
       AI != E; ++AI, ++i)
    SetValue(AI, ArgVals[i], StackFrame);

  // Handle varargs arguments...
  StackFrame.VarArgs.assign(ArgVals.begin()+i, ArgVals.end());
}

void Interpreter::run() {
  while (!ECStack.empty()) {
    // Interpret a single instruction & increment the "PC".
    ExecutionContext &SF = ECStack.back();  // Current stack frame
    Instruction &I = *SF.CurInst++;         // Increment before execute

    // Track the number of dynamic instructions executed.
    ++NumDynamicInsts;

    DEBUG(dbgs() << "About to interpret: " << I);
    visit(I);   // Dispatch to one of the visit* methods...
#if 0
    // This is not safe, as visiting the instruction could lower it and free I.
    DEBUG(
      if (!isa<CallInst>(I) && !isa<InvokeInst>(I) &&
          I.getType() != Type::VoidTy) {
        dbgs() << "  --> ";
        const GenericValue &Val = SF.Values[&I];
        switch (I.getType()->getTypeID()) {
        default: llvm_unreachable("Invalid GenericValue Type");
        case Type::VoidTyID:    dbgs() << "void"; break;
        case Type::FloatTyID:   dbgs() << "float " << Val.FloatVal; break;
        case Type::DoubleTyID:  dbgs() << "double " << Val.DoubleVal; break;
        case Type::PointerTyID: dbgs() << "void* " << intptr_t(Val.PointerVal);
          break;
        case Type::IntegerTyID:
          dbgs() << "i" << Val.IntVal.getBitWidth() << " "
                 << Val.IntVal.toStringUnsigned(10)
                 << " (0x" << Val.IntVal.toStringUnsigned(16) << ")\n";
          break;
        }
      });
#endif
  }
}