Execution.cpp 80 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
7177817791780178117821783178417851786178717881789179017911792179317941795179617971798179918001801180218031804180518061807180818091810181118121813181418151816181718181819182018211822182318241825182618271828182918301831183218331834183518361837183818391840184118421843184418451846184718481849185018511852185318541855185618571858185918601861186218631864186518661867186818691870187118721873187418751876187718781879188018811882188318841885188618871888188918901891189218931894189518961897189818991900190119021903190419051906190719081909191019111912191319141915191619171918191919201921192219231924192519261927192819291930193119321933193419351936193719381939194019411942194319441945194619471948194919501951195219531954195519561957195819591960196119621963196419651966196719681969197019711972197319741975197619771978197919801981198219831984198519861987198819891990199119921993199419951996199719981999200020012002200320042005200620072008200920102011201220132014201520162017201820192020202120222023202420252026202720282029203020312032203320342035203620372038203920402041204220432044204520462047204820492050205120522053205420552056205720582059206020612062206320642065206620672068206920702071207220732074207520762077207820792080208120822083208420852086208720882089209020912092209320942095209620972098209921002101210221032104210521062107210821092110211121122113211421152116211721182119212021212122212321242125212621272128212921302131213221332134213521362137213821392140214121422143214421452146
  1. //===-- Execution.cpp - Implement code to simulate the program ------------===//
  2. //
  3. // The LLVM Compiler Infrastructure
  4. //
  5. // This file is distributed under the University of Illinois Open Source
  6. // License. See LICENSE.TXT for details.
  7. //
  8. //===----------------------------------------------------------------------===//
  9. //
  10. // This file contains the actual instruction interpreter.
  11. //
  12. //===----------------------------------------------------------------------===//
  13. #define DEBUG_TYPE "interpreter"
  14. #include "Interpreter.h"
  15. #include "llvm/ADT/APInt.h"
  16. #include "llvm/ADT/Statistic.h"
  17. #include "llvm/CodeGen/IntrinsicLowering.h"
  18. #include "llvm/IR/Constants.h"
  19. #include "llvm/IR/DerivedTypes.h"
  20. #include "llvm/IR/Instructions.h"
  21. #include "llvm/Support/CommandLine.h"
  22. #include "llvm/Support/Debug.h"
  23. #include "llvm/Support/ErrorHandling.h"
  24. #include "llvm/Support/GetElementPtrTypeIterator.h"
  25. #include "llvm/Support/MathExtras.h"
  26. #include <algorithm>
  27. #include <cmath>
  28. using namespace llvm;
// Counts every IR instruction the interpreter executes; reported via the
// -stats machinery.
STATISTIC(NumDynamicInsts, "Number of dynamic instructions executed");

// Hidden command-line flag: when set, each volatile load/store executed by
// the interpreter is echoed so volatile memory traffic can be traced.
static cl::opt<bool> PrintVolatile("interpreter-print-volatile", cl::Hidden,
          cl::desc("make the interpreter print every volatile load and store"));
  32. //===----------------------------------------------------------------------===//
  33. // Various Helper Functions
  34. //===----------------------------------------------------------------------===//
  35. static void SetValue(Value *V, GenericValue Val, ExecutionContext &SF) {
  36. SF.Values[V] = Val;
  37. }
  38. //===----------------------------------------------------------------------===//
  39. // Binary Instruction Implementations
  40. //===----------------------------------------------------------------------===//
// Expands to one switch case (on Type::TypeID) that applies the C++
// operator OP to the TY (Float/Double) members of Src1/Src2 and stores the
// result into the matching member of Dest.
#define IMPLEMENT_BINARY_OPERATOR(OP, TY) \
   case Type::TY##TyID: \
     Dest.TY##Val = Src1.TY##Val OP Src2.TY##Val; \
     break
// executeF{Add,Sub,Mul,Div}Inst: scalar floating-point binary operations.
// Each dispatches on the operand type ID and applies the corresponding C++
// operator to the float/double member of the GenericValues (via
// IMPLEMENT_BINARY_OPERATOR); any non-FP scalar type is a fatal
// interpreter error.

/// Scalar FP addition: Dest = Src1 + Src2 (float or double).
static void executeFAddInst(GenericValue &Dest, GenericValue Src1,
                            GenericValue Src2, Type *Ty) {
  switch (Ty->getTypeID()) {
    IMPLEMENT_BINARY_OPERATOR(+, Float);
    IMPLEMENT_BINARY_OPERATOR(+, Double);
  default:
    dbgs() << "Unhandled type for FAdd instruction: " << *Ty << "\n";
    llvm_unreachable(0);
  }
}

/// Scalar FP subtraction: Dest = Src1 - Src2 (float or double).
static void executeFSubInst(GenericValue &Dest, GenericValue Src1,
                            GenericValue Src2, Type *Ty) {
  switch (Ty->getTypeID()) {
    IMPLEMENT_BINARY_OPERATOR(-, Float);
    IMPLEMENT_BINARY_OPERATOR(-, Double);
  default:
    dbgs() << "Unhandled type for FSub instruction: " << *Ty << "\n";
    llvm_unreachable(0);
  }
}

/// Scalar FP multiplication: Dest = Src1 * Src2 (float or double).
static void executeFMulInst(GenericValue &Dest, GenericValue Src1,
                            GenericValue Src2, Type *Ty) {
  switch (Ty->getTypeID()) {
    IMPLEMENT_BINARY_OPERATOR(*, Float);
    IMPLEMENT_BINARY_OPERATOR(*, Double);
  default:
    dbgs() << "Unhandled type for FMul instruction: " << *Ty << "\n";
    llvm_unreachable(0);
  }
}

/// Scalar FP division: Dest = Src1 / Src2 (float or double).  No special
/// handling for division by zero -- IEEE semantics of the host apply.
static void executeFDivInst(GenericValue &Dest, GenericValue Src1,
                            GenericValue Src2, Type *Ty) {
  switch (Ty->getTypeID()) {
    IMPLEMENT_BINARY_OPERATOR(/, Float);
    IMPLEMENT_BINARY_OPERATOR(/, Double);
  default:
    dbgs() << "Unhandled type for FDiv instruction: " << *Ty << "\n";
    llvm_unreachable(0);
  }
}
  85. static void executeFRemInst(GenericValue &Dest, GenericValue Src1,
  86. GenericValue Src2, Type *Ty) {
  87. switch (Ty->getTypeID()) {
  88. case Type::FloatTyID:
  89. Dest.FloatVal = fmod(Src1.FloatVal, Src2.FloatVal);
  90. break;
  91. case Type::DoubleTyID:
  92. Dest.DoubleVal = fmod(Src1.DoubleVal, Src2.DoubleVal);
  93. break;
  94. default:
  95. dbgs() << "Unhandled type for Rem instruction: " << *Ty << "\n";
  96. llvm_unreachable(0);
  97. }
  98. }
// One switch case comparing scalar APInt operands with the named APInt
// predicate method (eq, ne, ult, slt, ...); produces an i1 result in Dest.
#define IMPLEMENT_INTEGER_ICMP(OP, TY) \
   case Type::IntegerTyID: \
      Dest.IntVal = APInt(1,Src1.IntVal.OP(Src2.IntVal)); \
      break;

// Element-wise form of the above for integer vectors: compares each lane
// with the APInt predicate and builds a vector of i1 lanes in Dest.
#define IMPLEMENT_VECTOR_INTEGER_ICMP(OP, TY) \
  case Type::VectorTyID: { \
    assert(Src1.AggregateVal.size() == Src2.AggregateVal.size()); \
    Dest.AggregateVal.resize( Src1.AggregateVal.size() ); \
    for( uint32_t _i=0;_i<Src1.AggregateVal.size();_i++) \
      Dest.AggregateVal[_i].IntVal = APInt(1, \
      Src1.AggregateVal[_i].IntVal.OP(Src2.AggregateVal[_i].IntVal));\
  } break;

// Handle pointers specially because they must be compared with only as much
// width as the host has. We _do not_ want to be comparing 64 bit values when
// running on a 32-bit target, otherwise the upper 32 bits might mess up
// comparisons if they contain garbage.
#define IMPLEMENT_POINTER_ICMP(OP) \
   case Type::PointerTyID: \
      Dest.IntVal = APInt(1,(void*)(intptr_t)Src1.PointerVal OP \
                            (void*)(intptr_t)Src2.PointerVal); \
      break;
// executeICMP_<PRED>: evaluate one icmp predicate over scalar integers,
// integer vectors, or pointers, returning an i1 (or vector-of-i1) result.
// All ten helpers share the same shape and differ only in the APInt
// comparison routine / pointer operator the macros instantiate; unhandled
// operand types are a fatal interpreter error.

/// icmp eq: bitwise equality (integers/vectors) or pointer equality.
static GenericValue executeICMP_EQ(GenericValue Src1, GenericValue Src2,
                                   Type *Ty) {
  GenericValue Dest;
  switch (Ty->getTypeID()) {
    IMPLEMENT_INTEGER_ICMP(eq,Ty);
    IMPLEMENT_VECTOR_INTEGER_ICMP(eq,Ty);
    IMPLEMENT_POINTER_ICMP(==);
  default:
    dbgs() << "Unhandled type for ICMP_EQ predicate: " << *Ty << "\n";
    llvm_unreachable(0);
  }
  return Dest;
}

/// icmp ne: bitwise inequality or pointer inequality.
static GenericValue executeICMP_NE(GenericValue Src1, GenericValue Src2,
                                   Type *Ty) {
  GenericValue Dest;
  switch (Ty->getTypeID()) {
    IMPLEMENT_INTEGER_ICMP(ne,Ty);
    IMPLEMENT_VECTOR_INTEGER_ICMP(ne,Ty);
    IMPLEMENT_POINTER_ICMP(!=);
  default:
    dbgs() << "Unhandled type for ICMP_NE predicate: " << *Ty << "\n";
    llvm_unreachable(0);
  }
  return Dest;
}

/// icmp ult: unsigned less-than (pointers compared with raw <).
static GenericValue executeICMP_ULT(GenericValue Src1, GenericValue Src2,
                                    Type *Ty) {
  GenericValue Dest;
  switch (Ty->getTypeID()) {
    IMPLEMENT_INTEGER_ICMP(ult,Ty);
    IMPLEMENT_VECTOR_INTEGER_ICMP(ult,Ty);
    IMPLEMENT_POINTER_ICMP(<);
  default:
    dbgs() << "Unhandled type for ICMP_ULT predicate: " << *Ty << "\n";
    llvm_unreachable(0);
  }
  return Dest;
}

/// icmp slt: signed less-than.  Note the pointer case uses the same raw
/// < as ULT -- signedness is meaningless for host pointers.
static GenericValue executeICMP_SLT(GenericValue Src1, GenericValue Src2,
                                    Type *Ty) {
  GenericValue Dest;
  switch (Ty->getTypeID()) {
    IMPLEMENT_INTEGER_ICMP(slt,Ty);
    IMPLEMENT_VECTOR_INTEGER_ICMP(slt,Ty);
    IMPLEMENT_POINTER_ICMP(<);
  default:
    dbgs() << "Unhandled type for ICMP_SLT predicate: " << *Ty << "\n";
    llvm_unreachable(0);
  }
  return Dest;
}

/// icmp ugt: unsigned greater-than.
static GenericValue executeICMP_UGT(GenericValue Src1, GenericValue Src2,
                                    Type *Ty) {
  GenericValue Dest;
  switch (Ty->getTypeID()) {
    IMPLEMENT_INTEGER_ICMP(ugt,Ty);
    IMPLEMENT_VECTOR_INTEGER_ICMP(ugt,Ty);
    IMPLEMENT_POINTER_ICMP(>);
  default:
    dbgs() << "Unhandled type for ICMP_UGT predicate: " << *Ty << "\n";
    llvm_unreachable(0);
  }
  return Dest;
}

/// icmp sgt: signed greater-than.
static GenericValue executeICMP_SGT(GenericValue Src1, GenericValue Src2,
                                    Type *Ty) {
  GenericValue Dest;
  switch (Ty->getTypeID()) {
    IMPLEMENT_INTEGER_ICMP(sgt,Ty);
    IMPLEMENT_VECTOR_INTEGER_ICMP(sgt,Ty);
    IMPLEMENT_POINTER_ICMP(>);
  default:
    dbgs() << "Unhandled type for ICMP_SGT predicate: " << *Ty << "\n";
    llvm_unreachable(0);
  }
  return Dest;
}

/// icmp ule: unsigned less-or-equal.
static GenericValue executeICMP_ULE(GenericValue Src1, GenericValue Src2,
                                    Type *Ty) {
  GenericValue Dest;
  switch (Ty->getTypeID()) {
    IMPLEMENT_INTEGER_ICMP(ule,Ty);
    IMPLEMENT_VECTOR_INTEGER_ICMP(ule,Ty);
    IMPLEMENT_POINTER_ICMP(<=);
  default:
    dbgs() << "Unhandled type for ICMP_ULE predicate: " << *Ty << "\n";
    llvm_unreachable(0);
  }
  return Dest;
}

/// icmp sle: signed less-or-equal.
static GenericValue executeICMP_SLE(GenericValue Src1, GenericValue Src2,
                                    Type *Ty) {
  GenericValue Dest;
  switch (Ty->getTypeID()) {
    IMPLEMENT_INTEGER_ICMP(sle,Ty);
    IMPLEMENT_VECTOR_INTEGER_ICMP(sle,Ty);
    IMPLEMENT_POINTER_ICMP(<=);
  default:
    dbgs() << "Unhandled type for ICMP_SLE predicate: " << *Ty << "\n";
    llvm_unreachable(0);
  }
  return Dest;
}

/// icmp uge: unsigned greater-or-equal.
static GenericValue executeICMP_UGE(GenericValue Src1, GenericValue Src2,
                                    Type *Ty) {
  GenericValue Dest;
  switch (Ty->getTypeID()) {
    IMPLEMENT_INTEGER_ICMP(uge,Ty);
    IMPLEMENT_VECTOR_INTEGER_ICMP(uge,Ty);
    IMPLEMENT_POINTER_ICMP(>=);
  default:
    dbgs() << "Unhandled type for ICMP_UGE predicate: " << *Ty << "\n";
    llvm_unreachable(0);
  }
  return Dest;
}

/// icmp sge: signed greater-or-equal.
static GenericValue executeICMP_SGE(GenericValue Src1, GenericValue Src2,
                                    Type *Ty) {
  GenericValue Dest;
  switch (Ty->getTypeID()) {
    IMPLEMENT_INTEGER_ICMP(sge,Ty);
    IMPLEMENT_VECTOR_INTEGER_ICMP(sge,Ty);
    IMPLEMENT_POINTER_ICMP(>=);
  default:
    dbgs() << "Unhandled type for ICMP_SGE predicate: " << *Ty << "\n";
    llvm_unreachable(0);
  }
  return Dest;
}
  250. void Interpreter::visitICmpInst(ICmpInst &I) {
  251. ExecutionContext &SF = ECStack.back();
  252. Type *Ty = I.getOperand(0)->getType();
  253. GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
  254. GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
  255. GenericValue R; // Result
  256. switch (I.getPredicate()) {
  257. case ICmpInst::ICMP_EQ: R = executeICMP_EQ(Src1, Src2, Ty); break;
  258. case ICmpInst::ICMP_NE: R = executeICMP_NE(Src1, Src2, Ty); break;
  259. case ICmpInst::ICMP_ULT: R = executeICMP_ULT(Src1, Src2, Ty); break;
  260. case ICmpInst::ICMP_SLT: R = executeICMP_SLT(Src1, Src2, Ty); break;
  261. case ICmpInst::ICMP_UGT: R = executeICMP_UGT(Src1, Src2, Ty); break;
  262. case ICmpInst::ICMP_SGT: R = executeICMP_SGT(Src1, Src2, Ty); break;
  263. case ICmpInst::ICMP_ULE: R = executeICMP_ULE(Src1, Src2, Ty); break;
  264. case ICmpInst::ICMP_SLE: R = executeICMP_SLE(Src1, Src2, Ty); break;
  265. case ICmpInst::ICMP_UGE: R = executeICMP_UGE(Src1, Src2, Ty); break;
  266. case ICmpInst::ICMP_SGE: R = executeICMP_SGE(Src1, Src2, Ty); break;
  267. default:
  268. dbgs() << "Don't know how to handle this ICmp predicate!\n-->" << I;
  269. llvm_unreachable(0);
  270. }
  271. SetValue(&I, R, SF);
  272. }
  273. #define IMPLEMENT_FCMP(OP, TY) \
  274. case Type::TY##TyID: \
  275. Dest.IntVal = APInt(1,Src1.TY##Val OP Src2.TY##Val); \
  276. break
  277. #define IMPLEMENT_VECTOR_FCMP_T(OP, TY) \
  278. assert(Src1.AggregateVal.size() == Src2.AggregateVal.size()); \
  279. Dest.AggregateVal.resize( Src1.AggregateVal.size() ); \
  280. for( uint32_t _i=0;_i<Src1.AggregateVal.size();_i++) \
  281. Dest.AggregateVal[_i].IntVal = APInt(1, \
  282. Src1.AggregateVal[_i].TY##Val OP Src2.AggregateVal[_i].TY##Val);\
  283. break;
  284. #define IMPLEMENT_VECTOR_FCMP(OP) \
  285. case Type::VectorTyID: \
  286. if(dyn_cast<VectorType>(Ty)->getElementType()->isFloatTy()) { \
  287. IMPLEMENT_VECTOR_FCMP_T(OP, Float); \
  288. } else { \
  289. IMPLEMENT_VECTOR_FCMP_T(OP, Double); \
  290. }
/// fcmp oeq: element-/value-wise == over float, double, or FP vectors.
/// Note: plain IEEE ==, so NaN operands compare false here, which is the
/// correct ordered-equal behavior without an explicit NaN check.
static GenericValue executeFCMP_OEQ(GenericValue Src1, GenericValue Src2,
                                    Type *Ty) {
  GenericValue Dest;
  switch (Ty->getTypeID()) {
    IMPLEMENT_FCMP(==, Float);
    IMPLEMENT_FCMP(==, Double);
    IMPLEMENT_VECTOR_FCMP(==);
  default:
    dbgs() << "Unhandled type for FCmp EQ instruction: " << *Ty << "\n";
    llvm_unreachable(0);
  }
  return Dest;
}
  304. #define IMPLEMENT_SCALAR_NANS(TY, X,Y) \
  305. if (TY->isFloatTy()) { \
  306. if (X.FloatVal != X.FloatVal || Y.FloatVal != Y.FloatVal) { \
  307. Dest.IntVal = APInt(1,false); \
  308. return Dest; \
  309. } \
  310. } else { \
  311. if (X.DoubleVal != X.DoubleVal || Y.DoubleVal != Y.DoubleVal) { \
  312. Dest.IntVal = APInt(1,false); \
  313. return Dest; \
  314. } \
  315. }
  316. #define MASK_VECTOR_NANS_T(X,Y, TZ, FLAG) \
  317. assert(X.AggregateVal.size() == Y.AggregateVal.size()); \
  318. Dest.AggregateVal.resize( X.AggregateVal.size() ); \
  319. for( uint32_t _i=0;_i<X.AggregateVal.size();_i++) { \
  320. if (X.AggregateVal[_i].TZ##Val != X.AggregateVal[_i].TZ##Val || \
  321. Y.AggregateVal[_i].TZ##Val != Y.AggregateVal[_i].TZ##Val) \
  322. Dest.AggregateVal[_i].IntVal = APInt(1,FLAG); \
  323. else { \
  324. Dest.AggregateVal[_i].IntVal = APInt(1,!FLAG); \
  325. } \
  326. }
  327. #define MASK_VECTOR_NANS(TY, X,Y, FLAG) \
  328. if (TY->isVectorTy()) { \
  329. if (dyn_cast<VectorType>(TY)->getElementType()->isFloatTy()) { \
  330. MASK_VECTOR_NANS_T(X, Y, Float, FLAG) \
  331. } else { \
  332. MASK_VECTOR_NANS_T(X, Y, Double, FLAG) \
  333. } \
  334. } \
/// fcmp one: ordered not-equal.  Returns false when a scalar operand is
/// NaN; for vectors, NaN lanes are forced to false after the raw
/// element-wise != is computed.
static GenericValue executeFCMP_ONE(GenericValue Src1, GenericValue Src2,
                                    Type *Ty)
{
  GenericValue Dest;
  // if input is scalar value and Src1 or Src2 is NaN return false
  IMPLEMENT_SCALAR_NANS(Ty, Src1, Src2)
  // if vector input detect NaNs and fill mask
  MASK_VECTOR_NANS(Ty, Src1, Src2, false)
  // Capture the NaN mask now: the switch below overwrites Dest with the
  // raw element-wise != result.
  GenericValue DestMask = Dest;
  switch (Ty->getTypeID()) {
    IMPLEMENT_FCMP(!=, Float);
    IMPLEMENT_FCMP(!=, Double);
    IMPLEMENT_VECTOR_FCMP(!=);
  default:
    dbgs() << "Unhandled type for FCmp NE instruction: " << *Ty << "\n";
    llvm_unreachable(0);
  }
  // in vector case mask out NaN elements
  if (Ty->isVectorTy())
    for( size_t _i=0; _i<Src1.AggregateVal.size(); _i++)
      // Mask lane is false exactly where a NaN was seen (FLAG=false above).
      if (DestMask.AggregateVal[_i].IntVal == false)
        Dest.AggregateVal[_i].IntVal = APInt(1,false);

  return Dest;
}
// executeFCMP_{OLE,OGE,OLT,OGT}: ordered relational FP compares over
// float, double, or FP vectors.  These rely on IEEE semantics of the raw
// C++ operators: any comparison involving NaN yields false, which is
// exactly the ordered-predicate result, so no explicit NaN check is made.

/// fcmp ole: ordered less-or-equal.
static GenericValue executeFCMP_OLE(GenericValue Src1, GenericValue Src2,
                                    Type *Ty) {
  GenericValue Dest;
  switch (Ty->getTypeID()) {
    IMPLEMENT_FCMP(<=, Float);
    IMPLEMENT_FCMP(<=, Double);
    IMPLEMENT_VECTOR_FCMP(<=);
  default:
    dbgs() << "Unhandled type for FCmp LE instruction: " << *Ty << "\n";
    llvm_unreachable(0);
  }
  return Dest;
}

/// fcmp oge: ordered greater-or-equal.
static GenericValue executeFCMP_OGE(GenericValue Src1, GenericValue Src2,
                                    Type *Ty) {
  GenericValue Dest;
  switch (Ty->getTypeID()) {
    IMPLEMENT_FCMP(>=, Float);
    IMPLEMENT_FCMP(>=, Double);
    IMPLEMENT_VECTOR_FCMP(>=);
  default:
    dbgs() << "Unhandled type for FCmp GE instruction: " << *Ty << "\n";
    llvm_unreachable(0);
  }
  return Dest;
}

/// fcmp olt: ordered less-than.
static GenericValue executeFCMP_OLT(GenericValue Src1, GenericValue Src2,
                                    Type *Ty) {
  GenericValue Dest;
  switch (Ty->getTypeID()) {
    IMPLEMENT_FCMP(<, Float);
    IMPLEMENT_FCMP(<, Double);
    IMPLEMENT_VECTOR_FCMP(<);
  default:
    dbgs() << "Unhandled type for FCmp LT instruction: " << *Ty << "\n";
    llvm_unreachable(0);
  }
  return Dest;
}

/// fcmp ogt: ordered greater-than.
static GenericValue executeFCMP_OGT(GenericValue Src1, GenericValue Src2,
                                    Type *Ty) {
  GenericValue Dest;
  switch (Ty->getTypeID()) {
    IMPLEMENT_FCMP(>, Float);
    IMPLEMENT_FCMP(>, Double);
    IMPLEMENT_VECTOR_FCMP(>);
  default:
    dbgs() << "Unhandled type for FCmp GT instruction: " << *Ty << "\n";
    llvm_unreachable(0);
  }
  return Dest;
}
// Scalar NaN filter for *unordered* predicates: if either scalar operand
// is NaN the compare is true -- store that and return immediately from the
// enclosing function.
#define IMPLEMENT_UNORDERED(TY, X,Y) \
  if (TY->isFloatTy()) { \
    if (X.FloatVal != X.FloatVal || Y.FloatVal != Y.FloatVal) { \
      Dest.IntVal = APInt(1,true); \
      return Dest; \
    } \
  } else if (X.DoubleVal != X.DoubleVal || Y.DoubleVal != Y.DoubleVal) { \
    Dest.IntVal = APInt(1,true); \
    return Dest; \
  }

// Vector unordered compare: on entry Dest holds the NaN mask built by
// MASK_VECTOR_NANS (true in NaN lanes).  Saves that mask, computes the
// ordered result via _FUNC, then forces NaN lanes to true and returns.
#define IMPLEMENT_VECTOR_UNORDERED(TY, X,Y, _FUNC) \
  if (TY->isVectorTy()) { \
    GenericValue DestMask = Dest; \
    Dest = _FUNC(Src1, Src2, Ty); \
    for( size_t _i=0; _i<Src1.AggregateVal.size(); _i++) \
      if (DestMask.AggregateVal[_i].IntVal == true) \
        Dest.AggregateVal[_i].IntVal = APInt(1,true); \
    return Dest; \
  }
// executeFCMP_U{EQ,NE,LE,GE,LT,GT}: unordered FP predicates -- true when
// either operand is NaN, otherwise equal to the corresponding ordered
// compare.  Each helper: (1) returns true for a NaN scalar operand
// (IMPLEMENT_UNORDERED), (2) builds a NaN lane mask for vectors
// (MASK_VECTOR_NANS, FLAG=true), (3) ORs that mask into the ordered vector
// result and returns (IMPLEMENT_VECTOR_UNORDERED), or (4) falls through to
// the plain ordered compare for NaN-free scalars.
// NOTE: 'Dest' appears unused but is written/read by the macros above.

/// fcmp ueq: unordered or equal.
static GenericValue executeFCMP_UEQ(GenericValue Src1, GenericValue Src2,
                                    Type *Ty) {
  GenericValue Dest;
  IMPLEMENT_UNORDERED(Ty, Src1, Src2)
  MASK_VECTOR_NANS(Ty, Src1, Src2, true)
  IMPLEMENT_VECTOR_UNORDERED(Ty, Src1, Src2, executeFCMP_OEQ)
  return executeFCMP_OEQ(Src1, Src2, Ty);
}

/// fcmp une: unordered or not-equal.
static GenericValue executeFCMP_UNE(GenericValue Src1, GenericValue Src2,
                                    Type *Ty) {
  GenericValue Dest;
  IMPLEMENT_UNORDERED(Ty, Src1, Src2)
  MASK_VECTOR_NANS(Ty, Src1, Src2, true)
  IMPLEMENT_VECTOR_UNORDERED(Ty, Src1, Src2, executeFCMP_ONE)
  return executeFCMP_ONE(Src1, Src2, Ty);
}

/// fcmp ule: unordered or less-or-equal.
static GenericValue executeFCMP_ULE(GenericValue Src1, GenericValue Src2,
                                    Type *Ty) {
  GenericValue Dest;
  IMPLEMENT_UNORDERED(Ty, Src1, Src2)
  MASK_VECTOR_NANS(Ty, Src1, Src2, true)
  IMPLEMENT_VECTOR_UNORDERED(Ty, Src1, Src2, executeFCMP_OLE)
  return executeFCMP_OLE(Src1, Src2, Ty);
}

/// fcmp uge: unordered or greater-or-equal.
static GenericValue executeFCMP_UGE(GenericValue Src1, GenericValue Src2,
                                    Type *Ty) {
  GenericValue Dest;
  IMPLEMENT_UNORDERED(Ty, Src1, Src2)
  MASK_VECTOR_NANS(Ty, Src1, Src2, true)
  IMPLEMENT_VECTOR_UNORDERED(Ty, Src1, Src2, executeFCMP_OGE)
  return executeFCMP_OGE(Src1, Src2, Ty);
}

/// fcmp ult: unordered or less-than.
static GenericValue executeFCMP_ULT(GenericValue Src1, GenericValue Src2,
                                    Type *Ty) {
  GenericValue Dest;
  IMPLEMENT_UNORDERED(Ty, Src1, Src2)
  MASK_VECTOR_NANS(Ty, Src1, Src2, true)
  IMPLEMENT_VECTOR_UNORDERED(Ty, Src1, Src2, executeFCMP_OLT)
  return executeFCMP_OLT(Src1, Src2, Ty);
}

/// fcmp ugt: unordered or greater-than.
static GenericValue executeFCMP_UGT(GenericValue Src1, GenericValue Src2,
                                    Type *Ty) {
  GenericValue Dest;
  IMPLEMENT_UNORDERED(Ty, Src1, Src2)
  MASK_VECTOR_NANS(Ty, Src1, Src2, true)
  IMPLEMENT_VECTOR_UNORDERED(Ty, Src1, Src2, executeFCMP_OGT)
  return executeFCMP_OGT(Src1, Src2, Ty);
}
  478. static GenericValue executeFCMP_ORD(GenericValue Src1, GenericValue Src2,
  479. Type *Ty) {
  480. GenericValue Dest;
  481. if(Ty->isVectorTy()) {
  482. assert(Src1.AggregateVal.size() == Src2.AggregateVal.size());
  483. Dest.AggregateVal.resize( Src1.AggregateVal.size() );
  484. if(dyn_cast<VectorType>(Ty)->getElementType()->isFloatTy()) {
  485. for( size_t _i=0;_i<Src1.AggregateVal.size();_i++)
  486. Dest.AggregateVal[_i].IntVal = APInt(1,
  487. ( (Src1.AggregateVal[_i].FloatVal ==
  488. Src1.AggregateVal[_i].FloatVal) &&
  489. (Src2.AggregateVal[_i].FloatVal ==
  490. Src2.AggregateVal[_i].FloatVal)));
  491. } else {
  492. for( size_t _i=0;_i<Src1.AggregateVal.size();_i++)
  493. Dest.AggregateVal[_i].IntVal = APInt(1,
  494. ( (Src1.AggregateVal[_i].DoubleVal ==
  495. Src1.AggregateVal[_i].DoubleVal) &&
  496. (Src2.AggregateVal[_i].DoubleVal ==
  497. Src2.AggregateVal[_i].DoubleVal)));
  498. }
  499. } else if (Ty->isFloatTy())
  500. Dest.IntVal = APInt(1,(Src1.FloatVal == Src1.FloatVal &&
  501. Src2.FloatVal == Src2.FloatVal));
  502. else {
  503. Dest.IntVal = APInt(1,(Src1.DoubleVal == Src1.DoubleVal &&
  504. Src2.DoubleVal == Src2.DoubleVal));
  505. }
  506. return Dest;
  507. }
  508. static GenericValue executeFCMP_UNO(GenericValue Src1, GenericValue Src2,
  509. Type *Ty) {
  510. GenericValue Dest;
  511. if(Ty->isVectorTy()) {
  512. assert(Src1.AggregateVal.size() == Src2.AggregateVal.size());
  513. Dest.AggregateVal.resize( Src1.AggregateVal.size() );
  514. if(dyn_cast<VectorType>(Ty)->getElementType()->isFloatTy()) {
  515. for( size_t _i=0;_i<Src1.AggregateVal.size();_i++)
  516. Dest.AggregateVal[_i].IntVal = APInt(1,
  517. ( (Src1.AggregateVal[_i].FloatVal !=
  518. Src1.AggregateVal[_i].FloatVal) ||
  519. (Src2.AggregateVal[_i].FloatVal !=
  520. Src2.AggregateVal[_i].FloatVal)));
  521. } else {
  522. for( size_t _i=0;_i<Src1.AggregateVal.size();_i++)
  523. Dest.AggregateVal[_i].IntVal = APInt(1,
  524. ( (Src1.AggregateVal[_i].DoubleVal !=
  525. Src1.AggregateVal[_i].DoubleVal) ||
  526. (Src2.AggregateVal[_i].DoubleVal !=
  527. Src2.AggregateVal[_i].DoubleVal)));
  528. }
  529. } else if (Ty->isFloatTy())
  530. Dest.IntVal = APInt(1,(Src1.FloatVal != Src1.FloatVal ||
  531. Src2.FloatVal != Src2.FloatVal));
  532. else {
  533. Dest.IntVal = APInt(1,(Src1.DoubleVal != Src1.DoubleVal ||
  534. Src2.DoubleVal != Src2.DoubleVal));
  535. }
  536. return Dest;
  537. }
  538. static GenericValue executeFCMP_BOOL(GenericValue Src1, GenericValue Src2,
  539. const Type *Ty, const bool val) {
  540. GenericValue Dest;
  541. if(Ty->isVectorTy()) {
  542. assert(Src1.AggregateVal.size() == Src2.AggregateVal.size());
  543. Dest.AggregateVal.resize( Src1.AggregateVal.size() );
  544. for( size_t _i=0; _i<Src1.AggregateVal.size(); _i++)
  545. Dest.AggregateVal[_i].IntVal = APInt(1,val);
  546. } else {
  547. Dest.IntVal = APInt(1, val);
  548. }
  549. return Dest;
  550. }
/// Interpret an 'fcmp' instruction: evaluate both operands in the current
/// stack frame, dispatch on the FP predicate (constant FALSE/TRUE,
/// ordered, or unordered variants), and bind the i1/vector result.
void Interpreter::visitFCmpInst(FCmpInst &I) {
  ExecutionContext &SF = ECStack.back();
  Type *Ty = I.getOperand(0)->getType();
  GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
  GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
  GenericValue R; // Result
  switch (I.getPredicate()) {
  default:
    dbgs() << "Don't know how to handle this FCmp predicate!\n-->" << I;
    llvm_unreachable(0);
    break;
  // FCMP_FALSE / FCMP_TRUE ignore the operand values entirely.
  case FCmpInst::FCMP_FALSE: R = executeFCMP_BOOL(Src1, Src2, Ty, false);
  break;
  case FCmpInst::FCMP_TRUE: R = executeFCMP_BOOL(Src1, Src2, Ty, true);
  break;
  case FCmpInst::FCMP_ORD: R = executeFCMP_ORD(Src1, Src2, Ty); break;
  case FCmpInst::FCMP_UNO: R = executeFCMP_UNO(Src1, Src2, Ty); break;
  case FCmpInst::FCMP_UEQ: R = executeFCMP_UEQ(Src1, Src2, Ty); break;
  case FCmpInst::FCMP_OEQ: R = executeFCMP_OEQ(Src1, Src2, Ty); break;
  case FCmpInst::FCMP_UNE: R = executeFCMP_UNE(Src1, Src2, Ty); break;
  case FCmpInst::FCMP_ONE: R = executeFCMP_ONE(Src1, Src2, Ty); break;
  case FCmpInst::FCMP_ULT: R = executeFCMP_ULT(Src1, Src2, Ty); break;
  case FCmpInst::FCMP_OLT: R = executeFCMP_OLT(Src1, Src2, Ty); break;
  case FCmpInst::FCMP_UGT: R = executeFCMP_UGT(Src1, Src2, Ty); break;
  case FCmpInst::FCMP_OGT: R = executeFCMP_OGT(Src1, Src2, Ty); break;
  case FCmpInst::FCMP_ULE: R = executeFCMP_ULE(Src1, Src2, Ty); break;
  case FCmpInst::FCMP_OLE: R = executeFCMP_OLE(Src1, Src2, Ty); break;
  case FCmpInst::FCMP_UGE: R = executeFCMP_UGE(Src1, Src2, Ty); break;
  case FCmpInst::FCMP_OGE: R = executeFCMP_OGE(Src1, Src2, Ty); break;
  }
  SetValue(&I, R, SF);
}
  583. static GenericValue executeCmpInst(unsigned predicate, GenericValue Src1,
  584. GenericValue Src2, Type *Ty) {
  585. GenericValue Result;
  586. switch (predicate) {
  587. case ICmpInst::ICMP_EQ: return executeICMP_EQ(Src1, Src2, Ty);
  588. case ICmpInst::ICMP_NE: return executeICMP_NE(Src1, Src2, Ty);
  589. case ICmpInst::ICMP_UGT: return executeICMP_UGT(Src1, Src2, Ty);
  590. case ICmpInst::ICMP_SGT: return executeICMP_SGT(Src1, Src2, Ty);
  591. case ICmpInst::ICMP_ULT: return executeICMP_ULT(Src1, Src2, Ty);
  592. case ICmpInst::ICMP_SLT: return executeICMP_SLT(Src1, Src2, Ty);
  593. case ICmpInst::ICMP_UGE: return executeICMP_UGE(Src1, Src2, Ty);
  594. case ICmpInst::ICMP_SGE: return executeICMP_SGE(Src1, Src2, Ty);
  595. case ICmpInst::ICMP_ULE: return executeICMP_ULE(Src1, Src2, Ty);
  596. case ICmpInst::ICMP_SLE: return executeICMP_SLE(Src1, Src2, Ty);
  597. case FCmpInst::FCMP_ORD: return executeFCMP_ORD(Src1, Src2, Ty);
  598. case FCmpInst::FCMP_UNO: return executeFCMP_UNO(Src1, Src2, Ty);
  599. case FCmpInst::FCMP_OEQ: return executeFCMP_OEQ(Src1, Src2, Ty);
  600. case FCmpInst::FCMP_UEQ: return executeFCMP_UEQ(Src1, Src2, Ty);
  601. case FCmpInst::FCMP_ONE: return executeFCMP_ONE(Src1, Src2, Ty);
  602. case FCmpInst::FCMP_UNE: return executeFCMP_UNE(Src1, Src2, Ty);
  603. case FCmpInst::FCMP_OLT: return executeFCMP_OLT(Src1, Src2, Ty);
  604. case FCmpInst::FCMP_ULT: return executeFCMP_ULT(Src1, Src2, Ty);
  605. case FCmpInst::FCMP_OGT: return executeFCMP_OGT(Src1, Src2, Ty);
  606. case FCmpInst::FCMP_UGT: return executeFCMP_UGT(Src1, Src2, Ty);
  607. case FCmpInst::FCMP_OLE: return executeFCMP_OLE(Src1, Src2, Ty);
  608. case FCmpInst::FCMP_ULE: return executeFCMP_ULE(Src1, Src2, Ty);
  609. case FCmpInst::FCMP_OGE: return executeFCMP_OGE(Src1, Src2, Ty);
  610. case FCmpInst::FCMP_UGE: return executeFCMP_UGE(Src1, Src2, Ty);
  611. case FCmpInst::FCMP_FALSE: return executeFCMP_BOOL(Src1, Src2, Ty, false);
  612. case FCmpInst::FCMP_TRUE: return executeFCMP_BOOL(Src1, Src2, Ty, true);
  613. default:
  614. dbgs() << "Unhandled Cmp predicate\n";
  615. llvm_unreachable(0);
  616. }
  617. }
// visitBinaryOperator - Interpret a two-operand arithmetic or bitwise
// instruction (add/sub/mul, the four div/rem flavours, and/or/xor, plus the
// floating point forms) on either scalar or vector operands, and bind the
// result to the instruction in the current stack frame.
void Interpreter::visitBinaryOperator(BinaryOperator &I) {
  ExecutionContext &SF = ECStack.back();
  Type *Ty = I.getOperand(0)->getType();
  GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
  GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
  GenericValue R;   // Result

  // First process vector operation
  if (Ty->isVectorTy()) {
    assert(Src1.AggregateVal.size() == Src2.AggregateVal.size());
    R.AggregateVal.resize(Src1.AggregateVal.size());

    // Macros to execute binary operation 'OP' over integer vectors
#define INTEGER_VECTOR_OPERATION(OP)                               \
    for (unsigned i = 0; i < R.AggregateVal.size(); ++i)           \
      R.AggregateVal[i].IntVal =                                   \
      Src1.AggregateVal[i].IntVal OP Src2.AggregateVal[i].IntVal;

    // Additional macros to execute binary operations udiv/sdiv/urem/srem since
    // they have different notation.
#define INTEGER_VECTOR_FUNCTION(OP)                                \
    for (unsigned i = 0; i < R.AggregateVal.size(); ++i)           \
      R.AggregateVal[i].IntVal =                                   \
      Src1.AggregateVal[i].IntVal.OP(Src2.AggregateVal[i].IntVal);

    // Macros to execute binary operation 'OP' over floating point type TY
    // (float or double) vectors
#define FLOAT_VECTOR_FUNCTION(OP, TY)                              \
    for (unsigned i = 0; i < R.AggregateVal.size(); ++i)           \
      R.AggregateVal[i].TY =                                       \
      Src1.AggregateVal[i].TY OP Src2.AggregateVal[i].TY;

    // Macros to choose appropriate TY: float or double and run operation
    // execution
#define FLOAT_VECTOR_OP(OP) {                                      \
  if (dyn_cast<VectorType>(Ty)->getElementType()->isFloatTy())     \
    FLOAT_VECTOR_FUNCTION(OP, FloatVal)                            \
  else {                                                           \
    if (dyn_cast<VectorType>(Ty)->getElementType()->isDoubleTy())  \
      FLOAT_VECTOR_FUNCTION(OP, DoubleVal)                         \
    else {                                                         \
      dbgs() << "Unhandled type for OP instruction: " << *Ty << "\n"; \
      llvm_unreachable(0);                                         \
    }                                                              \
  }                                                                \
}

    switch(I.getOpcode()){
    default:
      dbgs() << "Don't know how to handle this binary operator!\n-->" << I;
      llvm_unreachable(0);
      break;
    case Instruction::Add:   INTEGER_VECTOR_OPERATION(+) break;
    case Instruction::Sub:   INTEGER_VECTOR_OPERATION(-) break;
    case Instruction::Mul:   INTEGER_VECTOR_OPERATION(*) break;
    case Instruction::UDiv:  INTEGER_VECTOR_FUNCTION(udiv) break;
    case Instruction::SDiv:  INTEGER_VECTOR_FUNCTION(sdiv) break;
    case Instruction::URem:  INTEGER_VECTOR_FUNCTION(urem) break;
    case Instruction::SRem:  INTEGER_VECTOR_FUNCTION(srem) break;
    case Instruction::And:   INTEGER_VECTOR_OPERATION(&) break;
    case Instruction::Or:    INTEGER_VECTOR_OPERATION(|) break;
    case Instruction::Xor:   INTEGER_VECTOR_OPERATION(^) break;
    case Instruction::FAdd:  FLOAT_VECTOR_OP(+) break;
    case Instruction::FSub:  FLOAT_VECTOR_OP(-) break;
    case Instruction::FMul:  FLOAT_VECTOR_OP(*) break;
    case Instruction::FDiv:  FLOAT_VECTOR_OP(/) break;
    case Instruction::FRem:
      // FRem has no infix operator form, so it cannot use FLOAT_VECTOR_OP;
      // it is spelled out with fmod() per element instead.
      if (dyn_cast<VectorType>(Ty)->getElementType()->isFloatTy())
        for (unsigned i = 0; i < R.AggregateVal.size(); ++i)
          R.AggregateVal[i].FloatVal =
          fmod(Src1.AggregateVal[i].FloatVal, Src2.AggregateVal[i].FloatVal);
      else {
        if (dyn_cast<VectorType>(Ty)->getElementType()->isDoubleTy())
          for (unsigned i = 0; i < R.AggregateVal.size(); ++i)
            R.AggregateVal[i].DoubleVal =
            fmod(Src1.AggregateVal[i].DoubleVal, Src2.AggregateVal[i].DoubleVal);
        else {
          dbgs() << "Unhandled type for Rem instruction: " << *Ty << "\n";
          llvm_unreachable(0);
        }
      }
      break;
    }
  } else {
    // Scalar operands: integer math is done directly on the APInt payload,
    // floating point is delegated to the executeF*Inst helpers.
    switch (I.getOpcode()) {
    default:
      dbgs() << "Don't know how to handle this binary operator!\n-->" << I;
      llvm_unreachable(0);
      break;
    case Instruction::Add:   R.IntVal = Src1.IntVal + Src2.IntVal; break;
    case Instruction::Sub:   R.IntVal = Src1.IntVal - Src2.IntVal; break;
    case Instruction::Mul:   R.IntVal = Src1.IntVal * Src2.IntVal; break;
    case Instruction::FAdd:  executeFAddInst(R, Src1, Src2, Ty); break;
    case Instruction::FSub:  executeFSubInst(R, Src1, Src2, Ty); break;
    case Instruction::FMul:  executeFMulInst(R, Src1, Src2, Ty); break;
    case Instruction::FDiv:  executeFDivInst(R, Src1, Src2, Ty); break;
    case Instruction::FRem:  executeFRemInst(R, Src1, Src2, Ty); break;
    case Instruction::UDiv:  R.IntVal = Src1.IntVal.udiv(Src2.IntVal); break;
    case Instruction::SDiv:  R.IntVal = Src1.IntVal.sdiv(Src2.IntVal); break;
    case Instruction::URem:  R.IntVal = Src1.IntVal.urem(Src2.IntVal); break;
    case Instruction::SRem:  R.IntVal = Src1.IntVal.srem(Src2.IntVal); break;
    case Instruction::And:   R.IntVal = Src1.IntVal & Src2.IntVal; break;
    case Instruction::Or:    R.IntVal = Src1.IntVal | Src2.IntVal; break;
    case Instruction::Xor:   R.IntVal = Src1.IntVal ^ Src2.IntVal; break;
    }
  }
  SetValue(&I, R, SF);
}
  720. static GenericValue executeSelectInst(GenericValue Src1, GenericValue Src2,
  721. GenericValue Src3, const Type *Ty) {
  722. GenericValue Dest;
  723. if(Ty->isVectorTy()) {
  724. assert(Src1.AggregateVal.size() == Src2.AggregateVal.size());
  725. assert(Src2.AggregateVal.size() == Src3.AggregateVal.size());
  726. Dest.AggregateVal.resize( Src1.AggregateVal.size() );
  727. for (size_t i = 0; i < Src1.AggregateVal.size(); ++i)
  728. Dest.AggregateVal[i] = (Src1.AggregateVal[i].IntVal == 0) ?
  729. Src3.AggregateVal[i] : Src2.AggregateVal[i];
  730. } else {
  731. Dest = (Src1.IntVal == 0) ? Src3 : Src2;
  732. }
  733. return Dest;
  734. }
  735. void Interpreter::visitSelectInst(SelectInst &I) {
  736. ExecutionContext &SF = ECStack.back();
  737. const Type * Ty = I.getOperand(0)->getType();
  738. GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
  739. GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
  740. GenericValue Src3 = getOperandValue(I.getOperand(2), SF);
  741. GenericValue R = executeSelectInst(Src1, Src2, Src3, Ty);
  742. SetValue(&I, R, SF);
  743. }
  744. //===----------------------------------------------------------------------===//
  745. // Terminator Instruction Implementations
  746. //===----------------------------------------------------------------------===//
  747. void Interpreter::exitCalled(GenericValue GV) {
  748. // runAtExitHandlers() assumes there are no stack frames, but
  749. // if exit() was called, then it had a stack frame. Blow away
  750. // the stack before interpreting atexit handlers.
  751. ECStack.clear();
  752. runAtExitHandlers();
  753. exit(GV.IntVal.zextOrTrunc(32).getZExtValue());
  754. }
  755. /// Pop the last stack frame off of ECStack and then copy the result
  756. /// back into the result variable if we are not returning void. The
  757. /// result variable may be the ExitValue, or the Value of the calling
  758. /// CallInst if there was a previous stack frame. This method may
  759. /// invalidate any ECStack iterators you have. This method also takes
  760. /// care of switching to the normal destination BB, if we are returning
  761. /// from an invoke.
  762. ///
  763. void Interpreter::popStackAndReturnValueToCaller(Type *RetTy,
  764. GenericValue Result) {
  765. // Pop the current stack frame.
  766. ECStack.pop_back();
  767. if (ECStack.empty()) { // Finished main. Put result into exit code...
  768. if (RetTy && !RetTy->isVoidTy()) { // Nonvoid return type?
  769. ExitValue = Result; // Capture the exit value of the program
  770. } else {
  771. memset(&ExitValue.Untyped, 0, sizeof(ExitValue.Untyped));
  772. }
  773. } else {
  774. // If we have a previous stack frame, and we have a previous call,
  775. // fill in the return value...
  776. ExecutionContext &CallingSF = ECStack.back();
  777. if (Instruction *I = CallingSF.Caller.getInstruction()) {
  778. // Save result...
  779. if (!CallingSF.Caller.getType()->isVoidTy())
  780. SetValue(I, Result, CallingSF);
  781. if (InvokeInst *II = dyn_cast<InvokeInst> (I))
  782. SwitchToNewBasicBlock (II->getNormalDest (), CallingSF);
  783. CallingSF.Caller = CallSite(); // We returned from the call...
  784. }
  785. }
  786. }
  787. void Interpreter::visitReturnInst(ReturnInst &I) {
  788. ExecutionContext &SF = ECStack.back();
  789. Type *RetTy = Type::getVoidTy(I.getContext());
  790. GenericValue Result;
  791. // Save away the return value... (if we are not 'ret void')
  792. if (I.getNumOperands()) {
  793. RetTy = I.getReturnValue()->getType();
  794. Result = getOperandValue(I.getReturnValue(), SF);
  795. }
  796. popStackAndReturnValueToCaller(RetTy, Result);
  797. }
// visitUnreachableInst - Reaching an 'unreachable' at runtime is undefined
// behaviour in IR, so abort the interpreter with a fatal error.
void Interpreter::visitUnreachableInst(UnreachableInst &I) {
  report_fatal_error("Program executed an 'unreachable' instruction!");
}
  801. void Interpreter::visitBranchInst(BranchInst &I) {
  802. ExecutionContext &SF = ECStack.back();
  803. BasicBlock *Dest;
  804. Dest = I.getSuccessor(0); // Uncond branches have a fixed dest...
  805. if (!I.isUnconditional()) {
  806. Value *Cond = I.getCondition();
  807. if (getOperandValue(Cond, SF).IntVal == 0) // If false cond...
  808. Dest = I.getSuccessor(1);
  809. }
  810. SwitchToNewBasicBlock(Dest, SF);
  811. }
  812. void Interpreter::visitSwitchInst(SwitchInst &I) {
  813. ExecutionContext &SF = ECStack.back();
  814. Value* Cond = I.getCondition();
  815. Type *ElTy = Cond->getType();
  816. GenericValue CondVal = getOperandValue(Cond, SF);
  817. // Check to see if any of the cases match...
  818. BasicBlock *Dest = 0;
  819. for (SwitchInst::CaseIt i = I.case_begin(), e = I.case_end(); i != e; ++i) {
  820. GenericValue CaseVal = getOperandValue(i.getCaseValue(), SF);
  821. if (executeICMP_EQ(CondVal, CaseVal, ElTy).IntVal != 0) {
  822. Dest = cast<BasicBlock>(i.getCaseSuccessor());
  823. break;
  824. }
  825. }
  826. if (!Dest) Dest = I.getDefaultDest(); // No cases matched: use default
  827. SwitchToNewBasicBlock(Dest, SF);
  828. }
  829. void Interpreter::visitIndirectBrInst(IndirectBrInst &I) {
  830. ExecutionContext &SF = ECStack.back();
  831. void *Dest = GVTOP(getOperandValue(I.getAddress(), SF));
  832. SwitchToNewBasicBlock((BasicBlock*)Dest, SF);
  833. }
  834. // SwitchToNewBasicBlock - This method is used to jump to a new basic block.
  835. // This function handles the actual updating of block and instruction iterators
  836. // as well as execution of all of the PHI nodes in the destination block.
  837. //
  838. // This method does this because all of the PHI nodes must be executed
  839. // atomically, reading their inputs before any of the results are updated. Not
  840. // doing this can cause problems if the PHI nodes depend on other PHI nodes for
  841. // their inputs. If the input PHI node is updated before it is read, incorrect
  842. // results can happen. Thus we use a two phase approach.
  843. //
void Interpreter::SwitchToNewBasicBlock(BasicBlock *Dest, ExecutionContext &SF){
  BasicBlock *PrevBB = SF.CurBB;      // Remember where we came from...
  SF.CurBB   = Dest;                  // Update CurBB to branch destination
  SF.CurInst = SF.CurBB->begin();     // Update new instruction ptr...

  if (!isa<PHINode>(SF.CurInst)) return;  // Nothing fancy to do

  // Phase 1: loop over all of the PHI nodes in the current block, reading
  // their inputs for the PrevBB->Dest edge before any of them is written.
  std::vector<GenericValue> ResultValues;

  for (; PHINode *PN = dyn_cast<PHINode>(SF.CurInst); ++SF.CurInst) {
    // Search for the value corresponding to this previous bb...
    int i = PN->getBasicBlockIndex(PrevBB);
    assert(i != -1 && "PHINode doesn't contain entry for predecessor??");
    Value *IncomingValue = PN->getIncomingValue(i);

    // Save the incoming value for this PHI node...
    ResultValues.push_back(getOperandValue(IncomingValue, SF));
  }

  // Phase 2: rewind and loop over all of the PHI nodes setting their values,
  // so PHIs that read each other observed the pre-jump values above.
  SF.CurInst = SF.CurBB->begin();
  for (unsigned i = 0; isa<PHINode>(SF.CurInst); ++SF.CurInst, ++i) {
    PHINode *PN = cast<PHINode>(SF.CurInst);
    SetValue(PN, ResultValues[i], SF);
  }
}
  866. //===----------------------------------------------------------------------===//
  867. // Memory Instruction Implementations
  868. //===----------------------------------------------------------------------===//
  869. void Interpreter::visitAllocaInst(AllocaInst &I) {
  870. ExecutionContext &SF = ECStack.back();
  871. Type *Ty = I.getType()->getElementType(); // Type to be allocated
  872. // Get the number of elements being allocated by the array...
  873. unsigned NumElements =
  874. getOperandValue(I.getOperand(0), SF).IntVal.getZExtValue();
  875. unsigned TypeSize = (size_t)TD.getTypeAllocSize(Ty);
  876. // Avoid malloc-ing zero bytes, use max()...
  877. unsigned MemToAlloc = std::max(1U, NumElements * TypeSize);
  878. // Allocate enough memory to hold the type...
  879. void *Memory = malloc(MemToAlloc);
  880. DEBUG(dbgs() << "Allocated Type: " << *Ty << " (" << TypeSize << " bytes) x "
  881. << NumElements << " (Total: " << MemToAlloc << ") at "
  882. << uintptr_t(Memory) << '\n');
  883. GenericValue Result = PTOGV(Memory);
  884. assert(Result.PointerVal != 0 && "Null pointer returned by malloc!");
  885. SetValue(&I, Result, SF);
  886. if (I.getOpcode() == Instruction::Alloca)
  887. ECStack.back().Allocas.add(Memory);
  888. }
  889. // getElementOffset - The workhorse for getelementptr.
  890. //
// Walk the index list [I, E) of a getelementptr, accumulating a byte offset
// (struct fields via the target's StructLayout, sequential indices scaled by
// element allocation size) and return base pointer + offset.
GenericValue Interpreter::executeGEPOperation(Value *Ptr, gep_type_iterator I,
                                              gep_type_iterator E,
                                              ExecutionContext &SF) {
  assert(Ptr->getType()->isPointerTy() &&
         "Cannot getElementOffset of a nonpointer type!");

  // Total byte offset of all indices relative to the base pointer.
  uint64_t Total = 0;

  for (; I != E; ++I) {
    if (StructType *STy = dyn_cast<StructType>(*I)) {
      // Struct index: always a constant field number; the target layout
      // gives the field's byte offset.
      const StructLayout *SLO = TD.getStructLayout(STy);

      const ConstantInt *CPU = cast<ConstantInt>(I.getOperand());
      unsigned Index = unsigned(CPU->getZExtValue());

      Total += SLO->getElementOffset(Index);
    } else {
      SequentialType *ST = cast<SequentialType>(*I);
      // Get the index number for the array... which must be long type...
      GenericValue IdxGV = getOperandValue(I.getOperand(), SF);

      int64_t Idx;
      unsigned BitWidth =
          cast<IntegerType>(I.getOperand()->getType())->getBitWidth();
      if (BitWidth == 32)
        // Sign-extend through int32_t so negative 32-bit indices work.
        Idx = (int64_t)(int32_t)IdxGV.IntVal.getZExtValue();
      else {
        assert(BitWidth == 64 && "Invalid index type for getelementptr");
        Idx = (int64_t)IdxGV.IntVal.getZExtValue();
      }
      // Sequential index scales by the element's allocation size.
      Total += TD.getTypeAllocSize(ST->getElementType())*Idx;
    }
  }

  GenericValue Result;
  Result.PointerVal = ((char*)getOperandValue(Ptr, SF).PointerVal) + Total;
  DEBUG(dbgs() << "GEP Index " << Total << " bytes.\n");
  return Result;
}
  924. void Interpreter::visitGetElementPtrInst(GetElementPtrInst &I) {
  925. ExecutionContext &SF = ECStack.back();
  926. SetValue(&I, executeGEPOperation(I.getPointerOperand(),
  927. gep_type_begin(I), gep_type_end(I), SF), SF);
  928. }
  929. void Interpreter::visitLoadInst(LoadInst &I) {
  930. ExecutionContext &SF = ECStack.back();
  931. GenericValue SRC = getOperandValue(I.getPointerOperand(), SF);
  932. GenericValue *Ptr = (GenericValue*)GVTOP(SRC);
  933. GenericValue Result;
  934. LoadValueFromMemory(Result, Ptr, I.getType());
  935. SetValue(&I, Result, SF);
  936. if (I.isVolatile() && PrintVolatile)
  937. dbgs() << "Volatile load " << I;
  938. }
  939. void Interpreter::visitStoreInst(StoreInst &I) {
  940. ExecutionContext &SF = ECStack.back();
  941. GenericValue Val = getOperandValue(I.getOperand(0), SF);
  942. GenericValue SRC = getOperandValue(I.getPointerOperand(), SF);
  943. StoreValueToMemory(Val, (GenericValue *)GVTOP(SRC),
  944. I.getOperand(0)->getType());
  945. if (I.isVolatile() && PrintVolatile)
  946. dbgs() << "Volatile store: " << I;
  947. }
  948. //===----------------------------------------------------------------------===//
  949. // Miscellaneous Instruction Implementations
  950. //===----------------------------------------------------------------------===//
  951. void Interpreter::visitCallSite(CallSite CS) {
  952. ExecutionContext &SF = ECStack.back();
  953. // Check to see if this is an intrinsic function call...
  954. Function *F = CS.getCalledFunction();
  955. if (F && F->isDeclaration())
  956. switch (F->getIntrinsicID()) {
  957. case Intrinsic::not_intrinsic:
  958. break;
  959. case Intrinsic::vastart: { // va_start
  960. GenericValue ArgIndex;
  961. ArgIndex.UIntPairVal.first = ECStack.size() - 1;
  962. ArgIndex.UIntPairVal.second = 0;
  963. SetValue(CS.getInstruction(), ArgIndex, SF);
  964. return;
  965. }
  966. case Intrinsic::vaend: // va_end is a noop for the interpreter
  967. return;
  968. case Intrinsic::vacopy: // va_copy: dest = src
  969. SetValue(CS.getInstruction(), getOperandValue(*CS.arg_begin(), SF), SF);
  970. return;
  971. default:
  972. // If it is an unknown intrinsic function, use the intrinsic lowering
  973. // class to transform it into hopefully tasty LLVM code.
  974. //
  975. BasicBlock::iterator me(CS.getInstruction());
  976. BasicBlock *Parent = CS.getInstruction()->getParent();
  977. bool atBegin(Parent->begin() == me);
  978. if (!atBegin)
  979. --me;
  980. IL->LowerIntrinsicCall(cast<CallInst>(CS.getInstruction()));
  981. // Restore the CurInst pointer to the first instruction newly inserted, if
  982. // any.
  983. if (atBegin) {
  984. SF.CurInst = Parent->begin();
  985. } else {
  986. SF.CurInst = me;
  987. ++SF.CurInst;
  988. }
  989. return;
  990. }
  991. SF.Caller = CS;
  992. std::vector<GenericValue> ArgVals;
  993. const unsigned NumArgs = SF.Caller.arg_size();
  994. ArgVals.reserve(NumArgs);
  995. uint16_t pNum = 1;
  996. for (CallSite::arg_iterator i = SF.Caller.arg_begin(),
  997. e = SF.Caller.arg_end(); i != e; ++i, ++pNum) {
  998. Value *V = *i;
  999. ArgVals.push_back(getOperandValue(V, SF));
  1000. }
  1001. // To handle indirect calls, we must get the pointer value from the argument
  1002. // and treat it as a function pointer.
  1003. GenericValue SRC = getOperandValue(SF.Caller.getCalledValue(), SF);
  1004. callFunction((Function*)GVTOP(SRC), ArgVals);
  1005. }
  1006. // auxiliary function for shift operations
  1007. static unsigned getShiftAmount(uint64_t orgShiftAmount,
  1008. llvm::APInt valueToShift) {
  1009. unsigned valueWidth = valueToShift.getBitWidth();
  1010. if (orgShiftAmount < (uint64_t)valueWidth)
  1011. return orgShiftAmount;
  1012. // according to the llvm documentation, if orgShiftAmount > valueWidth,
  1013. // the result is undfeined. but we do shift by this rule:
  1014. return (NextPowerOf2(valueWidth-1) - 1) & orgShiftAmount;
  1015. }
  1016. void Interpreter::visitShl(BinaryOperator &I) {
  1017. ExecutionContext &SF = ECStack.back();
  1018. GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
  1019. GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
  1020. GenericValue Dest;
  1021. const Type *Ty = I.getType();
  1022. if (Ty->isVectorTy()) {
  1023. uint32_t src1Size = uint32_t(Src1.AggregateVal.size());
  1024. assert(src1Size == Src2.AggregateVal.size());
  1025. for (unsigned i = 0; i < src1Size; i++) {
  1026. GenericValue Result;
  1027. uint64_t shiftAmount = Src2.AggregateVal[i].IntVal.getZExtValue();
  1028. llvm::APInt valueToShift = Src1.AggregateVal[i].IntVal;
  1029. Result.IntVal = valueToShift.shl(getShiftAmount(shiftAmount, valueToShift));
  1030. Dest.AggregateVal.push_back(Result);
  1031. }
  1032. } else {
  1033. // scalar
  1034. uint64_t shiftAmount = Src2.IntVal.getZExtValue();
  1035. llvm::APInt valueToShift = Src1.IntVal;
  1036. Dest.IntVal = valueToShift.shl(getShiftAmount(shiftAmount, valueToShift));
  1037. }
  1038. SetValue(&I, Dest, SF);
  1039. }
  1040. void Interpreter::visitLShr(BinaryOperator &I) {
  1041. ExecutionContext &SF = ECStack.back();
  1042. GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
  1043. GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
  1044. GenericValue Dest;
  1045. const Type *Ty = I.getType();
  1046. if (Ty->isVectorTy()) {
  1047. uint32_t src1Size = uint32_t(Src1.AggregateVal.size());
  1048. assert(src1Size == Src2.AggregateVal.size());
  1049. for (unsigned i = 0; i < src1Size; i++) {
  1050. GenericValue Result;
  1051. uint64_t shiftAmount = Src2.AggregateVal[i].IntVal.getZExtValue();
  1052. llvm::APInt valueToShift = Src1.AggregateVal[i].IntVal;
  1053. Result.IntVal = valueToShift.lshr(getShiftAmount(shiftAmount, valueToShift));
  1054. Dest.AggregateVal.push_back(Result);
  1055. }
  1056. } else {
  1057. // scalar
  1058. uint64_t shiftAmount = Src2.IntVal.getZExtValue();
  1059. llvm::APInt valueToShift = Src1.IntVal;
  1060. Dest.IntVal = valueToShift.lshr(getShiftAmount(shiftAmount, valueToShift));
  1061. }
  1062. SetValue(&I, Dest, SF);
  1063. }
  1064. void Interpreter::visitAShr(BinaryOperator &I) {
  1065. ExecutionContext &SF = ECStack.back();
  1066. GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
  1067. GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
  1068. GenericValue Dest;
  1069. const Type *Ty = I.getType();
  1070. if (Ty->isVectorTy()) {
  1071. size_t src1Size = Src1.AggregateVal.size();
  1072. assert(src1Size == Src2.AggregateVal.size());
  1073. for (unsigned i = 0; i < src1Size; i++) {
  1074. GenericValue Result;
  1075. uint64_t shiftAmount = Src2.AggregateVal[i].IntVal.getZExtValue();
  1076. llvm::APInt valueToShift = Src1.AggregateVal[i].IntVal;
  1077. Result.IntVal = valueToShift.ashr(getShiftAmount(shiftAmount, valueToShift));
  1078. Dest.AggregateVal.push_back(Result);
  1079. }
  1080. } else {
  1081. // scalar
  1082. uint64_t shiftAmount = Src2.IntVal.getZExtValue();
  1083. llvm::APInt valueToShift = Src1.IntVal;
  1084. Dest.IntVal = valueToShift.ashr(getShiftAmount(shiftAmount, valueToShift));
  1085. }
  1086. SetValue(&I, Dest, SF);
  1087. }
  1088. GenericValue Interpreter::executeTruncInst(Value *SrcVal, Type *DstTy,
  1089. ExecutionContext &SF) {
  1090. GenericValue Dest, Src = getOperandValue(SrcVal, SF);
  1091. Type *SrcTy = SrcVal->getType();
  1092. if (SrcTy->isVectorTy()) {
  1093. Type *DstVecTy = DstTy->getScalarType();
  1094. unsigned DBitWidth = cast<IntegerType>(DstVecTy)->getBitWidth();
  1095. unsigned NumElts = Src.AggregateVal.size();
  1096. // the sizes of src and dst vectors must be equal
  1097. Dest.AggregateVal.resize(NumElts);
  1098. for (unsigned i = 0; i < NumElts; i++)
  1099. Dest.AggregateVal[i].IntVal = Src.AggregateVal[i].IntVal.trunc(DBitWidth);
  1100. } else {
  1101. IntegerType *DITy = cast<IntegerType>(DstTy);
  1102. unsigned DBitWidth = DITy->getBitWidth();
  1103. Dest.IntVal = Src.IntVal.trunc(DBitWidth);
  1104. }
  1105. return Dest;
  1106. }
  1107. GenericValue Interpreter::executeSExtInst(Value *SrcVal, Type *DstTy,
  1108. ExecutionContext &SF) {
  1109. const Type *SrcTy = SrcVal->getType();
  1110. GenericValue Dest, Src = getOperandValue(SrcVal, SF);
  1111. if (SrcTy->isVectorTy()) {
  1112. const Type *DstVecTy = DstTy->getScalarType();
  1113. unsigned DBitWidth = cast<IntegerType>(DstVecTy)->getBitWidth();
  1114. unsigned size = Src.AggregateVal.size();
  1115. // the sizes of src and dst vectors must be equal.
  1116. Dest.AggregateVal.resize(size);
  1117. for (unsigned i = 0; i < size; i++)
  1118. Dest.AggregateVal[i].IntVal = Src.AggregateVal[i].IntVal.sext(DBitWidth);
  1119. } else {
  1120. const IntegerType *DITy = cast<IntegerType>(DstTy);
  1121. unsigned DBitWidth = DITy->getBitWidth();
  1122. Dest.IntVal = Src.IntVal.sext(DBitWidth);
  1123. }
  1124. return Dest;
  1125. }
  1126. GenericValue Interpreter::executeZExtInst(Value *SrcVal, Type *DstTy,
  1127. ExecutionContext &SF) {
  1128. const Type *SrcTy = SrcVal->getType();
  1129. GenericValue Dest, Src = getOperandValue(SrcVal, SF);
  1130. if (SrcTy->isVectorTy()) {
  1131. const Type *DstVecTy = DstTy->getScalarType();
  1132. unsigned DBitWidth = cast<IntegerType>(DstVecTy)->getBitWidth();
  1133. unsigned size = Src.AggregateVal.size();
  1134. // the sizes of src and dst vectors must be equal.
  1135. Dest.AggregateVal.resize(size);
  1136. for (unsigned i = 0; i < size; i++)
  1137. Dest.AggregateVal[i].IntVal = Src.AggregateVal[i].IntVal.zext(DBitWidth);
  1138. } else {
  1139. const IntegerType *DITy = cast<IntegerType>(DstTy);
  1140. unsigned DBitWidth = DITy->getBitWidth();
  1141. Dest.IntVal = Src.IntVal.zext(DBitWidth);
  1142. }
  1143. return Dest;
  1144. }
  1145. GenericValue Interpreter::executeFPTruncInst(Value *SrcVal, Type *DstTy,
  1146. ExecutionContext &SF) {
  1147. GenericValue Dest, Src = getOperandValue(SrcVal, SF);
  1148. if (SrcVal->getType()->getTypeID() == Type::VectorTyID) {
  1149. assert(SrcVal->getType()->getScalarType()->isDoubleTy() &&
  1150. DstTy->getScalarType()->isFloatTy() &&
  1151. "Invalid FPTrunc instruction");
  1152. unsigned size = Src.AggregateVal.size();
  1153. // the sizes of src and dst vectors must be equal.
  1154. Dest.AggregateVal.resize(size);
  1155. for (unsigned i = 0; i < size; i++)
  1156. Dest.AggregateVal[i].FloatVal = (float)Src.AggregateVal[i].DoubleVal;
  1157. } else {
  1158. assert(SrcVal->getType()->isDoubleTy() && DstTy->isFloatTy() &&
  1159. "Invalid FPTrunc instruction");
  1160. Dest.FloatVal = (float)Src.DoubleVal;
  1161. }
  1162. return Dest;
  1163. }
  1164. GenericValue Interpreter::executeFPExtInst(Value *SrcVal, Type *DstTy,
  1165. ExecutionContext &SF) {
  1166. GenericValue Dest, Src = getOperandValue(SrcVal, SF);
  1167. if (SrcVal->getType()->getTypeID() == Type::VectorTyID) {
  1168. assert(SrcVal->getType()->getScalarType()->isFloatTy() &&
  1169. DstTy->getScalarType()->isDoubleTy() && "Invalid FPExt instruction");
  1170. unsigned size = Src.AggregateVal.size();
  1171. // the sizes of src and dst vectors must be equal.
  1172. Dest.AggregateVal.resize(size);
  1173. for (unsigned i = 0; i < size; i++)
  1174. Dest.AggregateVal[i].DoubleVal = (double)Src.AggregateVal[i].FloatVal;
  1175. } else {
  1176. assert(SrcVal->getType()->isFloatTy() && DstTy->isDoubleTy() &&
  1177. "Invalid FPExt instruction");
  1178. Dest.DoubleVal = (double)Src.FloatVal;
  1179. }
  1180. return Dest;
  1181. }
  1182. GenericValue Interpreter::executeFPToUIInst(Value *SrcVal, Type *DstTy,
  1183. ExecutionContext &SF) {
  1184. Type *SrcTy = SrcVal->getType();
  1185. GenericValue Dest, Src = getOperandValue(SrcVal, SF);
  1186. if (SrcTy->getTypeID() == Type::VectorTyID) {
  1187. const Type *DstVecTy = DstTy->getScalarType();
  1188. const Type *SrcVecTy = SrcTy->getScalarType();
  1189. uint32_t DBitWidth = cast<IntegerType>(DstVecTy)->getBitWidth();
  1190. unsigned size = Src.AggregateVal.size();
  1191. // the sizes of src and dst vectors must be equal.
  1192. Dest.AggregateVal.resize(size);
  1193. if (SrcVecTy->getTypeID() == Type::FloatTyID) {
  1194. assert(SrcVecTy->isFloatingPointTy() && "Invalid FPToUI instruction");
  1195. for (unsigned i = 0; i < size; i++)
  1196. Dest.AggregateVal[i].IntVal = APIntOps::RoundFloatToAPInt(
  1197. Src.AggregateVal[i].FloatVal, DBitWidth);
  1198. } else {
  1199. for (unsigned i = 0; i < size; i++)
  1200. Dest.AggregateVal[i].IntVal = APIntOps::RoundDoubleToAPInt(
  1201. Src.AggregateVal[i].DoubleVal, DBitWidth);
  1202. }
  1203. } else {
  1204. // scalar
  1205. uint32_t DBitWidth = cast<IntegerType>(DstTy)->getBitWidth();
  1206. assert(SrcTy->isFloatingPointTy() && "Invalid FPToUI instruction");
  1207. if (SrcTy->getTypeID() == Type::FloatTyID)
  1208. Dest.IntVal = APIntOps::RoundFloatToAPInt(Src.FloatVal, DBitWidth);
  1209. else {
  1210. Dest.IntVal = APIntOps::RoundDoubleToAPInt(Src.DoubleVal, DBitWidth);
  1211. }
  1212. }
  1213. return Dest;
  1214. }
  1215. GenericValue Interpreter::executeFPToSIInst(Value *SrcVal, Type *DstTy,
  1216. ExecutionContext &SF) {
  1217. Type *SrcTy = SrcVal->getType();
  1218. GenericValue Dest, Src = getOperandValue(SrcVal, SF);
  1219. if (SrcTy->getTypeID() == Type::VectorTyID) {
  1220. const Type *DstVecTy = DstTy->getScalarType();
  1221. const Type *SrcVecTy = SrcTy->getScalarType();
  1222. uint32_t DBitWidth = cast<IntegerType>(DstVecTy)->getBitWidth();
  1223. unsigned size = Src.AggregateVal.size();
  1224. // the sizes of src and dst vectors must be equal
  1225. Dest.AggregateVal.resize(size);
  1226. if (SrcVecTy->getTypeID() == Type::FloatTyID) {
  1227. assert(SrcVecTy->isFloatingPointTy() && "Invalid FPToSI instruction");
  1228. for (unsigned i = 0; i < size; i++)
  1229. Dest.AggregateVal[i].IntVal = APIntOps::RoundFloatToAPInt(
  1230. Src.AggregateVal[i].FloatVal, DBitWidth);
  1231. } else {
  1232. for (unsigned i = 0; i < size; i++)
  1233. Dest.AggregateVal[i].IntVal = APIntOps::RoundDoubleToAPInt(
  1234. Src.AggregateVal[i].DoubleVal, DBitWidth);
  1235. }
  1236. } else {
  1237. // scalar
  1238. unsigned DBitWidth = cast<IntegerType>(DstTy)->getBitWidth();
  1239. assert(SrcTy->isFloatingPointTy() && "Invalid FPToSI instruction");
  1240. if (SrcTy->getTypeID() == Type::FloatTyID)
  1241. Dest.IntVal = APIntOps::RoundFloatToAPInt(Src.FloatVal, DBitWidth);
  1242. else {
  1243. Dest.IntVal = APIntOps::RoundDoubleToAPInt(Src.DoubleVal, DBitWidth);
  1244. }
  1245. }
  1246. return Dest;
  1247. }
  1248. GenericValue Interpreter::executeUIToFPInst(Value *SrcVal, Type *DstTy,
  1249. ExecutionContext &SF) {
  1250. GenericValue Dest, Src = getOperandValue(SrcVal, SF);
  1251. if (SrcVal->getType()->getTypeID() == Type::VectorTyID) {
  1252. const Type *DstVecTy = DstTy->getScalarType();
  1253. unsigned size = Src.AggregateVal.size();
  1254. // the sizes of src and dst vectors must be equal
  1255. Dest.AggregateVal.resize(size);
  1256. if (DstVecTy->getTypeID() == Type::FloatTyID) {
  1257. assert(DstVecTy->isFloatingPointTy() && "Invalid UIToFP instruction");
  1258. for (unsigned i = 0; i < size; i++)
  1259. Dest.AggregateVal[i].FloatVal =
  1260. APIntOps::RoundAPIntToFloat(Src.AggregateVal[i].IntVal);
  1261. } else {
  1262. for (unsigned i = 0; i < size; i++)
  1263. Dest.AggregateVal[i].DoubleVal =
  1264. APIntOps::RoundAPIntToDouble(Src.AggregateVal[i].IntVal);
  1265. }
  1266. } else {
  1267. // scalar
  1268. assert(DstTy->isFloatingPointTy() && "Invalid UIToFP instruction");
  1269. if (DstTy->getTypeID() == Type::FloatTyID)
  1270. Dest.FloatVal = APIntOps::RoundAPIntToFloat(Src.IntVal);
  1271. else {
  1272. Dest.DoubleVal = APIntOps::RoundAPIntToDouble(Src.IntVal);
  1273. }
  1274. }
  1275. return Dest;
  1276. }
  1277. GenericValue Interpreter::executeSIToFPInst(Value *SrcVal, Type *DstTy,
  1278. ExecutionContext &SF) {
  1279. GenericValue Dest, Src = getOperandValue(SrcVal, SF);
  1280. if (SrcVal->getType()->getTypeID() == Type::VectorTyID) {
  1281. const Type *DstVecTy = DstTy->getScalarType();
  1282. unsigned size = Src.AggregateVal.size();
  1283. // the sizes of src and dst vectors must be equal
  1284. Dest.AggregateVal.resize(size);
  1285. if (DstVecTy->getTypeID() == Type::FloatTyID) {
  1286. assert(DstVecTy->isFloatingPointTy() && "Invalid SIToFP instruction");
  1287. for (unsigned i = 0; i < size; i++)
  1288. Dest.AggregateVal[i].FloatVal =
  1289. APIntOps::RoundSignedAPIntToFloat(Src.AggregateVal[i].IntVal);
  1290. } else {
  1291. for (unsigned i = 0; i < size; i++)
  1292. Dest.AggregateVal[i].DoubleVal =
  1293. APIntOps::RoundSignedAPIntToDouble(Src.AggregateVal[i].IntVal);
  1294. }
  1295. } else {
  1296. // scalar
  1297. assert(DstTy->isFloatingPointTy() && "Invalid SIToFP instruction");
  1298. if (DstTy->getTypeID() == Type::FloatTyID)
  1299. Dest.FloatVal = APIntOps::RoundSignedAPIntToFloat(Src.IntVal);
  1300. else {
  1301. Dest.DoubleVal = APIntOps::RoundSignedAPIntToDouble(Src.IntVal);
  1302. }
  1303. }
  1304. return Dest;
  1305. }
  1306. GenericValue Interpreter::executePtrToIntInst(Value *SrcVal, Type *DstTy,
  1307. ExecutionContext &SF) {
  1308. uint32_t DBitWidth = cast<IntegerType>(DstTy)->getBitWidth();
  1309. GenericValue Dest, Src = getOperandValue(SrcVal, SF);
  1310. assert(SrcVal->getType()->isPointerTy() && "Invalid PtrToInt instruction");
  1311. Dest.IntVal = APInt(DBitWidth, (intptr_t) Src.PointerVal);
  1312. return Dest;
  1313. }
  1314. GenericValue Interpreter::executeIntToPtrInst(Value *SrcVal, Type *DstTy,
  1315. ExecutionContext &SF) {
  1316. GenericValue Dest, Src = getOperandValue(SrcVal, SF);
  1317. assert(DstTy->isPointerTy() && "Invalid PtrToInt instruction");
  1318. uint32_t PtrSize = TD.getPointerSizeInBits();
  1319. if (PtrSize != Src.IntVal.getBitWidth())
  1320. Src.IntVal = Src.IntVal.zextOrTrunc(PtrSize);
  1321. Dest.PointerVal = PointerTy(intptr_t(Src.IntVal.getZExtValue()));
  1322. return Dest;
  1323. }
/// Execute a 'bitcast': reinterpret the operand's bits as the destination
/// type without changing them.
///
/// Handles scalar<->scalar, vector<->vector, vector<->scalar casts of the
/// same total bit size.  Vector casts are performed by first normalizing the
/// source to a vector of integers, repacking the bits honoring the target's
/// endianness, and finally converting back to the destination element type.
///
/// \param SrcVal value whose bits are reinterpreted.
/// \param DstTy  destination type (total size must match the source's).
/// \param SF     current stack frame used to resolve the operand.
/// \returns the reinterpreted GenericValue.
GenericValue Interpreter::executeBitCastInst(Value *SrcVal, Type *DstTy,
                                             ExecutionContext &SF) {

  // This instruction supports bitwise conversion of vectors to integers and
  // to vectors of other types (as long as they have the same size)
  Type *SrcTy = SrcVal->getType();
  GenericValue Dest, Src = getOperandValue(SrcVal, SF);

  if ((SrcTy->getTypeID() == Type::VectorTyID) ||
      (DstTy->getTypeID() == Type::VectorTyID)) {
    // vector src bitcast to vector dst or vector src bitcast to scalar dst or
    // scalar src bitcast to vector dst
    bool isLittleEndian = TD.isLittleEndian();
    GenericValue TempDst, TempSrc, SrcVec;
    const Type *SrcElemTy;
    const Type *DstElemTy;
    unsigned SrcBitSize;
    unsigned DstBitSize;
    unsigned SrcNum;
    unsigned DstNum;

    if (SrcTy->getTypeID() == Type::VectorTyID) {
      SrcElemTy = SrcTy->getScalarType();
      SrcBitSize = SrcTy->getScalarSizeInBits();
      SrcNum = Src.AggregateVal.size();
      SrcVec = Src;
    } else {
      // if src is scalar value, make it vector <1 x type>
      SrcElemTy = SrcTy;
      SrcBitSize = SrcTy->getPrimitiveSizeInBits();
      SrcNum = 1;
      SrcVec.AggregateVal.push_back(Src);
    }

    if (DstTy->getTypeID() == Type::VectorTyID) {
      DstElemTy = DstTy->getScalarType();
      DstBitSize = DstTy->getScalarSizeInBits();
      // Element counts may differ, but total bits must be conserved.
      DstNum = (SrcNum * SrcBitSize) / DstBitSize;
    } else {
      DstElemTy = DstTy;
      DstBitSize = DstTy->getPrimitiveSizeInBits();
      DstNum = 1;
    }

    // A bitcast never changes the total number of bits.
    if (SrcNum * SrcBitSize != DstNum * DstBitSize)
      llvm_unreachable("Invalid BitCast");

    // If src is floating point, cast to integer first.
    TempSrc.AggregateVal.resize(SrcNum);
    if (SrcElemTy->isFloatTy()) {
      for (unsigned i = 0; i < SrcNum; i++)
        TempSrc.AggregateVal[i].IntVal =
            APInt::floatToBits(SrcVec.AggregateVal[i].FloatVal);
    } else if (SrcElemTy->isDoubleTy()) {
      for (unsigned i = 0; i < SrcNum; i++)
        TempSrc.AggregateVal[i].IntVal =
            APInt::doubleToBits(SrcVec.AggregateVal[i].DoubleVal);
    } else if (SrcElemTy->isIntegerTy()) {
      for (unsigned i = 0; i < SrcNum; i++)
        TempSrc.AggregateVal[i].IntVal = SrcVec.AggregateVal[i].IntVal;
    } else {
      // Pointers are not allowed as the element type of vector.
      llvm_unreachable("Invalid Bitcast");
    }

    // now TempSrc is integer type vector
    if (DstNum < SrcNum) {
      // Narrow-to-wide repack: OR together Ratio source elements per dest.
      // Example: bitcast <4 x i32> <i32 0, i32 1, i32 2, i32 3> to <2 x i64>
      unsigned Ratio = SrcNum / DstNum;
      unsigned SrcElt = 0;
      for (unsigned i = 0; i < DstNum; i++) {
        GenericValue Elt;
        Elt.IntVal = 0;
        Elt.IntVal = Elt.IntVal.zext(DstBitSize);
        // On big-endian targets the first source element lands in the most
        // significant bits, so start at the top and shift downwards.
        unsigned ShiftAmt = isLittleEndian ? 0 : SrcBitSize * (Ratio - 1);
        for (unsigned j = 0; j < Ratio; j++) {
          APInt Tmp;
          Tmp = Tmp.zext(SrcBitSize);
          Tmp = TempSrc.AggregateVal[SrcElt++].IntVal;
          Tmp = Tmp.zext(DstBitSize);
          Tmp = Tmp.shl(ShiftAmt);
          ShiftAmt += isLittleEndian ? SrcBitSize : -SrcBitSize;
          Elt.IntVal |= Tmp;
        }
        TempDst.AggregateVal.push_back(Elt);
      }
    } else {
      // Wide-to-narrow repack: split each source element into Ratio pieces.
      // Example: bitcast <2 x i64> <i64 0, i64 1> to <4 x i32>
      unsigned Ratio = DstNum / SrcNum;
      for (unsigned i = 0; i < SrcNum; i++) {
        unsigned ShiftAmt = isLittleEndian ? 0 : DstBitSize * (Ratio - 1);
        for (unsigned j = 0; j < Ratio; j++) {
          GenericValue Elt;
          Elt.IntVal = Elt.IntVal.zext(SrcBitSize);
          Elt.IntVal = TempSrc.AggregateVal[i].IntVal;
          Elt.IntVal = Elt.IntVal.lshr(ShiftAmt);
          // it could be DstBitSize == SrcBitSize, so check it
          if (DstBitSize < SrcBitSize)
            Elt.IntVal = Elt.IntVal.trunc(DstBitSize);
          ShiftAmt += isLittleEndian ? DstBitSize : -DstBitSize;
          TempDst.AggregateVal.push_back(Elt);
        }
      }
    }

    // convert result from integer to specified type
    if (DstTy->getTypeID() == Type::VectorTyID) {
      if (DstElemTy->isDoubleTy()) {
        Dest.AggregateVal.resize(DstNum);
        for (unsigned i = 0; i < DstNum; i++)
          Dest.AggregateVal[i].DoubleVal =
              TempDst.AggregateVal[i].IntVal.bitsToDouble();
      } else if (DstElemTy->isFloatTy()) {
        Dest.AggregateVal.resize(DstNum);
        for (unsigned i = 0; i < DstNum; i++)
          Dest.AggregateVal[i].FloatVal =
              TempDst.AggregateVal[i].IntVal.bitsToFloat();
      } else {
        Dest = TempDst;
      }
    } else {
      // Scalar destination: unwrap the single repacked element.
      if (DstElemTy->isDoubleTy())
        Dest.DoubleVal = TempDst.AggregateVal[0].IntVal.bitsToDouble();
      else if (DstElemTy->isFloatTy()) {
        Dest.FloatVal = TempDst.AggregateVal[0].IntVal.bitsToFloat();
      } else {
        Dest.IntVal = TempDst.AggregateVal[0].IntVal;
      }
    }
  } else { //  if ((SrcTy->getTypeID() == Type::VectorTyID) ||
           //     (DstTy->getTypeID() == Type::VectorTyID))

    // scalar src bitcast to scalar dst
    if (DstTy->isPointerTy()) {
      assert(SrcTy->isPointerTy() && "Invalid BitCast");
      Dest.PointerVal = Src.PointerVal;
    } else if (DstTy->isIntegerTy()) {
      if (SrcTy->isFloatTy())
        Dest.IntVal = APInt::floatToBits(Src.FloatVal);
      else if (SrcTy->isDoubleTy()) {
        Dest.IntVal = APInt::doubleToBits(Src.DoubleVal);
      } else if (SrcTy->isIntegerTy()) {
        Dest.IntVal = Src.IntVal;
      } else {
        llvm_unreachable("Invalid BitCast");
      }
    } else if (DstTy->isFloatTy()) {
      if (SrcTy->isIntegerTy())
        Dest.FloatVal = Src.IntVal.bitsToFloat();
      else {
        Dest.FloatVal = Src.FloatVal;
      }
    } else if (DstTy->isDoubleTy()) {
      if (SrcTy->isIntegerTy())
        Dest.DoubleVal = Src.IntVal.bitsToDouble();
      else {
        Dest.DoubleVal = Src.DoubleVal;
      }
    } else {
      llvm_unreachable("Invalid Bitcast");
    }
  }

  return Dest;
}
  1479. void Interpreter::visitTruncInst(TruncInst &I) {
  1480. ExecutionContext &SF = ECStack.back();
  1481. SetValue(&I, executeTruncInst(I.getOperand(0), I.getType(), SF), SF);
  1482. }
  1483. void Interpreter::visitSExtInst(SExtInst &I) {
  1484. ExecutionContext &SF = ECStack.back();
  1485. SetValue(&I, executeSExtInst(I.getOperand(0), I.getType(), SF), SF);
  1486. }
  1487. void Interpreter::visitZExtInst(ZExtInst &I) {
  1488. ExecutionContext &SF = ECStack.back();
  1489. SetValue(&I, executeZExtInst(I.getOperand(0), I.getType(), SF), SF);
  1490. }
  1491. void Interpreter::visitFPTruncInst(FPTruncInst &I) {
  1492. ExecutionContext &SF = ECStack.back();
  1493. SetValue(&I, executeFPTruncInst(I.getOperand(0), I.getType(), SF), SF);
  1494. }
  1495. void Interpreter::visitFPExtInst(FPExtInst &I) {
  1496. ExecutionContext &SF = ECStack.back();
  1497. SetValue(&I, executeFPExtInst(I.getOperand(0), I.getType(), SF), SF);
  1498. }
  1499. void Interpreter::visitUIToFPInst(UIToFPInst &I) {
  1500. ExecutionContext &SF = ECStack.back();
  1501. SetValue(&I, executeUIToFPInst(I.getOperand(0), I.getType(), SF), SF);
  1502. }
  1503. void Interpreter::visitSIToFPInst(SIToFPInst &I) {
  1504. ExecutionContext &SF = ECStack.back();
  1505. SetValue(&I, executeSIToFPInst(I.getOperand(0), I.getType(), SF), SF);
  1506. }
  1507. void Interpreter::visitFPToUIInst(FPToUIInst &I) {
  1508. ExecutionContext &SF = ECStack.back();
  1509. SetValue(&I, executeFPToUIInst(I.getOperand(0), I.getType(), SF), SF);
  1510. }
  1511. void Interpreter::visitFPToSIInst(FPToSIInst &I) {
  1512. ExecutionContext &SF = ECStack.back();
  1513. SetValue(&I, executeFPToSIInst(I.getOperand(0), I.getType(), SF), SF);
  1514. }
  1515. void Interpreter::visitPtrToIntInst(PtrToIntInst &I) {
  1516. ExecutionContext &SF = ECStack.back();
  1517. SetValue(&I, executePtrToIntInst(I.getOperand(0), I.getType(), SF), SF);
  1518. }
  1519. void Interpreter::visitIntToPtrInst(IntToPtrInst &I) {
  1520. ExecutionContext &SF = ECStack.back();
  1521. SetValue(&I, executeIntToPtrInst(I.getOperand(0), I.getType(), SF), SF);
  1522. }
  1523. void Interpreter::visitBitCastInst(BitCastInst &I) {
  1524. ExecutionContext &SF = ECStack.back();
  1525. SetValue(&I, executeBitCastInst(I.getOperand(0), I.getType(), SF), SF);
  1526. }
// Copy the GenericValue union member matching type TY from Src to Dest.
#define IMPLEMENT_VAARG(TY) \
   case Type::TY##TyID: Dest.TY##Val = Src.TY##Val; break

/// Execute a 'va_arg' instruction: read the current variadic argument from
/// the frame identified by the valist and produce it as this instruction's
/// value.
void Interpreter::visitVAArgInst(VAArgInst &I) {
  ExecutionContext &SF = ECStack.back();

  // Get the incoming valist parameter.  LLI treats the valist as a
  // (ec-stack-depth var-arg-index) pair.
  GenericValue VAList = getOperandValue(I.getOperand(0), SF);
  GenericValue Dest;
  // Index into the VarArgs vector of the frame the valist refers to.
  GenericValue Src = ECStack[VAList.UIntPairVal.first]
                         .VarArgs[VAList.UIntPairVal.second];
  Type *Ty = I.getType();
  switch (Ty->getTypeID()) {
  case Type::IntegerTyID:
    Dest.IntVal = Src.IntVal;
    break;
  IMPLEMENT_VAARG(Pointer);
  IMPLEMENT_VAARG(Float);
  IMPLEMENT_VAARG(Double);
  default:
    dbgs() << "Unhandled dest type for vaarg instruction: " << *Ty << "\n";
    llvm_unreachable(0);
  }

  // Set the Value of this Instruction.
  SetValue(&I, Dest, SF);

  // Move the pointer to the next vararg.
  // NOTE(review): VAList is a local copy of the operand value, so this
  // increment is not visible to later va_arg uses — confirm whether the
  // valist advance is expected to happen elsewhere.
  ++VAList.UIntPairVal.second;
}
/// Execute an 'extractelement' instruction: read one lane of a vector
/// operand selected by an integer index operand.
void Interpreter::visitExtractElementInst(ExtractElementInst &I) {
  ExecutionContext &SF = ECStack.back();
  GenericValue Src1 = getOperandValue(I.getOperand(0), SF);  // the vector
  GenericValue Src2 = getOperandValue(I.getOperand(1), SF);  // the index
  GenericValue Dest;

  Type *Ty = I.getType();
  const unsigned indx = unsigned(Src2.IntVal.getZExtValue());

  if(Src1.AggregateVal.size() > indx) {
    // Dispatch on the result (element) type to pick the union member.
    switch (Ty->getTypeID()) {
    default:
      dbgs() << "Unhandled destination type for extractelement instruction: "
             << *Ty << "\n";
      llvm_unreachable(0);
      break;
    case Type::IntegerTyID:
      Dest.IntVal = Src1.AggregateVal[indx].IntVal;
      break;
    case Type::FloatTyID:
      Dest.FloatVal = Src1.AggregateVal[indx].FloatVal;
      break;
    case Type::DoubleTyID:
      Dest.DoubleVal = Src1.AggregateVal[indx].DoubleVal;
      break;
    }
  } else {
    // Out-of-range index: only a diagnostic is emitted and Dest is left
    // default-constructed before being stored below.
    dbgs() << "Invalid index in extractelement instruction\n";
  }

  SetValue(&I, Dest, SF);
}
  1583. void Interpreter::visitInsertElementInst(InsertElementInst &I) {
  1584. ExecutionContext &SF = ECStack.back();
  1585. Type *Ty = I.getType();
  1586. if(!(Ty->isVectorTy()) )
  1587. llvm_unreachable("Unhandled dest type for insertelement instruction");
  1588. GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
  1589. GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
  1590. GenericValue Src3 = getOperandValue(I.getOperand(2), SF);
  1591. GenericValue Dest;
  1592. Type *TyContained = Ty->getContainedType(0);
  1593. const unsigned indx = unsigned(Src3.IntVal.getZExtValue());
  1594. Dest.AggregateVal = Src1.AggregateVal;
  1595. if(Src1.AggregateVal.size() <= indx)
  1596. llvm_unreachable("Invalid index in insertelement instruction");
  1597. switch (TyContained->getTypeID()) {
  1598. default:
  1599. llvm_unreachable("Unhandled dest type for insertelement instruction");
  1600. case Type::IntegerTyID:
  1601. Dest.AggregateVal[indx].IntVal = Src2.IntVal;
  1602. break;
  1603. case Type::FloatTyID:
  1604. Dest.AggregateVal[indx].FloatVal = Src2.FloatVal;
  1605. break;
  1606. case Type::DoubleTyID:
  1607. Dest.AggregateVal[indx].DoubleVal = Src2.DoubleVal;
  1608. break;
  1609. }
  1610. SetValue(&I, Dest, SF);
  1611. }
  1612. void Interpreter::visitShuffleVectorInst(ShuffleVectorInst &I){
  1613. ExecutionContext &SF = ECStack.back();
  1614. Type *Ty = I.getType();
  1615. if(!(Ty->isVectorTy()))
  1616. llvm_unreachable("Unhandled dest type for shufflevector instruction");
  1617. GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
  1618. GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
  1619. GenericValue Src3 = getOperandValue(I.getOperand(2), SF);
  1620. GenericValue Dest;
  1621. // There is no need to check types of src1 and src2, because the compiled
  1622. // bytecode can't contain different types for src1 and src2 for a
  1623. // shufflevector instruction.
  1624. Type *TyContained = Ty->getContainedType(0);
  1625. unsigned src1Size = (unsigned)Src1.AggregateVal.size();
  1626. unsigned src2Size = (unsigned)Src2.AggregateVal.size();
  1627. unsigned src3Size = (unsigned)Src3.AggregateVal.size();
  1628. Dest.AggregateVal.resize(src3Size);
  1629. switch (TyContained->getTypeID()) {
  1630. default:
  1631. llvm_unreachable("Unhandled dest type for insertelement instruction");
  1632. break;
  1633. case Type::IntegerTyID:
  1634. for( unsigned i=0; i<src3Size; i++) {
  1635. unsigned j = Src3.AggregateVal[i].IntVal.getZExtValue();
  1636. if(j < src1Size)
  1637. Dest.AggregateVal[i].IntVal = Src1.AggregateVal[j].IntVal;
  1638. else if(j < src1Size + src2Size)
  1639. Dest.AggregateVal[i].IntVal = Src2.AggregateVal[j-src1Size].IntVal;
  1640. else
  1641. // The selector may not be greater than sum of lengths of first and
  1642. // second operands and llasm should not allow situation like
  1643. // %tmp = shufflevector <2 x i32> <i32 3, i32 4>, <2 x i32> undef,
  1644. // <2 x i32> < i32 0, i32 5 >,
  1645. // where i32 5 is invalid, but let it be additional check here:
  1646. llvm_unreachable("Invalid mask in shufflevector instruction");
  1647. }
  1648. break;
  1649. case Type::FloatTyID:
  1650. for( unsigned i=0; i<src3Size; i++) {
  1651. unsigned j = Src3.AggregateVal[i].IntVal.getZExtValue();
  1652. if(j < src1Size)
  1653. Dest.AggregateVal[i].FloatVal = Src1.AggregateVal[j].FloatVal;
  1654. else if(j < src1Size + src2Size)
  1655. Dest.AggregateVal[i].FloatVal = Src2.AggregateVal[j-src1Size].FloatVal;
  1656. else
  1657. llvm_unreachable("Invalid mask in shufflevector instruction");
  1658. }
  1659. break;
  1660. case Type::DoubleTyID:
  1661. for( unsigned i=0; i<src3Size; i++) {
  1662. unsigned j = Src3.AggregateVal[i].IntVal.getZExtValue();
  1663. if(j < src1Size)
  1664. Dest.AggregateVal[i].DoubleVal = Src1.AggregateVal[j].DoubleVal;
  1665. else if(j < src1Size + src2Size)
  1666. Dest.AggregateVal[i].DoubleVal =
  1667. Src2.AggregateVal[j-src1Size].DoubleVal;
  1668. else
  1669. llvm_unreachable("Invalid mask in shufflevector instruction");
  1670. }
  1671. break;
  1672. }
  1673. SetValue(&I, Dest, SF);
  1674. }
  1675. void Interpreter::visitExtractValueInst(ExtractValueInst &I) {
  1676. ExecutionContext &SF = ECStack.back();
  1677. Value *Agg = I.getAggregateOperand();
  1678. GenericValue Dest;
  1679. GenericValue Src = getOperandValue(Agg, SF);
  1680. ExtractValueInst::idx_iterator IdxBegin = I.idx_begin();
  1681. unsigned Num = I.getNumIndices();
  1682. GenericValue *pSrc = &Src;
  1683. for (unsigned i = 0 ; i < Num; ++i) {
  1684. pSrc = &pSrc->AggregateVal[*IdxBegin];
  1685. ++IdxBegin;
  1686. }
  1687. Type *IndexedType = ExtractValueInst::getIndexedType(Agg->getType(), I.getIndices());
  1688. switch (IndexedType->getTypeID()) {
  1689. default:
  1690. llvm_unreachable("Unhandled dest type for extractelement instruction");
  1691. break;
  1692. case Type::IntegerTyID:
  1693. Dest.IntVal = pSrc->IntVal;
  1694. break;
  1695. case Type::FloatTyID:
  1696. Dest.FloatVal = pSrc->FloatVal;
  1697. break;
  1698. case Type::DoubleTyID:
  1699. Dest.DoubleVal = pSrc->DoubleVal;
  1700. break;
  1701. case Type::ArrayTyID:
  1702. case Type::StructTyID:
  1703. case Type::VectorTyID:
  1704. Dest.AggregateVal = pSrc->AggregateVal;
  1705. break;
  1706. case Type::PointerTyID:
  1707. Dest.PointerVal = pSrc->PointerVal;
  1708. break;
  1709. }
  1710. SetValue(&I, Dest, SF);
  1711. }
  1712. void Interpreter::visitInsertValueInst(InsertValueInst &I) {
  1713. ExecutionContext &SF = ECStack.back();
  1714. Value *Agg = I.getAggregateOperand();
  1715. GenericValue Src1 = getOperandValue(Agg, SF);
  1716. GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
  1717. GenericValue Dest = Src1; // Dest is a slightly changed Src1
  1718. ExtractValueInst::idx_iterator IdxBegin = I.idx_begin();
  1719. unsigned Num = I.getNumIndices();
  1720. GenericValue *pDest = &Dest;
  1721. for (unsigned i = 0 ; i < Num; ++i) {
  1722. pDest = &pDest->AggregateVal[*IdxBegin];
  1723. ++IdxBegin;
  1724. }
  1725. // pDest points to the target value in the Dest now
  1726. Type *IndexedType = ExtractValueInst::getIndexedType(Agg->getType(), I.getIndices());
  1727. switch (IndexedType->getTypeID()) {
  1728. default:
  1729. llvm_unreachable("Unhandled dest type for insertelement instruction");
  1730. break;
  1731. case Type::IntegerTyID:
  1732. pDest->IntVal = Src2.IntVal;
  1733. break;
  1734. case Type::FloatTyID:
  1735. pDest->FloatVal = Src2.FloatVal;
  1736. break;
  1737. case Type::DoubleTyID:
  1738. pDest->DoubleVal = Src2.DoubleVal;
  1739. break;
  1740. case Type::ArrayTyID:
  1741. case Type::StructTyID:
  1742. case Type::VectorTyID:
  1743. pDest->AggregateVal = Src2.AggregateVal;
  1744. break;
  1745. case Type::PointerTyID:
  1746. pDest->PointerVal = Src2.PointerVal;
  1747. break;
  1748. }
  1749. SetValue(&I, Dest, SF);
  1750. }
/// Evaluate a ConstantExpr by dispatching on its opcode to the same helpers
/// used for the corresponding instructions.
///
/// \param CE constant expression to fold.
/// \param SF current stack frame, threaded through to the helpers so they
///           can resolve nested operands.
/// \returns the evaluated GenericValue; unhandled opcodes abort via
///          llvm_unreachable.
GenericValue Interpreter::getConstantExprValue (ConstantExpr *CE,
                                                ExecutionContext &SF) {
  // Cast-like and memory/compare/select opcodes delegate directly.
  switch (CE->getOpcode()) {
  case Instruction::Trunc:
      return executeTruncInst(CE->getOperand(0), CE->getType(), SF);
  case Instruction::ZExt:
      return executeZExtInst(CE->getOperand(0), CE->getType(), SF);
  case Instruction::SExt:
      return executeSExtInst(CE->getOperand(0), CE->getType(), SF);
  case Instruction::FPTrunc:
      return executeFPTruncInst(CE->getOperand(0), CE->getType(), SF);
  case Instruction::FPExt:
      return executeFPExtInst(CE->getOperand(0), CE->getType(), SF);
  case Instruction::UIToFP:
      return executeUIToFPInst(CE->getOperand(0), CE->getType(), SF);
  case Instruction::SIToFP:
      return executeSIToFPInst(CE->getOperand(0), CE->getType(), SF);
  case Instruction::FPToUI:
      return executeFPToUIInst(CE->getOperand(0), CE->getType(), SF);
  case Instruction::FPToSI:
      return executeFPToSIInst(CE->getOperand(0), CE->getType(), SF);
  case Instruction::PtrToInt:
      return executePtrToIntInst(CE->getOperand(0), CE->getType(), SF);
  case Instruction::IntToPtr:
      return executeIntToPtrInst(CE->getOperand(0), CE->getType(), SF);
  case Instruction::BitCast:
      return executeBitCastInst(CE->getOperand(0), CE->getType(), SF);
  case Instruction::GetElementPtr:
    return executeGEPOperation(CE->getOperand(0), gep_type_begin(CE),
                               gep_type_end(CE), SF);
  case Instruction::FCmp:
  case Instruction::ICmp:
    return executeCmpInst(CE->getPredicate(),
                          getOperandValue(CE->getOperand(0), SF),
                          getOperandValue(CE->getOperand(1), SF),
                          CE->getOperand(0)->getType());
  case Instruction::Select:
    return executeSelectInst(getOperandValue(CE->getOperand(0), SF),
                             getOperandValue(CE->getOperand(1), SF),
                             getOperandValue(CE->getOperand(2), SF),
                             CE->getOperand(0)->getType());
  default :
    break;
  }

  // The cases below here require a GenericValue parameter for the result
  // so we initialize one, compute it and then return it.
  GenericValue Op0 = getOperandValue(CE->getOperand(0), SF);
  GenericValue Op1 = getOperandValue(CE->getOperand(1), SF);
  GenericValue Dest;
  Type * Ty = CE->getOperand(0)->getType();
  switch (CE->getOpcode()) {
  case Instruction::Add:  Dest.IntVal = Op0.IntVal + Op1.IntVal; break;
  case Instruction::Sub:  Dest.IntVal = Op0.IntVal - Op1.IntVal; break;
  case Instruction::Mul:  Dest.IntVal = Op0.IntVal * Op1.IntVal; break;
  case Instruction::FAdd: executeFAddInst(Dest, Op0, Op1, Ty); break;
  case Instruction::FSub: executeFSubInst(Dest, Op0, Op1, Ty); break;
  case Instruction::FMul: executeFMulInst(Dest, Op0, Op1, Ty); break;
  case Instruction::FDiv: executeFDivInst(Dest, Op0, Op1, Ty); break;
  case Instruction::FRem: executeFRemInst(Dest, Op0, Op1, Ty); break;
  case Instruction::SDiv: Dest.IntVal = Op0.IntVal.sdiv(Op1.IntVal); break;
  case Instruction::UDiv: Dest.IntVal = Op0.IntVal.udiv(Op1.IntVal); break;
  case Instruction::URem: Dest.IntVal = Op0.IntVal.urem(Op1.IntVal); break;
  case Instruction::SRem: Dest.IntVal = Op0.IntVal.srem(Op1.IntVal); break;
  case Instruction::And:  Dest.IntVal = Op0.IntVal & Op1.IntVal; break;
  case Instruction::Or:   Dest.IntVal = Op0.IntVal | Op1.IntVal; break;
  case Instruction::Xor:  Dest.IntVal = Op0.IntVal ^ Op1.IntVal; break;
  case Instruction::Shl:
    // Shift amounts come from the (constant) RHS as a plain integer.
    Dest.IntVal = Op0.IntVal.shl(Op1.IntVal.getZExtValue());
    break;
  case Instruction::LShr:
    Dest.IntVal = Op0.IntVal.lshr(Op1.IntVal.getZExtValue());
    break;
  case Instruction::AShr:
    Dest.IntVal = Op0.IntVal.ashr(Op1.IntVal.getZExtValue());
    break;
  default:
    dbgs() << "Unhandled ConstantExpr: " << *CE << "\n";
    llvm_unreachable("Unhandled ConstantExpr");
  }
  return Dest;
}
/// Resolve an operand to a runtime GenericValue: fold constant expressions,
/// materialize other constants, and otherwise read the SSA value from the
/// current frame's value map.
GenericValue Interpreter::getOperandValue(Value *V, ExecutionContext &SF) {
  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) {
    return getConstantExprValue(CE, SF);
  } else if (Constant *CPV = dyn_cast<Constant>(V)) {
    // GlobalValues are Constants too, so globals are handled here by
    // getConstantValue.
    return getConstantValue(CPV);
  } else if (GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
    // NOTE(review): unreachable — every GlobalValue matches the Constant
    // branch above; kept byte-identical, but this arm is dead code.
    return PTOGV(getPointerToGlobal(GV));
  } else {
    return SF.Values[V];
  }
}
  1843. //===----------------------------------------------------------------------===//
  1844. // Dispatch and Execution Code
  1845. //===----------------------------------------------------------------------===//
  1846. //===----------------------------------------------------------------------===//
  1847. // callFunction - Execute the specified function...
  1848. //
/// Push a new stack frame for F and prepare it for execution.
///
/// External (declaration-only) functions are dispatched immediately through
/// callExternalFunction and their frame is popped again as if a 'ret' had
/// executed.  For functions with a body, the frame's PC is set to the first
/// instruction and the incoming ArgVals are bound to the formal arguments;
/// any surplus values become the frame's varargs.
void Interpreter::callFunction(Function *F,
                               const std::vector<GenericValue> &ArgVals) {
  assert((ECStack.empty() || ECStack.back().Caller.getInstruction() == 0 ||
          ECStack.back().Caller.arg_size() == ArgVals.size()) &&
         "Incorrect number of arguments passed into function call!");
  // Make a new stack frame... and fill it in.
  ECStack.push_back(ExecutionContext());
  ExecutionContext &StackFrame = ECStack.back();
  StackFrame.CurFunction = F;

  // Special handling for external functions.
  if (F->isDeclaration()) {
    GenericValue Result = callExternalFunction (F, ArgVals);
    // Simulate a 'ret' instruction of the appropriate type.
    popStackAndReturnValueToCaller (F->getReturnType (), Result);
    return;
  }

  // Get pointers to first LLVM BB & Instruction in function.
  StackFrame.CurBB     = F->begin();
  StackFrame.CurInst   = StackFrame.CurBB->begin();

  // Run through the function arguments and initialize their values...
  assert((ArgVals.size() == F->arg_size() ||
         (ArgVals.size() > F->arg_size() && F->getFunctionType()->isVarArg()))&&
         "Invalid number of values passed to function invocation!");

  // Handle non-varargs arguments...
  unsigned i = 0;
  for (Function::arg_iterator AI = F->arg_begin(), E = F->arg_end();
       AI != E; ++AI, ++i)
    SetValue(AI, ArgVals[i], StackFrame);

  // Handle varargs arguments...
  // (i now indexes the first value past the formal parameters.)
  StackFrame.VarArgs.assign(ArgVals.begin()+i, ArgVals.end());
}
/// Main interpreter loop: fetch, advance the PC, and dispatch each
/// instruction of the innermost frame until the call stack is empty.
void Interpreter::run() {
  while (!ECStack.empty()) {
    // Interpret a single instruction & increment the "PC".
    ExecutionContext &SF = ECStack.back();  // Current stack frame
    Instruction &I = *SF.CurInst++;         // Increment before execute

    // Track the number of dynamic instructions executed.
    ++NumDynamicInsts;

    DEBUG(dbgs() << "About to interpret: " << I);
    visit(I);   // Dispatch to one of the visit* methods...
#if 0
    // This is not safe, as visiting the instruction could lower it and free I.
DEBUG(
    if (!isa<CallInst>(I) && !isa<InvokeInst>(I) &&
        I.getType() != Type::VoidTy) {
      dbgs() << "  --> ";
      const GenericValue &Val = SF.Values[&I];
      switch (I.getType()->getTypeID()) {
      default: llvm_unreachable("Invalid GenericValue Type");
      case Type::VoidTyID:    dbgs() << "void"; break;
      case Type::FloatTyID:   dbgs() << "float " << Val.FloatVal; break;
      case Type::DoubleTyID:  dbgs() << "double " << Val.DoubleVal; break;
      case Type::PointerTyID: dbgs() << "void* " << intptr_t(Val.PointerVal);
        break;
      case Type::IntegerTyID:
        dbgs() << "i" << Val.IntVal.getBitWidth() << " "
               << Val.IntVal.toStringUnsigned(10)
               << " (0x" << Val.IntVal.toStringUnsigned(16) << ")\n";
        break;
      }
    });
#endif
  }
}