FastISel.cpp 37 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
370470570670770870971071171271371471571671771871972072172272372472572672772872973073173273373473573673773873974074174274374474574674774874975075175275375475575675775875976076176276376476576676776876977077177277377477577677777877978078178278378478578678778878979079179279379479579679779879980080180280380480580680780880981081181281381481581681781881982082182282382482582682782882983083183283383483583683783883984084184284384484584684784884985085185285385485585685785885986086186286386486586686786886987087187287387487587687787887988088188288388488588688788888989089189289389489589689789889990090190290390490590690790890991091191291391491591691791891992092192292392492592692792892993093193293393493593693793893994094194294394494594694794894995095195295395495595695795895996096196296396496596696796896997097197297397497597697797897998098198298398498598698798898999099199299399499599699799899910001001100210031004100510061007100810091010101110121013101410151016101710181019102010211022102310241025102610271028102910301031103210331034103510361037103810391040
  1. ///===-- FastISel.cpp - Implementation of the FastISel class --------------===//
  2. //
  3. // The LLVM Compiler Infrastructure
  4. //
  5. // This file is distributed under the University of Illinois Open Source
  6. // License. See LICENSE.TXT for details.
  7. //
  8. //===----------------------------------------------------------------------===//
  9. //
  10. // This file contains the implementation of the FastISel class.
  11. //
  12. // "Fast" instruction selection is designed to emit very poor code quickly.
  13. // Also, it is not designed to be able to do much lowering, so most illegal
  14. // types (e.g. i64 on 32-bit targets) and operations are not supported. It is
  15. // also not intended to be able to do much optimization, except in a few cases
  16. // where doing optimizations reduces overall compile time. For example, folding
  17. // constants into immediate fields is often done, because it's cheap and it
  18. // reduces the number of instructions later phases have to examine.
  19. //
  20. // "Fast" instruction selection is able to fail gracefully and transfer
  21. // control to the SelectionDAG selector for operations that it doesn't
  22. // support. In many cases, this allows us to avoid duplicating a lot of
  23. // the complicated lowering logic that SelectionDAG currently has.
  24. //
  25. // The intended use for "fast" instruction selection is "-O0" mode
  26. // compilation, where the quality of the generated code is irrelevant when
  27. // weighed against the speed at which the code can be generated. Also,
  28. // at -O0, the LLVM optimizers are not running, and this makes the
  29. // compile time of codegen a much higher portion of the overall compile
  30. // time. Despite its limitations, "fast" instruction selection is able to
  31. // handle enough code on its own to provide noticeable overall speedups
  32. // in -O0 compiles.
  33. //
  34. // Basic operations are supported in a target-independent way, by reading
  35. // the same instruction descriptions that the SelectionDAG selector reads,
  36. // and identifying simple arithmetic operations that can be directly selected
  37. // from simple operators. More complicated operations currently require
  38. // target-specific code.
  39. //
  40. //===----------------------------------------------------------------------===//
  41. #include "llvm/Function.h"
  42. #include "llvm/GlobalVariable.h"
  43. #include "llvm/Instructions.h"
  44. #include "llvm/IntrinsicInst.h"
  45. #include "llvm/CodeGen/FastISel.h"
  46. #include "llvm/CodeGen/MachineInstrBuilder.h"
  47. #include "llvm/CodeGen/MachineModuleInfo.h"
  48. #include "llvm/CodeGen/MachineRegisterInfo.h"
  49. #include "llvm/CodeGen/DebugLoc.h"
  50. #include "llvm/CodeGen/DwarfWriter.h"
  51. #include "llvm/Analysis/DebugInfo.h"
  52. #include "llvm/Target/TargetData.h"
  53. #include "llvm/Target/TargetInstrInfo.h"
  54. #include "llvm/Target/TargetLowering.h"
  55. #include "llvm/Target/TargetMachine.h"
  56. #include "SelectionDAGBuild.h"
  57. using namespace llvm;
/// getRegForValue - Return the virtual register holding the value V,
/// materializing it on demand for constants, allocas, constant expressions,
/// and undef. Returns 0 if V's type cannot be handled by fast-isel, which
/// signals the caller to halt "fast" selection and bail.
unsigned FastISel::getRegForValue(Value *V) {
  MVT RealVT = TLI.getValueType(V->getType(), /*AllowUnknown=*/true);
  // Don't handle non-simple values in FastISel.
  if (!RealVT.isSimple())
    return 0;

  // Ignore illegal types. We must do this before looking up the value
  // in ValueMap because Arguments are given virtual registers regardless
  // of whether FastISel can handle them.
  MVT::SimpleValueType VT = RealVT.getSimpleVT();
  if (!TLI.isTypeLegal(VT)) {
    // Promote MVT::i1 to a legal type though, because it's common and easy.
    if (VT == MVT::i1)
      VT = TLI.getTypeToTransformTo(VT).getSimpleVT();
    else
      return 0;
  }

  // Look up the value to see if we already have a register for it. We
  // cache values defined by Instructions across blocks, and other values
  // only locally. This is because Instructions already have the SSA
  // def-dominates-use requirement enforced.
  if (ValueMap.count(V))
    return ValueMap[V];
  unsigned Reg = LocalValueMap[V];
  if (Reg != 0)
    return Reg;

  if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
    // Only constants that fit in 64 bits are emitted directly.
    if (CI->getValue().getActiveBits() <= 64)
      Reg = FastEmit_i(VT, VT, ISD::Constant, CI->getZExtValue());
  } else if (isa<AllocaInst>(V)) {
    Reg = TargetMaterializeAlloca(cast<AllocaInst>(V));
  } else if (isa<ConstantPointerNull>(V)) {
    // Translate this as an integer zero so that it can be
    // local-CSE'd with actual integer zeros.
    Reg = getRegForValue(Constant::getNullValue(TD.getIntPtrType()));
  } else if (ConstantFP *CF = dyn_cast<ConstantFP>(V)) {
    Reg = FastEmit_f(VT, VT, ISD::ConstantFP, CF);

    if (!Reg) {
      // Direct FP materialization failed; if the constant converts to an
      // integer exactly, materialize the integer and convert it back.
      const APFloat &Flt = CF->getValueAPF();
      MVT IntVT = TLI.getPointerTy();

      uint64_t x[2];
      uint32_t IntBitWidth = IntVT.getSizeInBits();
      bool isExact;
      (void) Flt.convertToInteger(x, IntBitWidth, /*isSigned=*/true,
                                  APFloat::rmTowardZero, &isExact);
      if (isExact) {
        APInt IntVal(IntBitWidth, 2, x);

        unsigned IntegerReg = getRegForValue(ConstantInt::get(IntVal));
        if (IntegerReg != 0)
          Reg = FastEmit_r(IntVT.getSimpleVT(), VT, ISD::SINT_TO_FP, IntegerReg);
      }
    }
  } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) {
    // Selecting the constant expression records its result register in
    // LocalValueMap; fetch it from there afterward.
    if (!SelectOperator(CE, CE->getOpcode())) return 0;
    Reg = LocalValueMap[CE];
  } else if (isa<UndefValue>(V)) {
    // Any register of the right class will do for undef.
    Reg = createResultReg(TLI.getRegClassFor(VT));
    BuildMI(MBB, DL, TII.get(TargetInstrInfo::IMPLICIT_DEF), Reg);
  }

  // If target-independent code couldn't handle the value, give target-specific
  // code a try.
  if (!Reg && isa<Constant>(V))
    Reg = TargetMaterializeConstant(cast<Constant>(V));

  // Don't cache constant materializations in the general ValueMap.
  // To do so would require tracking what uses they dominate.
  if (Reg != 0)
    LocalValueMap[V] = Reg;
  return Reg;
}
  126. unsigned FastISel::lookUpRegForValue(Value *V) {
  127. // Look up the value to see if we already have a register for it. We
  128. // cache values defined by Instructions across blocks, and other values
  129. // only locally. This is because Instructions already have the SSA
  130. // def-dominatess-use requirement enforced.
  131. if (ValueMap.count(V))
  132. return ValueMap[V];
  133. return LocalValueMap[V];
  134. }
  135. /// UpdateValueMap - Update the value map to include the new mapping for this
  136. /// instruction, or insert an extra copy to get the result in a previous
  137. /// determined register.
  138. /// NOTE: This is only necessary because we might select a block that uses
  139. /// a value before we select the block that defines the value. It might be
  140. /// possible to fix this by selecting blocks in reverse postorder.
  141. unsigned FastISel::UpdateValueMap(Value* I, unsigned Reg) {
  142. if (!isa<Instruction>(I)) {
  143. LocalValueMap[I] = Reg;
  144. return Reg;
  145. }
  146. unsigned &AssignedReg = ValueMap[I];
  147. if (AssignedReg == 0)
  148. AssignedReg = Reg;
  149. else if (Reg != AssignedReg) {
  150. const TargetRegisterClass *RegClass = MRI.getRegClass(Reg);
  151. TII.copyRegToReg(*MBB, MBB->end(), AssignedReg,
  152. Reg, RegClass, RegClass);
  153. }
  154. return AssignedReg;
  155. }
  156. unsigned FastISel::getRegForGEPIndex(Value *Idx) {
  157. unsigned IdxN = getRegForValue(Idx);
  158. if (IdxN == 0)
  159. // Unhandled operand. Halt "fast" selection and bail.
  160. return 0;
  161. // If the index is smaller or larger than intptr_t, truncate or extend it.
  162. MVT PtrVT = TLI.getPointerTy();
  163. MVT IdxVT = MVT::getMVT(Idx->getType(), /*HandleUnknown=*/false);
  164. if (IdxVT.bitsLT(PtrVT))
  165. IdxN = FastEmit_r(IdxVT.getSimpleVT(), PtrVT.getSimpleVT(),
  166. ISD::SIGN_EXTEND, IdxN);
  167. else if (IdxVT.bitsGT(PtrVT))
  168. IdxN = FastEmit_r(IdxVT.getSimpleVT(), PtrVT.getSimpleVT(),
  169. ISD::TRUNCATE, IdxN);
  170. return IdxN;
  171. }
/// SelectBinaryOp - Select and emit code for a binary operator instruction,
/// which has an opcode which directly corresponds to the given ISD opcode.
///
bool FastISel::SelectBinaryOp(User *I, ISD::NodeType ISDOpcode) {
  MVT VT = MVT::getMVT(I->getType(), /*HandleUnknown=*/true);
  if (VT == MVT::Other || !VT.isSimple())
    // Unhandled type. Halt "fast" selection and bail.
    return false;

  // We only handle legal types. For example, on x86-32 the instruction
  // selector contains all of the 64-bit instructions from x86-64,
  // under the assumption that i64 won't be used if the target doesn't
  // support it.
  if (!TLI.isTypeLegal(VT)) {
    // MVT::i1 is special. Allow AND, OR, or XOR because they
    // don't require additional zeroing, which makes them easy.
    if (VT == MVT::i1 &&
        (ISDOpcode == ISD::AND || ISDOpcode == ISD::OR ||
         ISDOpcode == ISD::XOR))
      VT = TLI.getTypeToTransformTo(VT);
    else
      return false;
  }

  unsigned Op0 = getRegForValue(I->getOperand(0));
  if (Op0 == 0)
    // Unhandled operand. Halt "fast" selection and bail.
    return false;

  // Check if the second operand is a constant and handle it appropriately.
  if (ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(1))) {
    unsigned ResultReg = FastEmit_ri(VT.getSimpleVT(), VT.getSimpleVT(),
                                     ISDOpcode, Op0, CI->getZExtValue());
    if (ResultReg != 0) {
      // We successfully emitted code for the given LLVM Instruction.
      UpdateValueMap(I, ResultReg);
      return true;
    }
    // Otherwise fall through and try the register-register form below.
  }

  // Check if the second operand is a constant float.
  if (ConstantFP *CF = dyn_cast<ConstantFP>(I->getOperand(1))) {
    unsigned ResultReg = FastEmit_rf(VT.getSimpleVT(), VT.getSimpleVT(),
                                     ISDOpcode, Op0, CF);
    if (ResultReg != 0) {
      // We successfully emitted code for the given LLVM Instruction.
      UpdateValueMap(I, ResultReg);
      return true;
    }
  }

  unsigned Op1 = getRegForValue(I->getOperand(1));
  if (Op1 == 0)
    // Unhandled operand. Halt "fast" selection and bail.
    return false;

  // Now we have both operands in registers. Emit the instruction.
  unsigned ResultReg = FastEmit_rr(VT.getSimpleVT(), VT.getSimpleVT(),
                                   ISDOpcode, Op0, Op1);
  if (ResultReg == 0)
    // Target-specific code wasn't able to find a machine opcode for
    // the given ISD opcode and type. Halt "fast" selection and bail.
    return false;

  // We successfully emitted code for the given LLVM Instruction.
  UpdateValueMap(I, ResultReg);
  return true;
}
/// SelectGetElementPtr - Lower a getelementptr by walking its index list and
/// materializing the address arithmetic (adds and muls) in the target's
/// pointer type. Returns false if any step cannot be fast-selected.
bool FastISel::SelectGetElementPtr(User *I) {
  unsigned N = getRegForValue(I->getOperand(0));
  if (N == 0)
    // Unhandled operand. Halt "fast" selection and bail.
    return false;

  const Type *Ty = I->getOperand(0)->getType();
  MVT::SimpleValueType VT = TLI.getPointerTy().getSimpleVT();
  for (GetElementPtrInst::op_iterator OI = I->op_begin()+1, E = I->op_end();
       OI != E; ++OI) {
    Value *Idx = *OI;
    if (const StructType *StTy = dyn_cast<StructType>(Ty)) {
      // Struct indices are always constant; add the field's byte offset.
      unsigned Field = cast<ConstantInt>(Idx)->getZExtValue();
      if (Field) {
        // N = N + Offset
        uint64_t Offs = TD.getStructLayout(StTy)->getElementOffset(Field);
        // FIXME: This can be optimized by combining the add with a
        // subsequent one.
        N = FastEmit_ri_(VT, ISD::ADD, N, Offs, VT);
        if (N == 0)
          // Unhandled operand. Halt "fast" selection and bail.
          return false;
      }
      Ty = StTy->getElementType(Field);
    } else {
      Ty = cast<SequentialType>(Ty)->getElementType();

      // If this is a constant subscript, handle it quickly.
      if (ConstantInt *CI = dyn_cast<ConstantInt>(Idx)) {
        if (CI->getZExtValue() == 0) continue;
        uint64_t Offs =
          TD.getTypePaddedSize(Ty)*cast<ConstantInt>(CI)->getSExtValue();
        N = FastEmit_ri_(VT, ISD::ADD, N, Offs, VT);
        if (N == 0)
          // Unhandled operand. Halt "fast" selection and bail.
          return false;
        continue;
      }

      // N = N + Idx * ElementSize;
      uint64_t ElementSize = TD.getTypePaddedSize(Ty);
      unsigned IdxN = getRegForGEPIndex(Idx);
      if (IdxN == 0)
        // Unhandled operand. Halt "fast" selection and bail.
        return false;

      if (ElementSize != 1) {
        // Scale the index by the element size.
        IdxN = FastEmit_ri_(VT, ISD::MUL, IdxN, ElementSize, VT);
        if (IdxN == 0)
          // Unhandled operand. Halt "fast" selection and bail.
          return false;
      }
      N = FastEmit_rr(VT, VT, ISD::ADD, N, IdxN);
      if (N == 0)
        // Unhandled operand. Halt "fast" selection and bail.
        return false;
    }
  }

  // We successfully emitted code for the given LLVM Instruction.
  UpdateValueMap(I, N);
  return true;
}
/// SelectCall - Select a call instruction. Only direct calls to intrinsics
/// with trivial lowering (debug-info and exception-handling intrinsics) are
/// handled here; everything else returns false so the SelectionDAG path
/// takes over.
bool FastISel::SelectCall(User *I) {
  Function *F = cast<CallInst>(I)->getCalledFunction();
  if (!F) return false;  // Indirect calls are not handled.

  unsigned IID = F->getIntrinsicID();
  switch (IID) {
  default: break;
  case Intrinsic::dbg_stoppoint: {
    // Record a source-location label and update the current debug location.
    DbgStopPointInst *SPI = cast<DbgStopPointInst>(I);
    if (DW && DW->ValidDebugInfo(SPI->getContext(), true)) {
      DICompileUnit CU(cast<GlobalVariable>(SPI->getContext()));
      std::string Dir, FN;
      unsigned SrcFile = DW->getOrCreateSourceID(CU.getDirectory(Dir),
                                                 CU.getFilename(FN));
      unsigned Line = SPI->getLine();
      unsigned Col = SPI->getColumn();
      unsigned ID = DW->RecordSourceLine(Line, Col, SrcFile);
      unsigned Idx = MF.getOrCreateDebugLocID(SrcFile, Line, Col);
      setCurDebugLoc(DebugLoc::get(Idx));
      const TargetInstrDesc &II = TII.get(TargetInstrInfo::DBG_LABEL);
      BuildMI(MBB, DL, II).addImm(ID);
    }
    return true;
  }
  case Intrinsic::dbg_region_start: {
    // Emit a label marking the start of a lexical region.
    DbgRegionStartInst *RSI = cast<DbgRegionStartInst>(I);
    if (DW && DW->ValidDebugInfo(RSI->getContext(), true)) {
      unsigned ID =
        DW->RecordRegionStart(cast<GlobalVariable>(RSI->getContext()));
      const TargetInstrDesc &II = TII.get(TargetInstrInfo::DBG_LABEL);
      BuildMI(MBB, DL, II).addImm(ID);
    }
    return true;
  }
  case Intrinsic::dbg_region_end: {
    // Emit a label marking the end of a lexical region (possibly the end
    // of an inlined function).
    DbgRegionEndInst *REI = cast<DbgRegionEndInst>(I);
    if (DW && DW->ValidDebugInfo(REI->getContext(), true)) {
      unsigned ID = 0;
      DISubprogram Subprogram(cast<GlobalVariable>(REI->getContext()));
      if (!Subprogram.isNull() && !Subprogram.describes(MF.getFunction())) {
        // This is end of an inlined function.
        const TargetInstrDesc &II = TII.get(TargetInstrInfo::DBG_LABEL);
        ID = DW->RecordInlinedFnEnd(Subprogram);
        if (ID)
          // Returned ID is 0 if this is unbalanced "end of inlined
          // scope". This could happen if optimizer eats dbg intrinsics
          // or "beginning of inlined scope" is not recognized due to
          // missing location info. In such cases, do ignore this region.end.
          BuildMI(MBB, DL, II).addImm(ID);
      } else {
        const TargetInstrDesc &II = TII.get(TargetInstrInfo::DBG_LABEL);
        ID = DW->RecordRegionEnd(cast<GlobalVariable>(REI->getContext()));
        BuildMI(MBB, DL, II).addImm(ID);
      }
    }
    return true;
  }
  case Intrinsic::dbg_func_start: {
    if (!DW) return true;
    DbgFuncStartInst *FSI = cast<DbgFuncStartInst>(I);
    Value *SP = FSI->getSubprogram();
    if (DW->ValidDebugInfo(SP, true)) {
      // llvm.dbg.func.start implicitly defines a dbg_stoppoint which is what
      // (most?) gdb expects.
      DebugLoc PrevLoc = DL;
      DISubprogram Subprogram(cast<GlobalVariable>(SP));
      DICompileUnit CompileUnit = Subprogram.getCompileUnit();
      std::string Dir, FN;
      unsigned SrcFile = DW->getOrCreateSourceID(CompileUnit.getDirectory(Dir),
                                                 CompileUnit.getFilename(FN));

      if (!Subprogram.describes(MF.getFunction())) {
        // This is a beginning of an inlined function.

        // If llvm.dbg.func.start is seen in a new block before any
        // llvm.dbg.stoppoint intrinsic then the location info is unknown.
        // FIXME : Why DebugLoc is reset at the beginning of each block ?
        if (PrevLoc.isUnknown())
          return true;
        // Record the source line.
        unsigned Line = Subprogram.getLineNumber();
        unsigned LabelID = DW->RecordSourceLine(Line, 0, SrcFile);
        setCurDebugLoc(DebugLoc::get(MF.getOrCreateDebugLocID(SrcFile, Line, 0)));

        const TargetInstrDesc &II = TII.get(TargetInstrInfo::DBG_LABEL);
        BuildMI(MBB, DL, II).addImm(LabelID);
        // Remember where the inlined call came from.
        DebugLocTuple PrevLocTpl = MF.getDebugLocTuple(PrevLoc);
        DW->RecordInlinedFnStart(FSI, Subprogram, LabelID,
                                 PrevLocTpl.Src,
                                 PrevLocTpl.Line,
                                 PrevLocTpl.Col);
      } else {
        // Record the source line.
        unsigned Line = Subprogram.getLineNumber();
        setCurDebugLoc(DebugLoc::get(MF.getOrCreateDebugLocID(SrcFile, Line, 0)));
        DW->RecordSourceLine(Line, 0, SrcFile);
        // llvm.dbg.func_start also defines beginning of function scope.
        DW->RecordRegionStart(cast<GlobalVariable>(FSI->getSubprogram()));
      }
    }
    return true;
  }
  case Intrinsic::dbg_declare: {
    DbgDeclareInst *DI = cast<DbgDeclareInst>(I);
    Value *Variable = DI->getVariable();
    if (DW && DW->ValidDebugInfo(Variable, true)) {
      // Determine the address of the declared object.
      Value *Address = DI->getAddress();
      if (BitCastInst *BCI = dyn_cast<BitCastInst>(Address))
        Address = BCI->getOperand(0);
      AllocaInst *AI = dyn_cast<AllocaInst>(Address);
      // Don't handle byval struct arguments or VLAs, for example.
      if (!AI) break;
      DenseMap<const AllocaInst*, int>::iterator SI =
        StaticAllocaMap.find(AI);
      if (SI == StaticAllocaMap.end()) break; // VLAs.
      int FI = SI->second;

      // Determine the debug globalvariable.
      GlobalValue *GV = cast<GlobalVariable>(Variable);

      // Build the DECLARE instruction.
      const TargetInstrDesc &II = TII.get(TargetInstrInfo::DECLARE);
      MachineInstr *DeclareMI
        = BuildMI(MBB, DL, II).addFrameIndex(FI).addGlobalAddress(GV);
      DIVariable DV(cast<GlobalVariable>(GV));
      if (!DV.isNull()) {
        // This is a local variable
        DW->RecordVariableScope(DV, DeclareMI);
      }
    }
    return true;
  }
  case Intrinsic::eh_exception: {
    MVT VT = TLI.getValueType(I->getType());
    switch (TLI.getOperationAction(ISD::EXCEPTIONADDR, VT)) {
    default: break;
    case TargetLowering::Expand: {
      if (!MBB->isLandingPad()) {
        // FIXME: Mark exception register as live in.  Hack for PR1508.
        unsigned Reg = TLI.getExceptionAddressRegister();
        if (Reg) MBB->addLiveIn(Reg);
      }
      // Copy the exception-address register into a fresh virtual register.
      unsigned Reg = TLI.getExceptionAddressRegister();
      const TargetRegisterClass *RC = TLI.getRegClassFor(VT);
      unsigned ResultReg = createResultReg(RC);
      bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
                                           Reg, RC, RC);
      assert(InsertedCopy && "Can't copy address registers!");
      InsertedCopy = InsertedCopy;  // Silence unused warning in NDEBUG builds.
      UpdateValueMap(I, ResultReg);
      return true;
    }
    }
    break;
  }
  case Intrinsic::eh_selector_i32:
  case Intrinsic::eh_selector_i64: {
    MVT VT = TLI.getValueType(I->getType());
    switch (TLI.getOperationAction(ISD::EHSELECTION, VT)) {
    default: break;
    case TargetLowering::Expand: {
      // Note: shadows the outer VT with the intrinsic's fixed result width.
      MVT VT = (IID == Intrinsic::eh_selector_i32 ?
                MVT::i32 : MVT::i64);

      if (MMI) {
        if (MBB->isLandingPad())
          AddCatchInfo(*cast<CallInst>(I), MMI, MBB);
        else {
#ifndef NDEBUG
          CatchInfoLost.insert(cast<CallInst>(I));
#endif
          // FIXME: Mark exception selector register as live in.  Hack for PR1508.
          unsigned Reg = TLI.getExceptionSelectorRegister();
          if (Reg) MBB->addLiveIn(Reg);
        }

        // Copy the exception-selector register into a fresh virtual register.
        unsigned Reg = TLI.getExceptionSelectorRegister();
        const TargetRegisterClass *RC = TLI.getRegClassFor(VT);
        unsigned ResultReg = createResultReg(RC);
        bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
                                             Reg, RC, RC);
        assert(InsertedCopy && "Can't copy address registers!");
        InsertedCopy = InsertedCopy;  // Silence unused warning in NDEBUG builds.
        UpdateValueMap(I, ResultReg);
      } else {
        // No machine-module info: the selector is meaningless, use zero.
        unsigned ResultReg =
          getRegForValue(Constant::getNullValue(I->getType()));
        UpdateValueMap(I, ResultReg);
      }
      return true;
    }
    }
    break;
  }
  }
  return false;
}
/// SelectCast - Select a cast instruction (extension, truncation, or FP
/// conversion) whose LLVM opcode maps directly to the given ISD opcode.
bool FastISel::SelectCast(User *I, ISD::NodeType Opcode) {
  MVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
  MVT DstVT = TLI.getValueType(I->getType());

  if (SrcVT == MVT::Other || !SrcVT.isSimple() ||
      DstVT == MVT::Other || !DstVT.isSimple())
    // Unhandled type. Halt "fast" selection and bail.
    return false;

  // Check if the destination type is legal. Or as a special case,
  // it may be i1 if we're doing a truncate because that's
  // easy and somewhat common.
  if (!TLI.isTypeLegal(DstVT))
    if (DstVT != MVT::i1 || Opcode != ISD::TRUNCATE)
      // Unhandled type. Halt "fast" selection and bail.
      return false;

  // Check if the source operand is legal. Or as a special case,
  // it may be i1 if we're doing zero-extension because that's
  // easy and somewhat common.
  if (!TLI.isTypeLegal(SrcVT))
    if (SrcVT != MVT::i1 || Opcode != ISD::ZERO_EXTEND)
      // Unhandled type. Halt "fast" selection and bail.
      return false;

  unsigned InputReg = getRegForValue(I->getOperand(0));
  if (!InputReg)
    // Unhandled operand. Halt "fast" selection and bail.
    return false;

  // If the operand is i1, arrange for the high bits in the register to be zero.
  if (SrcVT == MVT::i1) {
    SrcVT = TLI.getTypeToTransformTo(SrcVT);
    InputReg = FastEmitZExtFromI1(SrcVT.getSimpleVT(), InputReg);
    if (!InputReg)
      return false;
  }
  // If the result is i1, truncate to the target's type for i1 first.
  if (DstVT == MVT::i1)
    DstVT = TLI.getTypeToTransformTo(DstVT);

  unsigned ResultReg = FastEmit_r(SrcVT.getSimpleVT(),
                                  DstVT.getSimpleVT(),
                                  Opcode,
                                  InputReg);
  if (!ResultReg)
    return false;

  UpdateValueMap(I, ResultReg);
  return true;
}
/// SelectBitCast - Select a bitcast. No-op bitcasts reuse the operand's
/// register; otherwise try a reg-reg copy, falling back to a BIT_CONVERT.
bool FastISel::SelectBitCast(User *I) {
  // If the bitcast doesn't change the type, just use the operand value.
  if (I->getType() == I->getOperand(0)->getType()) {
    unsigned Reg = getRegForValue(I->getOperand(0));
    if (Reg == 0)
      return false;
    UpdateValueMap(I, Reg);
    return true;
  }

  // Bitcasts of other values become reg-reg copies or BIT_CONVERT operators.
  MVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
  MVT DstVT = TLI.getValueType(I->getType());

  if (SrcVT == MVT::Other || !SrcVT.isSimple() ||
      DstVT == MVT::Other || !DstVT.isSimple() ||
      !TLI.isTypeLegal(SrcVT) || !TLI.isTypeLegal(DstVT))
    // Unhandled type. Halt "fast" selection and bail.
    return false;

  unsigned Op0 = getRegForValue(I->getOperand(0));
  if (Op0 == 0)
    // Unhandled operand. Halt "fast" selection and bail.
    return false;

  // First, try to perform the bitcast by inserting a reg-reg copy.
  unsigned ResultReg = 0;
  if (SrcVT.getSimpleVT() == DstVT.getSimpleVT()) {
    TargetRegisterClass* SrcClass = TLI.getRegClassFor(SrcVT);
    TargetRegisterClass* DstClass = TLI.getRegClassFor(DstVT);
    ResultReg = createResultReg(DstClass);

    bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
                                         Op0, DstClass, SrcClass);
    if (!InsertedCopy)
      ResultReg = 0;
  }

  // If the reg-reg copy failed, select a BIT_CONVERT opcode.
  if (!ResultReg)
    ResultReg = FastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(),
                           ISD::BIT_CONVERT, Op0);

  if (!ResultReg)
    return false;

  UpdateValueMap(I, ResultReg);
  return true;
}
  566. bool
  567. FastISel::SelectInstruction(Instruction *I) {
  568. return SelectOperator(I, I->getOpcode());
  569. }
  570. /// FastEmitBranch - Emit an unconditional branch to the given block,
  571. /// unless it is the immediate (fall-through) successor, and update
  572. /// the CFG.
  573. void
  574. FastISel::FastEmitBranch(MachineBasicBlock *MSucc) {
  575. MachineFunction::iterator NextMBB =
  576. next(MachineFunction::iterator(MBB));
  577. if (MBB->isLayoutSuccessor(MSucc)) {
  578. // The unconditional fall-through case, which needs no instructions.
  579. } else {
  580. // The unconditional branch case.
  581. TII.InsertBranch(*MBB, MSucc, NULL, SmallVector<MachineOperand, 0>());
  582. }
  583. MBB->addSuccessor(MSucc);
  584. }
/// SelectOperator - Dispatch on the LLVM opcode and select the operation,
/// mapping most instructions to their corresponding ISD opcodes. Returns
/// false for anything fast-isel cannot handle.
bool
FastISel::SelectOperator(User *I, unsigned Opcode) {
  switch (Opcode) {
  case Instruction::Add: {
    // Pick the FP or integer ISD node based on the result type.
    ISD::NodeType Opc = I->getType()->isFPOrFPVector() ? ISD::FADD : ISD::ADD;
    return SelectBinaryOp(I,  Opc);
  }
  case Instruction::Sub: {
    ISD::NodeType Opc = I->getType()->isFPOrFPVector() ? ISD::FSUB : ISD::SUB;
    return SelectBinaryOp(I,  Opc);
  }
  case Instruction::Mul: {
    ISD::NodeType Opc = I->getType()->isFPOrFPVector() ? ISD::FMUL : ISD::MUL;
    return SelectBinaryOp(I,  Opc);
  }
  case Instruction::SDiv:
    return SelectBinaryOp(I, ISD::SDIV);
  case Instruction::UDiv:
    return SelectBinaryOp(I, ISD::UDIV);
  case Instruction::FDiv:
    return SelectBinaryOp(I, ISD::FDIV);
  case Instruction::SRem:
    return SelectBinaryOp(I, ISD::SREM);
  case Instruction::URem:
    return SelectBinaryOp(I, ISD::UREM);
  case Instruction::FRem:
    return SelectBinaryOp(I, ISD::FREM);
  case Instruction::Shl:
    return SelectBinaryOp(I, ISD::SHL);
  case Instruction::LShr:
    return SelectBinaryOp(I, ISD::SRL);
  case Instruction::AShr:
    return SelectBinaryOp(I, ISD::SRA);
  case Instruction::And:
    return SelectBinaryOp(I, ISD::AND);
  case Instruction::Or:
    return SelectBinaryOp(I, ISD::OR);
  case Instruction::Xor:
    return SelectBinaryOp(I, ISD::XOR);

  case Instruction::GetElementPtr:
    return SelectGetElementPtr(I);

  case Instruction::Br: {
    BranchInst *BI = cast<BranchInst>(I);

    if (BI->isUnconditional()) {
      BasicBlock *LLVMSucc = BI->getSuccessor(0);
      MachineBasicBlock *MSucc = MBBMap[LLVMSucc];
      FastEmitBranch(MSucc);
      return true;
    }

    // Conditional branches are not handled yet.
    // Halt "fast" selection and bail.
    return false;
  }

  case Instruction::Unreachable:
    // Nothing to emit.
    return true;

  case Instruction::PHI:
    // PHI nodes are already emitted.
    return true;

  case Instruction::Alloca:
    // FunctionLowering has the static-sized case covered.
    if (StaticAllocaMap.count(cast<AllocaInst>(I)))
      return true;

    // Dynamic-sized alloca is not handled yet.
    return false;

  case Instruction::Call:
    return SelectCall(I);

  case Instruction::BitCast:
    return SelectBitCast(I);

  case Instruction::FPToSI:
    return SelectCast(I, ISD::FP_TO_SINT);
  case Instruction::ZExt:
    return SelectCast(I, ISD::ZERO_EXTEND);
  case Instruction::SExt:
    return SelectCast(I, ISD::SIGN_EXTEND);
  case Instruction::Trunc:
    return SelectCast(I, ISD::TRUNCATE);
  case Instruction::SIToFP:
    return SelectCast(I, ISD::SINT_TO_FP);

  case Instruction::IntToPtr: // Deliberate fall-through.
  case Instruction::PtrToInt: {
    // int<->ptr casts are extensions, truncations, or pure register reuse
    // depending on the relative bit widths.
    MVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
    MVT DstVT = TLI.getValueType(I->getType());
    if (DstVT.bitsGT(SrcVT))
      return SelectCast(I, ISD::ZERO_EXTEND);
    if (DstVT.bitsLT(SrcVT))
      return SelectCast(I, ISD::TRUNCATE);
    unsigned Reg = getRegForValue(I->getOperand(0));
    if (Reg == 0) return false;
    UpdateValueMap(I, Reg);
    return true;
  }

  default:
    // Unhandled instruction. Halt "fast" selection and bail.
    return false;
  }
}
/// FastISel - Constructor.  The maps (Value-to-virtual-register map,
/// LLVM-to-machine basic block map, and static-alloca map) are owned by
/// the caller and shared by reference.  References to the per-function
/// and per-target objects (register info, frame info, constant pool,
/// target data, instruction info, lowering info) are cached here for
/// use by the emitter methods.
FastISel::FastISel(MachineFunction &mf,
                   MachineModuleInfo *mmi,
                   DwarfWriter *dw,
                   DenseMap<const Value *, unsigned> &vm,
                   DenseMap<const BasicBlock *, MachineBasicBlock *> &bm,
                   DenseMap<const AllocaInst *, int> &am
#ifndef NDEBUG
                   , SmallSet<Instruction*, 8> &cil
#endif
                   )
  : MBB(0),  // No current machine basic block yet.
    ValueMap(vm),
    MBBMap(bm),
    StaticAllocaMap(am),
#ifndef NDEBUG
    CatchInfoLost(cil),
#endif
    MF(mf),
    MMI(mmi),
    DW(dw),
    MRI(MF.getRegInfo()),
    MFI(*MF.getFrameInfo()),
    MCP(*MF.getConstantPool()),
    TM(MF.getTarget()),
    TD(*TM.getTargetData()),
    TII(*TM.getInstrInfo()),
    TLI(*TM.getTargetLowering()) {
}
// Empty destructor defined out-of-line — presumably to anchor the class's
// vtable in this translation unit; TODO(review) confirm FastISel declares
// virtual members in its header.
FastISel::~FastISel() {}
/// FastEmit_ - Default implementation of the no-operand emitter hook.
/// Returns 0 (no register), which callers treat as "not handled".
unsigned FastISel::FastEmit_(MVT::SimpleValueType, MVT::SimpleValueType,
                             ISD::NodeType) {
  return 0;
}
/// FastEmit_r - Default implementation of the one-register-operand emitter
/// hook.  Returns 0 (no register), which callers treat as "not handled".
unsigned FastISel::FastEmit_r(MVT::SimpleValueType, MVT::SimpleValueType,
                              ISD::NodeType, unsigned /*Op0*/) {
  return 0;
}
/// FastEmit_rr - Default implementation of the two-register-operand emitter
/// hook.  Returns 0 (no register), which callers treat as "not handled".
unsigned FastISel::FastEmit_rr(MVT::SimpleValueType, MVT::SimpleValueType,
                               ISD::NodeType, unsigned /*Op0*/,
                               unsigned /*Op1*/) {
  return 0;
}
/// FastEmit_i - Default implementation of the immediate-operand emitter
/// hook (used e.g. by FastEmit_ri_ to materialize constants).  Returns 0
/// (no register), which callers treat as "not handled".
unsigned FastISel::FastEmit_i(MVT::SimpleValueType, MVT::SimpleValueType,
                              ISD::NodeType, uint64_t /*Imm*/) {
  return 0;
}
/// FastEmit_f - Default implementation of the floating-point-immediate
/// emitter hook.  Returns 0 (no register), which callers treat as
/// "not handled".
unsigned FastISel::FastEmit_f(MVT::SimpleValueType, MVT::SimpleValueType,
                              ISD::NodeType, ConstantFP * /*FPImm*/) {
  return 0;
}
/// FastEmit_ri - Default implementation of the register-plus-immediate
/// emitter hook.  Returns 0 (no register), which callers treat as
/// "not handled".
unsigned FastISel::FastEmit_ri(MVT::SimpleValueType, MVT::SimpleValueType,
                               ISD::NodeType, unsigned /*Op0*/,
                               uint64_t /*Imm*/) {
  return 0;
}
/// FastEmit_rf - Default implementation of the register-plus-FP-immediate
/// emitter hook.  Returns 0 (no register), which callers treat as
/// "not handled".
unsigned FastISel::FastEmit_rf(MVT::SimpleValueType, MVT::SimpleValueType,
                               ISD::NodeType, unsigned /*Op0*/,
                               ConstantFP * /*FPImm*/) {
  return 0;
}
/// FastEmit_rri - Default implementation of the two-registers-plus-immediate
/// emitter hook.  Returns 0 (no register), which callers treat as
/// "not handled".
unsigned FastISel::FastEmit_rri(MVT::SimpleValueType, MVT::SimpleValueType,
                                ISD::NodeType,
                                unsigned /*Op0*/, unsigned /*Op1*/,
                                uint64_t /*Imm*/) {
  return 0;
}
  748. /// FastEmit_ri_ - This method is a wrapper of FastEmit_ri. It first tries
  749. /// to emit an instruction with an immediate operand using FastEmit_ri.
  750. /// If that fails, it materializes the immediate into a register and try
  751. /// FastEmit_rr instead.
  752. unsigned FastISel::FastEmit_ri_(MVT::SimpleValueType VT, ISD::NodeType Opcode,
  753. unsigned Op0, uint64_t Imm,
  754. MVT::SimpleValueType ImmType) {
  755. // First check if immediate type is legal. If not, we can't use the ri form.
  756. unsigned ResultReg = FastEmit_ri(VT, VT, Opcode, Op0, Imm);
  757. if (ResultReg != 0)
  758. return ResultReg;
  759. unsigned MaterialReg = FastEmit_i(ImmType, ImmType, ISD::Constant, Imm);
  760. if (MaterialReg == 0)
  761. return 0;
  762. return FastEmit_rr(VT, VT, Opcode, Op0, MaterialReg);
  763. }
/// FastEmit_rf_ - This method is a wrapper of FastEmit_rf. It first tries
/// to emit an instruction with a floating-point immediate operand using
/// FastEmit_rf. If that fails, it materializes the immediate into a register
/// and tries FastEmit_rr instead.  Returns 0 on failure.
unsigned FastISel::FastEmit_rf_(MVT::SimpleValueType VT, ISD::NodeType Opcode,
                                unsigned Op0, ConstantFP *FPImm,
                                MVT::SimpleValueType ImmType) {
  // First check if immediate type is legal. If not, we can't use the rf form.
  unsigned ResultReg = FastEmit_rf(VT, VT, Opcode, Op0, FPImm);
  if (ResultReg != 0)
    return ResultReg;

  // Materialize the constant in a register.
  unsigned MaterialReg = FastEmit_f(ImmType, ImmType, ISD::ConstantFP, FPImm);
  if (MaterialReg == 0) {
    // If the target doesn't have a way to directly enter a floating-point
    // value into a register, use an alternate approach.
    // TODO: The current approach only supports floating-point constants
    // that can be constructed by conversion from integer values. This should
    // be replaced by code that creates a load from a constant-pool entry,
    // which will require some target-specific work.
    const APFloat &Flt = FPImm->getValueAPF();
    // Use the pointer-sized integer type as the intermediate integer type.
    MVT IntVT = TLI.getPointerTy();

    uint64_t x[2];
    uint32_t IntBitWidth = IntVT.getSizeInBits();
    bool isExact;
    // Round toward zero; bail out if the value isn't exactly representable
    // as an integer (the round-trip back through SINT_TO_FP would differ).
    (void) Flt.convertToInteger(x, IntBitWidth, /*isSigned=*/true,
                                APFloat::rmTowardZero, &isExact);
    if (!isExact)
      return 0;
    // Build an APInt from the two raw 64-bit words filled in above.
    APInt IntVal(IntBitWidth, 2, x);

    // NOTE(review): getZExtValue() is only valid for widths <= 64 bits, so
    // this assumes the pointer type is at most 64 bits wide — confirm for
    // all fast-isel targets.
    unsigned IntegerReg = FastEmit_i(IntVT.getSimpleVT(), IntVT.getSimpleVT(),
                                     ISD::Constant, IntVal.getZExtValue());
    if (IntegerReg == 0)
      return 0;
    // Convert the materialized integer to floating point.
    MaterialReg = FastEmit_r(IntVT.getSimpleVT(), VT,
                             ISD::SINT_TO_FP, IntegerReg);
    if (MaterialReg == 0)
      return 0;
  }
  return FastEmit_rr(VT, VT, Opcode, Op0, MaterialReg);
}
/// createResultReg - Allocate a fresh virtual register of class RC to
/// hold an instruction's result value.
unsigned FastISel::createResultReg(const TargetRegisterClass* RC) {
  return MRI.createVirtualRegister(RC);
}
  808. unsigned FastISel::FastEmitInst_(unsigned MachineInstOpcode,
  809. const TargetRegisterClass* RC) {
  810. unsigned ResultReg = createResultReg(RC);
  811. const TargetInstrDesc &II = TII.get(MachineInstOpcode);
  812. BuildMI(MBB, DL, II, ResultReg);
  813. return ResultReg;
  814. }
  815. unsigned FastISel::FastEmitInst_r(unsigned MachineInstOpcode,
  816. const TargetRegisterClass *RC,
  817. unsigned Op0) {
  818. unsigned ResultReg = createResultReg(RC);
  819. const TargetInstrDesc &II = TII.get(MachineInstOpcode);
  820. if (II.getNumDefs() >= 1)
  821. BuildMI(MBB, DL, II, ResultReg).addReg(Op0);
  822. else {
  823. BuildMI(MBB, DL, II).addReg(Op0);
  824. bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
  825. II.ImplicitDefs[0], RC, RC);
  826. if (!InsertedCopy)
  827. ResultReg = 0;
  828. }
  829. return ResultReg;
  830. }
  831. unsigned FastISel::FastEmitInst_rr(unsigned MachineInstOpcode,
  832. const TargetRegisterClass *RC,
  833. unsigned Op0, unsigned Op1) {
  834. unsigned ResultReg = createResultReg(RC);
  835. const TargetInstrDesc &II = TII.get(MachineInstOpcode);
  836. if (II.getNumDefs() >= 1)
  837. BuildMI(MBB, DL, II, ResultReg).addReg(Op0).addReg(Op1);
  838. else {
  839. BuildMI(MBB, DL, II).addReg(Op0).addReg(Op1);
  840. bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
  841. II.ImplicitDefs[0], RC, RC);
  842. if (!InsertedCopy)
  843. ResultReg = 0;
  844. }
  845. return ResultReg;
  846. }
  847. unsigned FastISel::FastEmitInst_ri(unsigned MachineInstOpcode,
  848. const TargetRegisterClass *RC,
  849. unsigned Op0, uint64_t Imm) {
  850. unsigned ResultReg = createResultReg(RC);
  851. const TargetInstrDesc &II = TII.get(MachineInstOpcode);
  852. if (II.getNumDefs() >= 1)
  853. BuildMI(MBB, DL, II, ResultReg).addReg(Op0).addImm(Imm);
  854. else {
  855. BuildMI(MBB, DL, II).addReg(Op0).addImm(Imm);
  856. bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
  857. II.ImplicitDefs[0], RC, RC);
  858. if (!InsertedCopy)
  859. ResultReg = 0;
  860. }
  861. return ResultReg;
  862. }
  863. unsigned FastISel::FastEmitInst_rf(unsigned MachineInstOpcode,
  864. const TargetRegisterClass *RC,
  865. unsigned Op0, ConstantFP *FPImm) {
  866. unsigned ResultReg = createResultReg(RC);
  867. const TargetInstrDesc &II = TII.get(MachineInstOpcode);
  868. if (II.getNumDefs() >= 1)
  869. BuildMI(MBB, DL, II, ResultReg).addReg(Op0).addFPImm(FPImm);
  870. else {
  871. BuildMI(MBB, DL, II).addReg(Op0).addFPImm(FPImm);
  872. bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
  873. II.ImplicitDefs[0], RC, RC);
  874. if (!InsertedCopy)
  875. ResultReg = 0;
  876. }
  877. return ResultReg;
  878. }
  879. unsigned FastISel::FastEmitInst_rri(unsigned MachineInstOpcode,
  880. const TargetRegisterClass *RC,
  881. unsigned Op0, unsigned Op1, uint64_t Imm) {
  882. unsigned ResultReg = createResultReg(RC);
  883. const TargetInstrDesc &II = TII.get(MachineInstOpcode);
  884. if (II.getNumDefs() >= 1)
  885. BuildMI(MBB, DL, II, ResultReg).addReg(Op0).addReg(Op1).addImm(Imm);
  886. else {
  887. BuildMI(MBB, DL, II).addReg(Op0).addReg(Op1).addImm(Imm);
  888. bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
  889. II.ImplicitDefs[0], RC, RC);
  890. if (!InsertedCopy)
  891. ResultReg = 0;
  892. }
  893. return ResultReg;
  894. }
  895. unsigned FastISel::FastEmitInst_i(unsigned MachineInstOpcode,
  896. const TargetRegisterClass *RC,
  897. uint64_t Imm) {
  898. unsigned ResultReg = createResultReg(RC);
  899. const TargetInstrDesc &II = TII.get(MachineInstOpcode);
  900. if (II.getNumDefs() >= 1)
  901. BuildMI(MBB, DL, II, ResultReg).addImm(Imm);
  902. else {
  903. BuildMI(MBB, DL, II).addImm(Imm);
  904. bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
  905. II.ImplicitDefs[0], RC, RC);
  906. if (!InsertedCopy)
  907. ResultReg = 0;
  908. }
  909. return ResultReg;
  910. }
  911. unsigned FastISel::FastEmitInst_extractsubreg(MVT::SimpleValueType RetVT,
  912. unsigned Op0, uint32_t Idx) {
  913. const TargetRegisterClass* RC = MRI.getRegClass(Op0);
  914. unsigned ResultReg = createResultReg(TLI.getRegClassFor(RetVT));
  915. const TargetInstrDesc &II = TII.get(TargetInstrInfo::EXTRACT_SUBREG);
  916. if (II.getNumDefs() >= 1)
  917. BuildMI(MBB, DL, II, ResultReg).addReg(Op0).addImm(Idx);
  918. else {
  919. BuildMI(MBB, DL, II).addReg(Op0).addImm(Idx);
  920. bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
  921. II.ImplicitDefs[0], RC, RC);
  922. if (!InsertedCopy)
  923. ResultReg = 0;
  924. }
  925. return ResultReg;
  926. }
/// FastEmitZExtFromI1 - Emit MachineInstrs to compute the value of Op
/// with all but the least significant bit set to zero.  Returns the result
/// register, or 0 if the target could not emit the AND.
unsigned FastISel::FastEmitZExtFromI1(MVT::SimpleValueType VT, unsigned Op) {
  // An AND with 1 masks off everything above bit 0.
  return FastEmit_ri(VT, VT, ISD::AND, Op, 1);
}