//===-- FastISel.cpp - Implementation of the FastISel class ---------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the FastISel class.
//
// "Fast" instruction selection is designed to emit very poor code quickly.
// Also, it is not designed to be able to do much lowering, so most illegal
// types (e.g. i64 on 32-bit targets) and operations are not supported. It is
// also not intended to be able to do much optimization, except in a few cases
// where doing optimizations reduces overall compile time. For example, folding
// constants into immediate fields is often done, because it's cheap and it
// reduces the number of instructions later phases have to examine.
//
// "Fast" instruction selection is able to fail gracefully and transfer
// control to the SelectionDAG selector for operations that it doesn't
// support. In many cases, this allows us to avoid duplicating a lot of
// the complicated lowering logic that SelectionDAG currently has.
//
// The intended use for "fast" instruction selection is "-O0" mode
// compilation, where the quality of the generated code is irrelevant when
// weighed against the speed at which the code can be generated. Also,
// at -O0, the LLVM optimizers are not running, and this makes the
// compile time of codegen a much higher portion of the overall compile
// time. Despite its limitations, "fast" instruction selection is able to
// handle enough code on its own to provide noticeable overall speedups
// in -O0 compiles.
//
// Basic operations are supported in a target-independent way, by reading
// the same instruction descriptions that the SelectionDAG selector reads,
// and identifying simple arithmetic operations that can be directly selected
// from simple operators. More complicated operations currently require
// target-specific code.
//
//===----------------------------------------------------------------------===//

#include "llvm/Function.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/CodeGen/FastISel.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Analysis/DebugInfo.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Support/ErrorHandling.h"
using namespace llvm;
  58. /// startNewBlock - Set the current block to which generated machine
  59. /// instructions will be appended, and clear the local CSE map.
  60. ///
  61. void FastISel::startNewBlock() {
  62. LocalValueMap.clear();
  63. // Start out as null, meaining no local-value instructions have
  64. // been emitted.
  65. LastLocalValue = 0;
  66. // Advance the last local value past any EH_LABEL instructions.
  67. MachineBasicBlock::iterator
  68. I = FuncInfo.MBB->begin(), E = FuncInfo.MBB->end();
  69. while (I != E && I->getOpcode() == TargetOpcode::EH_LABEL) {
  70. LastLocalValue = I;
  71. ++I;
  72. }
  73. }
  74. bool FastISel::hasTrivialKill(const Value *V) const {
  75. // Don't consider constants or arguments to have trivial kills.
  76. const Instruction *I = dyn_cast<Instruction>(V);
  77. if (!I)
  78. return false;
  79. // No-op casts are trivially coalesced by fast-isel.
  80. if (const CastInst *Cast = dyn_cast<CastInst>(I))
  81. if (Cast->isNoopCast(TD.getIntPtrType(Cast->getContext())) &&
  82. !hasTrivialKill(Cast->getOperand(0)))
  83. return false;
  84. // Only instructions with a single use in the same basic block are considered
  85. // to have trivial kills.
  86. return I->hasOneUse() &&
  87. !(I->getOpcode() == Instruction::BitCast ||
  88. I->getOpcode() == Instruction::PtrToInt ||
  89. I->getOpcode() == Instruction::IntToPtr) &&
  90. cast<Instruction>(*I->use_begin())->getParent() == I->getParent();
  91. }
  92. unsigned FastISel::getRegForValue(const Value *V) {
  93. EVT RealVT = TLI.getValueType(V->getType(), /*AllowUnknown=*/true);
  94. // Don't handle non-simple values in FastISel.
  95. if (!RealVT.isSimple())
  96. return 0;
  97. // Ignore illegal types. We must do this before looking up the value
  98. // in ValueMap because Arguments are given virtual registers regardless
  99. // of whether FastISel can handle them.
  100. MVT VT = RealVT.getSimpleVT();
  101. if (!TLI.isTypeLegal(VT)) {
  102. // Promote MVT::i1 to a legal type though, because it's common and easy.
  103. if (VT == MVT::i1)
  104. VT = TLI.getTypeToTransformTo(V->getContext(), VT).getSimpleVT();
  105. else
  106. return 0;
  107. }
  108. // Look up the value to see if we already have a register for it. We
  109. // cache values defined by Instructions across blocks, and other values
  110. // only locally. This is because Instructions already have the SSA
  111. // def-dominates-use requirement enforced.
  112. DenseMap<const Value *, unsigned>::iterator I = FuncInfo.ValueMap.find(V);
  113. if (I != FuncInfo.ValueMap.end()) {
  114. unsigned Reg = I->second;
  115. return Reg;
  116. }
  117. unsigned Reg = LocalValueMap[V];
  118. if (Reg != 0)
  119. return Reg;
  120. // In bottom-up mode, just create the virtual register which will be used
  121. // to hold the value. It will be materialized later.
  122. if (isa<Instruction>(V) &&
  123. (!isa<AllocaInst>(V) ||
  124. !FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(V))))
  125. return FuncInfo.InitializeRegForValue(V);
  126. SavePoint SaveInsertPt = enterLocalValueArea();
  127. // Materialize the value in a register. Emit any instructions in the
  128. // local value area.
  129. Reg = materializeRegForValue(V, VT);
  130. leaveLocalValueArea(SaveInsertPt);
  131. return Reg;
  132. }
  133. /// materializeRegForValue - Helper for getRegForValue. This function is
  134. /// called when the value isn't already available in a register and must
  135. /// be materialized with new instructions.
  136. unsigned FastISel::materializeRegForValue(const Value *V, MVT VT) {
  137. unsigned Reg = 0;
  138. if (const ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
  139. if (CI->getValue().getActiveBits() <= 64)
  140. Reg = FastEmit_i(VT, VT, ISD::Constant, CI->getZExtValue());
  141. } else if (isa<AllocaInst>(V)) {
  142. Reg = TargetMaterializeAlloca(cast<AllocaInst>(V));
  143. } else if (isa<ConstantPointerNull>(V)) {
  144. // Translate this as an integer zero so that it can be
  145. // local-CSE'd with actual integer zeros.
  146. Reg =
  147. getRegForValue(Constant::getNullValue(TD.getIntPtrType(V->getContext())));
  148. } else if (const ConstantFP *CF = dyn_cast<ConstantFP>(V)) {
  149. // Try to emit the constant directly.
  150. Reg = FastEmit_f(VT, VT, ISD::ConstantFP, CF);
  151. if (!Reg) {
  152. // Try to emit the constant by using an integer constant with a cast.
  153. const APFloat &Flt = CF->getValueAPF();
  154. EVT IntVT = TLI.getPointerTy();
  155. uint64_t x[2];
  156. uint32_t IntBitWidth = IntVT.getSizeInBits();
  157. bool isExact;
  158. (void) Flt.convertToInteger(x, IntBitWidth, /*isSigned=*/true,
  159. APFloat::rmTowardZero, &isExact);
  160. if (isExact) {
  161. APInt IntVal(IntBitWidth, 2, x);
  162. unsigned IntegerReg =
  163. getRegForValue(ConstantInt::get(V->getContext(), IntVal));
  164. if (IntegerReg != 0)
  165. Reg = FastEmit_r(IntVT.getSimpleVT(), VT, ISD::SINT_TO_FP,
  166. IntegerReg, /*Kill=*/false);
  167. }
  168. }
  169. } else if (const Operator *Op = dyn_cast<Operator>(V)) {
  170. if (!SelectOperator(Op, Op->getOpcode()))
  171. if (!isa<Instruction>(Op) ||
  172. !TargetSelectInstruction(cast<Instruction>(Op)))
  173. return 0;
  174. Reg = lookUpRegForValue(Op);
  175. } else if (isa<UndefValue>(V)) {
  176. Reg = createResultReg(TLI.getRegClassFor(VT));
  177. BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
  178. TII.get(TargetOpcode::IMPLICIT_DEF), Reg);
  179. }
  180. // If target-independent code couldn't handle the value, give target-specific
  181. // code a try.
  182. if (!Reg && isa<Constant>(V))
  183. Reg = TargetMaterializeConstant(cast<Constant>(V));
  184. // Don't cache constant materializations in the general ValueMap.
  185. // To do so would require tracking what uses they dominate.
  186. if (Reg != 0) {
  187. LocalValueMap[V] = Reg;
  188. LastLocalValue = MRI.getVRegDef(Reg);
  189. }
  190. return Reg;
  191. }
  192. unsigned FastISel::lookUpRegForValue(const Value *V) {
  193. // Look up the value to see if we already have a register for it. We
  194. // cache values defined by Instructions across blocks, and other values
  195. // only locally. This is because Instructions already have the SSA
  196. // def-dominates-use requirement enforced.
  197. DenseMap<const Value *, unsigned>::iterator I = FuncInfo.ValueMap.find(V);
  198. if (I != FuncInfo.ValueMap.end())
  199. return I->second;
  200. return LocalValueMap[V];
  201. }
  202. /// UpdateValueMap - Update the value map to include the new mapping for this
  203. /// instruction, or insert an extra copy to get the result in a previous
  204. /// determined register.
  205. /// NOTE: This is only necessary because we might select a block that uses
  206. /// a value before we select the block that defines the value. It might be
  207. /// possible to fix this by selecting blocks in reverse postorder.
  208. unsigned FastISel::UpdateValueMap(const Value *I, unsigned Reg) {
  209. if (!isa<Instruction>(I)) {
  210. LocalValueMap[I] = Reg;
  211. return Reg;
  212. }
  213. unsigned &AssignedReg = FuncInfo.ValueMap[I];
  214. if (AssignedReg == 0)
  215. // Use the new register.
  216. AssignedReg = Reg;
  217. else if (Reg != AssignedReg) {
  218. // Arrange for uses of AssignedReg to be replaced by uses of Reg.
  219. FuncInfo.RegFixups[AssignedReg] = Reg;
  220. AssignedReg = Reg;
  221. }
  222. return AssignedReg;
  223. }
  224. std::pair<unsigned, bool> FastISel::getRegForGEPIndex(const Value *Idx) {
  225. unsigned IdxN = getRegForValue(Idx);
  226. if (IdxN == 0)
  227. // Unhandled operand. Halt "fast" selection and bail.
  228. return std::pair<unsigned, bool>(0, false);
  229. bool IdxNIsKill = hasTrivialKill(Idx);
  230. // If the index is smaller or larger than intptr_t, truncate or extend it.
  231. MVT PtrVT = TLI.getPointerTy();
  232. EVT IdxVT = EVT::getEVT(Idx->getType(), /*HandleUnknown=*/false);
  233. if (IdxVT.bitsLT(PtrVT)) {
  234. IdxN = FastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::SIGN_EXTEND,
  235. IdxN, IdxNIsKill);
  236. IdxNIsKill = true;
  237. }
  238. else if (IdxVT.bitsGT(PtrVT)) {
  239. IdxN = FastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::TRUNCATE,
  240. IdxN, IdxNIsKill);
  241. IdxNIsKill = true;
  242. }
  243. return std::pair<unsigned, bool>(IdxN, IdxNIsKill);
  244. }
  245. void FastISel::recomputeInsertPt() {
  246. if (getLastLocalValue()) {
  247. FuncInfo.InsertPt = getLastLocalValue();
  248. FuncInfo.MBB = FuncInfo.InsertPt->getParent();
  249. ++FuncInfo.InsertPt;
  250. } else
  251. FuncInfo.InsertPt = FuncInfo.MBB->getFirstNonPHI();
  252. // Now skip past any EH_LABELs, which must remain at the beginning.
  253. while (FuncInfo.InsertPt != FuncInfo.MBB->end() &&
  254. FuncInfo.InsertPt->getOpcode() == TargetOpcode::EH_LABEL)
  255. ++FuncInfo.InsertPt;
  256. }
  257. FastISel::SavePoint FastISel::enterLocalValueArea() {
  258. MachineBasicBlock::iterator OldInsertPt = FuncInfo.InsertPt;
  259. DebugLoc OldDL = DL;
  260. recomputeInsertPt();
  261. DL = DebugLoc();
  262. SavePoint SP = { OldInsertPt, OldDL };
  263. return SP;
  264. }
  265. void FastISel::leaveLocalValueArea(SavePoint OldInsertPt) {
  266. if (FuncInfo.InsertPt != FuncInfo.MBB->begin())
  267. LastLocalValue = llvm::prior(FuncInfo.InsertPt);
  268. // Restore the previous insert position.
  269. FuncInfo.InsertPt = OldInsertPt.InsertPt;
  270. DL = OldInsertPt.DL;
  271. }
  272. /// SelectBinaryOp - Select and emit code for a binary operator instruction,
  273. /// which has an opcode which directly corresponds to the given ISD opcode.
  274. ///
  275. bool FastISel::SelectBinaryOp(const User *I, unsigned ISDOpcode) {
  276. EVT VT = EVT::getEVT(I->getType(), /*HandleUnknown=*/true);
  277. if (VT == MVT::Other || !VT.isSimple())
  278. // Unhandled type. Halt "fast" selection and bail.
  279. return false;
  280. // We only handle legal types. For example, on x86-32 the instruction
  281. // selector contains all of the 64-bit instructions from x86-64,
  282. // under the assumption that i64 won't be used if the target doesn't
  283. // support it.
  284. if (!TLI.isTypeLegal(VT)) {
  285. // MVT::i1 is special. Allow AND, OR, or XOR because they
  286. // don't require additional zeroing, which makes them easy.
  287. if (VT == MVT::i1 &&
  288. (ISDOpcode == ISD::AND || ISDOpcode == ISD::OR ||
  289. ISDOpcode == ISD::XOR))
  290. VT = TLI.getTypeToTransformTo(I->getContext(), VT);
  291. else
  292. return false;
  293. }
  294. unsigned Op0 = getRegForValue(I->getOperand(0));
  295. if (Op0 == 0)
  296. // Unhandled operand. Halt "fast" selection and bail.
  297. return false;
  298. bool Op0IsKill = hasTrivialKill(I->getOperand(0));
  299. // Check if the second operand is a constant and handle it appropriately.
  300. if (ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(1))) {
  301. unsigned ResultReg = FastEmit_ri(VT.getSimpleVT(), VT.getSimpleVT(),
  302. ISDOpcode, Op0, Op0IsKill,
  303. CI->getZExtValue());
  304. if (ResultReg != 0) {
  305. // We successfully emitted code for the given LLVM Instruction.
  306. UpdateValueMap(I, ResultReg);
  307. return true;
  308. }
  309. }
  310. // Check if the second operand is a constant float.
  311. if (ConstantFP *CF = dyn_cast<ConstantFP>(I->getOperand(1))) {
  312. unsigned ResultReg = FastEmit_rf(VT.getSimpleVT(), VT.getSimpleVT(),
  313. ISDOpcode, Op0, Op0IsKill, CF);
  314. if (ResultReg != 0) {
  315. // We successfully emitted code for the given LLVM Instruction.
  316. UpdateValueMap(I, ResultReg);
  317. return true;
  318. }
  319. }
  320. unsigned Op1 = getRegForValue(I->getOperand(1));
  321. if (Op1 == 0)
  322. // Unhandled operand. Halt "fast" selection and bail.
  323. return false;
  324. bool Op1IsKill = hasTrivialKill(I->getOperand(1));
  325. // Now we have both operands in registers. Emit the instruction.
  326. unsigned ResultReg = FastEmit_rr(VT.getSimpleVT(), VT.getSimpleVT(),
  327. ISDOpcode,
  328. Op0, Op0IsKill,
  329. Op1, Op1IsKill);
  330. if (ResultReg == 0)
  331. // Target-specific code wasn't able to find a machine opcode for
  332. // the given ISD opcode and type. Halt "fast" selection and bail.
  333. return false;
  334. // We successfully emitted code for the given LLVM Instruction.
  335. UpdateValueMap(I, ResultReg);
  336. return true;
  337. }
  338. bool FastISel::SelectGetElementPtr(const User *I) {
  339. unsigned N = getRegForValue(I->getOperand(0));
  340. if (N == 0)
  341. // Unhandled operand. Halt "fast" selection and bail.
  342. return false;
  343. bool NIsKill = hasTrivialKill(I->getOperand(0));
  344. const Type *Ty = I->getOperand(0)->getType();
  345. MVT VT = TLI.getPointerTy();
  346. for (GetElementPtrInst::const_op_iterator OI = I->op_begin()+1,
  347. E = I->op_end(); OI != E; ++OI) {
  348. const Value *Idx = *OI;
  349. if (const StructType *StTy = dyn_cast<StructType>(Ty)) {
  350. unsigned Field = cast<ConstantInt>(Idx)->getZExtValue();
  351. if (Field) {
  352. // N = N + Offset
  353. uint64_t Offs = TD.getStructLayout(StTy)->getElementOffset(Field);
  354. // FIXME: This can be optimized by combining the add with a
  355. // subsequent one.
  356. N = FastEmit_ri_(VT, ISD::ADD, N, NIsKill, Offs, VT);
  357. if (N == 0)
  358. // Unhandled operand. Halt "fast" selection and bail.
  359. return false;
  360. NIsKill = true;
  361. }
  362. Ty = StTy->getElementType(Field);
  363. } else {
  364. Ty = cast<SequentialType>(Ty)->getElementType();
  365. // If this is a constant subscript, handle it quickly.
  366. if (const ConstantInt *CI = dyn_cast<ConstantInt>(Idx)) {
  367. if (CI->isZero()) continue;
  368. uint64_t Offs =
  369. TD.getTypeAllocSize(Ty)*cast<ConstantInt>(CI)->getSExtValue();
  370. N = FastEmit_ri_(VT, ISD::ADD, N, NIsKill, Offs, VT);
  371. if (N == 0)
  372. // Unhandled operand. Halt "fast" selection and bail.
  373. return false;
  374. NIsKill = true;
  375. continue;
  376. }
  377. // N = N + Idx * ElementSize;
  378. uint64_t ElementSize = TD.getTypeAllocSize(Ty);
  379. std::pair<unsigned, bool> Pair = getRegForGEPIndex(Idx);
  380. unsigned IdxN = Pair.first;
  381. bool IdxNIsKill = Pair.second;
  382. if (IdxN == 0)
  383. // Unhandled operand. Halt "fast" selection and bail.
  384. return false;
  385. if (ElementSize != 1) {
  386. IdxN = FastEmit_ri_(VT, ISD::MUL, IdxN, IdxNIsKill, ElementSize, VT);
  387. if (IdxN == 0)
  388. // Unhandled operand. Halt "fast" selection and bail.
  389. return false;
  390. IdxNIsKill = true;
  391. }
  392. N = FastEmit_rr(VT, VT, ISD::ADD, N, NIsKill, IdxN, IdxNIsKill);
  393. if (N == 0)
  394. // Unhandled operand. Halt "fast" selection and bail.
  395. return false;
  396. }
  397. }
  398. // We successfully emitted code for the given LLVM Instruction.
  399. UpdateValueMap(I, N);
  400. return true;
  401. }
  402. bool FastISel::SelectCall(const User *I) {
  403. const Function *F = cast<CallInst>(I)->getCalledFunction();
  404. if (!F) return false;
  405. // Handle selected intrinsic function calls.
  406. unsigned IID = F->getIntrinsicID();
  407. switch (IID) {
  408. default: break;
  409. case Intrinsic::dbg_declare: {
  410. const DbgDeclareInst *DI = cast<DbgDeclareInst>(I);
  411. if (!DIVariable(DI->getVariable()).Verify() ||
  412. !FuncInfo.MF->getMMI().hasDebugInfo())
  413. return true;
  414. const Value *Address = DI->getAddress();
  415. if (!Address)
  416. return true;
  417. if (isa<UndefValue>(Address))
  418. return true;
  419. const AllocaInst *AI = dyn_cast<AllocaInst>(Address);
  420. // Don't handle byval struct arguments or VLAs, for example.
  421. if (!AI) {
  422. // Building the map above is target independent. Generating DBG_VALUE
  423. // inline is target dependent; do this now.
  424. DenseMap<const Value *, unsigned>::iterator It =
  425. FuncInfo.ValueMap.find(Address);
  426. if (0 && It != FuncInfo.ValueMap.end()) {
  427. BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
  428. TII.get(TargetOpcode::DBG_VALUE))
  429. .addReg(It->second, RegState::Debug).addImm(0).addMetadata(DI->getVariable());
  430. } else
  431. (void)TargetSelectInstruction(cast<Instruction>(I));
  432. }
  433. return true;
  434. }
  435. case Intrinsic::dbg_value: {
  436. // This form of DBG_VALUE is target-independent.
  437. const DbgValueInst *DI = cast<DbgValueInst>(I);
  438. const TargetInstrDesc &II = TII.get(TargetOpcode::DBG_VALUE);
  439. const Value *V = DI->getValue();
  440. if (!V) {
  441. // Currently the optimizer can produce this; insert an undef to
  442. // help debugging. Probably the optimizer should not do this.
  443. BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
  444. .addReg(0U).addImm(DI->getOffset())
  445. .addMetadata(DI->getVariable());
  446. } else if (const ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
  447. BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
  448. .addImm(CI->getZExtValue()).addImm(DI->getOffset())
  449. .addMetadata(DI->getVariable());
  450. } else if (const ConstantFP *CF = dyn_cast<ConstantFP>(V)) {
  451. BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
  452. .addFPImm(CF).addImm(DI->getOffset())
  453. .addMetadata(DI->getVariable());
  454. } else if (unsigned Reg = lookUpRegForValue(V)) {
  455. BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
  456. .addReg(Reg, RegState::Debug).addImm(DI->getOffset())
  457. .addMetadata(DI->getVariable());
  458. } else {
  459. // We can't yet handle anything else here because it would require
  460. // generating code, thus altering codegen because of debug info.
  461. // Insert an undef so we can see what we dropped.
  462. BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
  463. .addReg(0U).addImm(DI->getOffset())
  464. .addMetadata(DI->getVariable());
  465. }
  466. return true;
  467. }
  468. case Intrinsic::eh_exception: {
  469. EVT VT = TLI.getValueType(I->getType());
  470. switch (TLI.getOperationAction(ISD::EXCEPTIONADDR, VT)) {
  471. default: break;
  472. case TargetLowering::Expand: {
  473. assert(FuncInfo.MBB->isLandingPad() &&
  474. "Call to eh.exception not in landing pad!");
  475. unsigned Reg = TLI.getExceptionAddressRegister();
  476. const TargetRegisterClass *RC = TLI.getRegClassFor(VT);
  477. unsigned ResultReg = createResultReg(RC);
  478. BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
  479. ResultReg).addReg(Reg);
  480. UpdateValueMap(I, ResultReg);
  481. return true;
  482. }
  483. }
  484. break;
  485. }
  486. case Intrinsic::eh_selector: {
  487. EVT VT = TLI.getValueType(I->getType());
  488. switch (TLI.getOperationAction(ISD::EHSELECTION, VT)) {
  489. default: break;
  490. case TargetLowering::Expand: {
  491. if (FuncInfo.MBB->isLandingPad())
  492. AddCatchInfo(*cast<CallInst>(I), &FuncInfo.MF->getMMI(), FuncInfo.MBB);
  493. else {
  494. #ifndef NDEBUG
  495. FuncInfo.CatchInfoLost.insert(cast<CallInst>(I));
  496. #endif
  497. // FIXME: Mark exception selector register as live in. Hack for PR1508.
  498. unsigned Reg = TLI.getExceptionSelectorRegister();
  499. if (Reg) FuncInfo.MBB->addLiveIn(Reg);
  500. }
  501. unsigned Reg = TLI.getExceptionSelectorRegister();
  502. EVT SrcVT = TLI.getPointerTy();
  503. const TargetRegisterClass *RC = TLI.getRegClassFor(SrcVT);
  504. unsigned ResultReg = createResultReg(RC);
  505. BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
  506. ResultReg).addReg(Reg);
  507. bool ResultRegIsKill = hasTrivialKill(I);
  508. // Cast the register to the type of the selector.
  509. if (SrcVT.bitsGT(MVT::i32))
  510. ResultReg = FastEmit_r(SrcVT.getSimpleVT(), MVT::i32, ISD::TRUNCATE,
  511. ResultReg, ResultRegIsKill);
  512. else if (SrcVT.bitsLT(MVT::i32))
  513. ResultReg = FastEmit_r(SrcVT.getSimpleVT(), MVT::i32,
  514. ISD::SIGN_EXTEND, ResultReg, ResultRegIsKill);
  515. if (ResultReg == 0)
  516. // Unhandled operand. Halt "fast" selection and bail.
  517. return false;
  518. UpdateValueMap(I, ResultReg);
  519. return true;
  520. }
  521. }
  522. break;
  523. }
  524. }
  525. // An arbitrary call. Bail.
  526. return false;
  527. }
  528. bool FastISel::SelectCast(const User *I, unsigned Opcode) {
  529. EVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
  530. EVT DstVT = TLI.getValueType(I->getType());
  531. if (SrcVT == MVT::Other || !SrcVT.isSimple() ||
  532. DstVT == MVT::Other || !DstVT.isSimple())
  533. // Unhandled type. Halt "fast" selection and bail.
  534. return false;
  535. // Check if the destination type is legal. Or as a special case,
  536. // it may be i1 if we're doing a truncate because that's
  537. // easy and somewhat common.
  538. if (!TLI.isTypeLegal(DstVT))
  539. if (DstVT != MVT::i1 || Opcode != ISD::TRUNCATE)
  540. // Unhandled type. Halt "fast" selection and bail.
  541. return false;
  542. // Check if the source operand is legal. Or as a special case,
  543. // it may be i1 if we're doing zero-extension because that's
  544. // easy and somewhat common.
  545. if (!TLI.isTypeLegal(SrcVT))
  546. if (SrcVT != MVT::i1 || Opcode != ISD::ZERO_EXTEND)
  547. // Unhandled type. Halt "fast" selection and bail.
  548. return false;
  549. unsigned InputReg = getRegForValue(I->getOperand(0));
  550. if (!InputReg)
  551. // Unhandled operand. Halt "fast" selection and bail.
  552. return false;
  553. bool InputRegIsKill = hasTrivialKill(I->getOperand(0));
  554. // If the operand is i1, arrange for the high bits in the register to be zero.
  555. if (SrcVT == MVT::i1) {
  556. SrcVT = TLI.getTypeToTransformTo(I->getContext(), SrcVT);
  557. InputReg = FastEmitZExtFromI1(SrcVT.getSimpleVT(), InputReg, InputRegIsKill);
  558. if (!InputReg)
  559. return false;
  560. InputRegIsKill = true;
  561. }
  562. // If the result is i1, truncate to the target's type for i1 first.
  563. if (DstVT == MVT::i1)
  564. DstVT = TLI.getTypeToTransformTo(I->getContext(), DstVT);
  565. unsigned ResultReg = FastEmit_r(SrcVT.getSimpleVT(),
  566. DstVT.getSimpleVT(),
  567. Opcode,
  568. InputReg, InputRegIsKill);
  569. if (!ResultReg)
  570. return false;
  571. UpdateValueMap(I, ResultReg);
  572. return true;
  573. }
  574. bool FastISel::SelectBitCast(const User *I) {
  575. // If the bitcast doesn't change the type, just use the operand value.
  576. if (I->getType() == I->getOperand(0)->getType()) {
  577. unsigned Reg = getRegForValue(I->getOperand(0));
  578. if (Reg == 0)
  579. return false;
  580. UpdateValueMap(I, Reg);
  581. return true;
  582. }
  583. // Bitcasts of other values become reg-reg copies or BIT_CONVERT operators.
  584. EVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
  585. EVT DstVT = TLI.getValueType(I->getType());
  586. if (SrcVT == MVT::Other || !SrcVT.isSimple() ||
  587. DstVT == MVT::Other || !DstVT.isSimple() ||
  588. !TLI.isTypeLegal(SrcVT) || !TLI.isTypeLegal(DstVT))
  589. // Unhandled type. Halt "fast" selection and bail.
  590. return false;
  591. unsigned Op0 = getRegForValue(I->getOperand(0));
  592. if (Op0 == 0)
  593. // Unhandled operand. Halt "fast" selection and bail.
  594. return false;
  595. bool Op0IsKill = hasTrivialKill(I->getOperand(0));
  596. // First, try to perform the bitcast by inserting a reg-reg copy.
  597. unsigned ResultReg = 0;
  598. if (SrcVT.getSimpleVT() == DstVT.getSimpleVT()) {
  599. TargetRegisterClass* SrcClass = TLI.getRegClassFor(SrcVT);
  600. TargetRegisterClass* DstClass = TLI.getRegClassFor(DstVT);
  601. // Don't attempt a cross-class copy. It will likely fail.
  602. if (SrcClass == DstClass) {
  603. ResultReg = createResultReg(DstClass);
  604. BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
  605. ResultReg).addReg(Op0);
  606. }
  607. }
  608. // If the reg-reg copy failed, select a BIT_CONVERT opcode.
  609. if (!ResultReg)
  610. ResultReg = FastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(),
  611. ISD::BIT_CONVERT, Op0, Op0IsKill);
  612. if (!ResultReg)
  613. return false;
  614. UpdateValueMap(I, ResultReg);
  615. return true;
  616. }
  617. bool
  618. FastISel::SelectInstruction(const Instruction *I) {
  619. // Just before the terminator instruction, insert instructions to
  620. // feed PHI nodes in successor blocks.
  621. if (isa<TerminatorInst>(I))
  622. if (!HandlePHINodesInSuccessorBlocks(I->getParent()))
  623. return false;
  624. DL = I->getDebugLoc();
  625. // First, try doing target-independent selection.
  626. if (SelectOperator(I, I->getOpcode())) {
  627. DL = DebugLoc();
  628. return true;
  629. }
  630. // Next, try calling the target to attempt to handle the instruction.
  631. if (TargetSelectInstruction(I)) {
  632. DL = DebugLoc();
  633. return true;
  634. }
  635. DL = DebugLoc();
  636. return false;
  637. }
  638. /// FastEmitBranch - Emit an unconditional branch to the given block,
  639. /// unless it is the immediate (fall-through) successor, and update
  640. /// the CFG.
  641. void
  642. FastISel::FastEmitBranch(MachineBasicBlock *MSucc, DebugLoc DL) {
  643. if (FuncInfo.MBB->isLayoutSuccessor(MSucc)) {
  644. // The unconditional fall-through case, which needs no instructions.
  645. } else {
  646. // The unconditional branch case.
  647. TII.InsertBranch(*FuncInfo.MBB, MSucc, NULL,
  648. SmallVector<MachineOperand, 0>(), DL);
  649. }
  650. FuncInfo.MBB->addSuccessor(MSucc);
  651. }
/// SelectFNeg - Emit an FNeg operation.
///
/// First tries the target's ISD::FNEG; failing that, flips the sign bit
/// manually by bitcasting to an integer, XOR'ing the top bit, and
/// bitcasting back. Returns false if neither strategy works.
bool
FastISel::SelectFNeg(const User *I) {
  unsigned OpReg = getRegForValue(BinaryOperator::getFNegArgument(I));
  if (OpReg == 0) return false;

  bool OpRegIsKill = hasTrivialKill(I);

  // If the target has ISD::FNEG, use it.
  EVT VT = TLI.getValueType(I->getType());
  unsigned ResultReg = FastEmit_r(VT.getSimpleVT(), VT.getSimpleVT(),
                                  ISD::FNEG, OpReg, OpRegIsKill);
  if (ResultReg != 0) {
    UpdateValueMap(I, ResultReg);
    return true;
  }

  // Bitcast the value to integer, twiddle the sign bit with xor,
  // and then bitcast it back to floating-point.
  // The shift below uses a 64-bit constant, so wider types can't be handled.
  if (VT.getSizeInBits() > 64) return false;
  EVT IntVT = EVT::getIntegerVT(I->getContext(), VT.getSizeInBits());
  if (!TLI.isTypeLegal(IntVT))
    return false;

  unsigned IntReg = FastEmit_r(VT.getSimpleVT(), IntVT.getSimpleVT(),
                               ISD::BIT_CONVERT, OpReg, OpRegIsKill);
  if (IntReg == 0)
    return false;

  // XOR the sign bit (the type's most significant bit).
  unsigned IntResultReg = FastEmit_ri_(IntVT.getSimpleVT(), ISD::XOR,
                                       IntReg, /*Kill=*/true,
                                       UINT64_C(1) << (VT.getSizeInBits()-1),
                                       IntVT.getSimpleVT());
  if (IntResultReg == 0)
    return false;

  ResultReg = FastEmit_r(IntVT.getSimpleVT(), VT.getSimpleVT(),
                         ISD::BIT_CONVERT, IntResultReg, /*Kill=*/true);
  if (ResultReg == 0)
    return false;

  UpdateValueMap(I, ResultReg);
  return true;
}
/// SelectOperator - Perform target-independent instruction selection for
/// the given IR opcode of I. Returns true on success; false means the
/// operation is not handled here and the caller should bail out of "fast"
/// selection (or try target-specific selection).
bool
FastISel::SelectOperator(const User *I, unsigned Opcode) {
  switch (Opcode) {
  // Binary operators map one-to-one onto ISD opcodes.
  case Instruction::Add:
    return SelectBinaryOp(I, ISD::ADD);
  case Instruction::FAdd:
    return SelectBinaryOp(I, ISD::FADD);
  case Instruction::Sub:
    return SelectBinaryOp(I, ISD::SUB);
  case Instruction::FSub:
    // FNeg is currently represented in LLVM IR as a special case of FSub.
    if (BinaryOperator::isFNeg(I))
      return SelectFNeg(I);
    return SelectBinaryOp(I, ISD::FSUB);
  case Instruction::Mul:
    return SelectBinaryOp(I, ISD::MUL);
  case Instruction::FMul:
    return SelectBinaryOp(I, ISD::FMUL);
  case Instruction::SDiv:
    return SelectBinaryOp(I, ISD::SDIV);
  case Instruction::UDiv:
    return SelectBinaryOp(I, ISD::UDIV);
  case Instruction::FDiv:
    return SelectBinaryOp(I, ISD::FDIV);
  case Instruction::SRem:
    return SelectBinaryOp(I, ISD::SREM);
  case Instruction::URem:
    return SelectBinaryOp(I, ISD::UREM);
  case Instruction::FRem:
    return SelectBinaryOp(I, ISD::FREM);
  case Instruction::Shl:
    return SelectBinaryOp(I, ISD::SHL);
  case Instruction::LShr:
    return SelectBinaryOp(I, ISD::SRL);
  case Instruction::AShr:
    return SelectBinaryOp(I, ISD::SRA);
  case Instruction::And:
    return SelectBinaryOp(I, ISD::AND);
  case Instruction::Or:
    return SelectBinaryOp(I, ISD::OR);
  case Instruction::Xor:
    return SelectBinaryOp(I, ISD::XOR);

  case Instruction::GetElementPtr:
    return SelectGetElementPtr(I);

  case Instruction::Br: {
    const BranchInst *BI = cast<BranchInst>(I);

    if (BI->isUnconditional()) {
      const BasicBlock *LLVMSucc = BI->getSuccessor(0);
      MachineBasicBlock *MSucc = FuncInfo.MBBMap[LLVMSucc];
      FastEmitBranch(MSucc, BI->getDebugLoc());
      return true;
    }

    // Conditional branches are not handled yet.
    // Halt "fast" selection and bail.
    return false;
  }

  case Instruction::Unreachable:
    // Nothing to emit.
    return true;

  case Instruction::Alloca:
    // FunctionLowering has the static-sized case covered.
    if (FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(I)))
      return true;

    // Dynamic-sized alloca is not handled yet.
    return false;

  case Instruction::Call:
    return SelectCall(I);

  case Instruction::BitCast:
    return SelectBitCast(I);

  case Instruction::FPToSI:
    return SelectCast(I, ISD::FP_TO_SINT);
  case Instruction::ZExt:
    return SelectCast(I, ISD::ZERO_EXTEND);
  case Instruction::SExt:
    return SelectCast(I, ISD::SIGN_EXTEND);
  case Instruction::Trunc:
    return SelectCast(I, ISD::TRUNCATE);
  case Instruction::SIToFP:
    return SelectCast(I, ISD::SINT_TO_FP);

  case Instruction::IntToPtr: // Deliberate fall-through.
  case Instruction::PtrToInt: {
    // Pointer<->integer conversions are lowered as zero-extend, truncate,
    // or a plain register reuse depending on the relative bit widths.
    EVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
    EVT DstVT = TLI.getValueType(I->getType());
    if (DstVT.bitsGT(SrcVT))
      return SelectCast(I, ISD::ZERO_EXTEND);
    if (DstVT.bitsLT(SrcVT))
      return SelectCast(I, ISD::TRUNCATE);
    // Same width: no code needed, just map I to the operand's register.
    unsigned Reg = getRegForValue(I->getOperand(0));
    if (Reg == 0) return false;
    UpdateValueMap(I, Reg);
    return true;
  }

  case Instruction::PHI:
    llvm_unreachable("FastISel shouldn't visit PHI nodes!");

  default:
    // Unhandled instruction. Halt "fast" selection and bail.
    return false;
  }
}
// Construct a FastISel for the current function, caching references to the
// per-function machine objects and the target's interfaces that are used
// throughout instruction selection.
FastISel::FastISel(FunctionLoweringInfo &funcInfo)
  : FuncInfo(funcInfo),
    MRI(FuncInfo.MF->getRegInfo()),
    MFI(*FuncInfo.MF->getFrameInfo()),
    MCP(*FuncInfo.MF->getConstantPool()),
    TM(FuncInfo.MF->getTarget()),
    TD(*TM.getTargetData()),
    TII(*TM.getInstrInfo()),
    TLI(*TM.getTargetLowering()),
    TRI(*TM.getRegisterInfo()) {
}
  800. FastISel::~FastISel() {}
  801. unsigned FastISel::FastEmit_(MVT, MVT,
  802. unsigned) {
  803. return 0;
  804. }
  805. unsigned FastISel::FastEmit_r(MVT, MVT,
  806. unsigned,
  807. unsigned /*Op0*/, bool /*Op0IsKill*/) {
  808. return 0;
  809. }
  810. unsigned FastISel::FastEmit_rr(MVT, MVT,
  811. unsigned,
  812. unsigned /*Op0*/, bool /*Op0IsKill*/,
  813. unsigned /*Op1*/, bool /*Op1IsKill*/) {
  814. return 0;
  815. }
  816. unsigned FastISel::FastEmit_i(MVT, MVT, unsigned, uint64_t /*Imm*/) {
  817. return 0;
  818. }
  819. unsigned FastISel::FastEmit_f(MVT, MVT,
  820. unsigned, const ConstantFP * /*FPImm*/) {
  821. return 0;
  822. }
  823. unsigned FastISel::FastEmit_ri(MVT, MVT,
  824. unsigned,
  825. unsigned /*Op0*/, bool /*Op0IsKill*/,
  826. uint64_t /*Imm*/) {
  827. return 0;
  828. }
  829. unsigned FastISel::FastEmit_rf(MVT, MVT,
  830. unsigned,
  831. unsigned /*Op0*/, bool /*Op0IsKill*/,
  832. const ConstantFP * /*FPImm*/) {
  833. return 0;
  834. }
  835. unsigned FastISel::FastEmit_rri(MVT, MVT,
  836. unsigned,
  837. unsigned /*Op0*/, bool /*Op0IsKill*/,
  838. unsigned /*Op1*/, bool /*Op1IsKill*/,
  839. uint64_t /*Imm*/) {
  840. return 0;
  841. }
  842. /// FastEmit_ri_ - This method is a wrapper of FastEmit_ri. It first tries
  843. /// to emit an instruction with an immediate operand using FastEmit_ri.
  844. /// If that fails, it materializes the immediate into a register and try
  845. /// FastEmit_rr instead.
  846. unsigned FastISel::FastEmit_ri_(MVT VT, unsigned Opcode,
  847. unsigned Op0, bool Op0IsKill,
  848. uint64_t Imm, MVT ImmType) {
  849. // First check if immediate type is legal. If not, we can't use the ri form.
  850. unsigned ResultReg = FastEmit_ri(VT, VT, Opcode, Op0, Op0IsKill, Imm);
  851. if (ResultReg != 0)
  852. return ResultReg;
  853. unsigned MaterialReg = FastEmit_i(ImmType, ImmType, ISD::Constant, Imm);
  854. if (MaterialReg == 0)
  855. return 0;
  856. return FastEmit_rr(VT, VT, Opcode,
  857. Op0, Op0IsKill,
  858. MaterialReg, /*Kill=*/true);
  859. }
/// FastEmit_rf_ - This method is a wrapper of FastEmit_rf. It first tries
/// to emit an instruction with a floating-point immediate operand using
/// FastEmit_rf. If that fails, it materializes the immediate into a register
/// and tries FastEmit_rr instead.
unsigned FastISel::FastEmit_rf_(MVT VT, unsigned Opcode,
                                unsigned Op0, bool Op0IsKill,
                                const ConstantFP *FPImm, MVT ImmType) {
  // Try the register/FP-immediate form directly.
  unsigned ResultReg = FastEmit_rf(VT, VT, Opcode, Op0, Op0IsKill, FPImm);
  if (ResultReg != 0)
    return ResultReg;

  // Materialize the constant in a register.
  unsigned MaterialReg = FastEmit_f(ImmType, ImmType, ISD::ConstantFP, FPImm);
  if (MaterialReg == 0) {
    // If the target doesn't have a way to directly enter a floating-point
    // value into a register, use an alternate approach.
    // TODO: The current approach only supports floating-point constants
    // that can be constructed by conversion from integer values. This should
    // be replaced by code that creates a load from a constant-pool entry,
    // which will require some target-specific work.
    const APFloat &Flt = FPImm->getValueAPF();
    EVT IntVT = TLI.getPointerTy();

    uint64_t x[2];
    uint32_t IntBitWidth = IntVT.getSizeInBits();
    bool isExact;
    // Convert the FP value to a pointer-width signed integer, rounding
    // toward zero; bail out unless the conversion is exact.
    (void) Flt.convertToInteger(x, IntBitWidth, /*isSigned=*/true,
                                APFloat::rmTowardZero, &isExact);
    if (!isExact)
      return 0;
    APInt IntVal(IntBitWidth, 2, x);

    // Materialize the integer, then convert it back to floating-point.
    unsigned IntegerReg = FastEmit_i(IntVT.getSimpleVT(), IntVT.getSimpleVT(),
                                     ISD::Constant, IntVal.getZExtValue());
    if (IntegerReg == 0)
      return 0;
    MaterialReg = FastEmit_r(IntVT.getSimpleVT(), VT,
                             ISD::SINT_TO_FP, IntegerReg, /*Kill=*/true);
    if (MaterialReg == 0)
      return 0;
  }
  // Fall back to the register-register form with the materialized constant.
  return FastEmit_rr(VT, VT, Opcode,
                     Op0, Op0IsKill,
                     MaterialReg, /*Kill=*/true);
}
// Allocate a fresh virtual register of the given class to hold a result.
unsigned FastISel::createResultReg(const TargetRegisterClass* RC) {
  return MRI.createVirtualRegister(RC);
}
  906. unsigned FastISel::FastEmitInst_(unsigned MachineInstOpcode,
  907. const TargetRegisterClass* RC) {
  908. unsigned ResultReg = createResultReg(RC);
  909. const TargetInstrDesc &II = TII.get(MachineInstOpcode);
  910. BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg);
  911. return ResultReg;
  912. }
  913. unsigned FastISel::FastEmitInst_r(unsigned MachineInstOpcode,
  914. const TargetRegisterClass *RC,
  915. unsigned Op0, bool Op0IsKill) {
  916. unsigned ResultReg = createResultReg(RC);
  917. const TargetInstrDesc &II = TII.get(MachineInstOpcode);
  918. if (II.getNumDefs() >= 1)
  919. BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
  920. .addReg(Op0, Op0IsKill * RegState::Kill);
  921. else {
  922. BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
  923. .addReg(Op0, Op0IsKill * RegState::Kill);
  924. BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
  925. ResultReg).addReg(II.ImplicitDefs[0]);
  926. }
  927. return ResultReg;
  928. }
  929. unsigned FastISel::FastEmitInst_rr(unsigned MachineInstOpcode,
  930. const TargetRegisterClass *RC,
  931. unsigned Op0, bool Op0IsKill,
  932. unsigned Op1, bool Op1IsKill) {
  933. unsigned ResultReg = createResultReg(RC);
  934. const TargetInstrDesc &II = TII.get(MachineInstOpcode);
  935. if (II.getNumDefs() >= 1)
  936. BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
  937. .addReg(Op0, Op0IsKill * RegState::Kill)
  938. .addReg(Op1, Op1IsKill * RegState::Kill);
  939. else {
  940. BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
  941. .addReg(Op0, Op0IsKill * RegState::Kill)
  942. .addReg(Op1, Op1IsKill * RegState::Kill);
  943. BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
  944. ResultReg).addReg(II.ImplicitDefs[0]);
  945. }
  946. return ResultReg;
  947. }
  948. unsigned FastISel::FastEmitInst_ri(unsigned MachineInstOpcode,
  949. const TargetRegisterClass *RC,
  950. unsigned Op0, bool Op0IsKill,
  951. uint64_t Imm) {
  952. unsigned ResultReg = createResultReg(RC);
  953. const TargetInstrDesc &II = TII.get(MachineInstOpcode);
  954. if (II.getNumDefs() >= 1)
  955. BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
  956. .addReg(Op0, Op0IsKill * RegState::Kill)
  957. .addImm(Imm);
  958. else {
  959. BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
  960. .addReg(Op0, Op0IsKill * RegState::Kill)
  961. .addImm(Imm);
  962. BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
  963. ResultReg).addReg(II.ImplicitDefs[0]);
  964. }
  965. return ResultReg;
  966. }
  967. unsigned FastISel::FastEmitInst_rf(unsigned MachineInstOpcode,
  968. const TargetRegisterClass *RC,
  969. unsigned Op0, bool Op0IsKill,
  970. const ConstantFP *FPImm) {
  971. unsigned ResultReg = createResultReg(RC);
  972. const TargetInstrDesc &II = TII.get(MachineInstOpcode);
  973. if (II.getNumDefs() >= 1)
  974. BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
  975. .addReg(Op0, Op0IsKill * RegState::Kill)
  976. .addFPImm(FPImm);
  977. else {
  978. BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
  979. .addReg(Op0, Op0IsKill * RegState::Kill)
  980. .addFPImm(FPImm);
  981. BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
  982. ResultReg).addReg(II.ImplicitDefs[0]);
  983. }
  984. return ResultReg;
  985. }
  986. unsigned FastISel::FastEmitInst_rri(unsigned MachineInstOpcode,
  987. const TargetRegisterClass *RC,
  988. unsigned Op0, bool Op0IsKill,
  989. unsigned Op1, bool Op1IsKill,
  990. uint64_t Imm) {
  991. unsigned ResultReg = createResultReg(RC);
  992. const TargetInstrDesc &II = TII.get(MachineInstOpcode);
  993. if (II.getNumDefs() >= 1)
  994. BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
  995. .addReg(Op0, Op0IsKill * RegState::Kill)
  996. .addReg(Op1, Op1IsKill * RegState::Kill)
  997. .addImm(Imm);
  998. else {
  999. BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
  1000. .addReg(Op0, Op0IsKill * RegState::Kill)
  1001. .addReg(Op1, Op1IsKill * RegState::Kill)
  1002. .addImm(Imm);
  1003. BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
  1004. ResultReg).addReg(II.ImplicitDefs[0]);
  1005. }
  1006. return ResultReg;
  1007. }
  1008. unsigned FastISel::FastEmitInst_i(unsigned MachineInstOpcode,
  1009. const TargetRegisterClass *RC,
  1010. uint64_t Imm) {
  1011. unsigned ResultReg = createResultReg(RC);
  1012. const TargetInstrDesc &II = TII.get(MachineInstOpcode);
  1013. if (II.getNumDefs() >= 1)
  1014. BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg).addImm(Imm);
  1015. else {
  1016. BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II).addImm(Imm);
  1017. BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
  1018. ResultReg).addReg(II.ImplicitDefs[0]);
  1019. }
  1020. return ResultReg;
  1021. }
  1022. unsigned FastISel::FastEmitInst_extractsubreg(MVT RetVT,
  1023. unsigned Op0, bool Op0IsKill,
  1024. uint32_t Idx) {
  1025. unsigned ResultReg = createResultReg(TLI.getRegClassFor(RetVT));
  1026. assert(TargetRegisterInfo::isVirtualRegister(Op0) &&
  1027. "Cannot yet extract from physregs");
  1028. BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
  1029. DL, TII.get(TargetOpcode::COPY), ResultReg)
  1030. .addReg(Op0, getKillRegState(Op0IsKill), Idx);
  1031. return ResultReg;
  1032. }
/// FastEmitZExtFromI1 - Emit MachineInstrs to compute the value of Op
/// with all but the least significant bit set to zero.
unsigned FastISel::FastEmitZExtFromI1(MVT VT, unsigned Op0, bool Op0IsKill) {
  // Zero-extending from i1 is just an AND with 1.
  return FastEmit_ri(VT, VT, ISD::AND, Op0, Op0IsKill, 1);
}
/// HandlePHINodesInSuccessorBlocks - Handle PHI nodes in successor blocks.
/// Emit code to ensure constants are copied into registers when needed.
/// Remember the virtual registers that need to be added to the Machine PHI
/// nodes as input. We cannot just directly add them, because expansion
/// might result in multiple MBB's for one BB. As such, the start of the
/// BB might correspond to a different MBB than the end.
bool FastISel::HandlePHINodesInSuccessorBlocks(const BasicBlock *LLVMBB) {
  const TerminatorInst *TI = LLVMBB->getTerminator();

  SmallPtrSet<MachineBasicBlock *, 4> SuccsHandled;
  // Record how many entries were already queued so we can roll back the
  // list if selection fails partway through.
  unsigned OrigNumPHINodesToUpdate = FuncInfo.PHINodesToUpdate.size();

  // Check successor nodes' PHI nodes that expect a constant to be available
  // from this block.
  for (unsigned succ = 0, e = TI->getNumSuccessors(); succ != e; ++succ) {
    const BasicBlock *SuccBB = TI->getSuccessor(succ);
    if (!isa<PHINode>(SuccBB->begin())) continue;
    MachineBasicBlock *SuccMBB = FuncInfo.MBBMap[SuccBB];

    // If this terminator has multiple identical successors (common for
    // switches), only handle each succ once.
    if (!SuccsHandled.insert(SuccMBB)) continue;

    MachineBasicBlock::iterator MBBI = SuccMBB->begin();

    // At this point we know that there is a 1-1 correspondence between LLVM PHI
    // nodes and Machine PHI nodes, but the incoming operands have not been
    // emitted yet.
    for (BasicBlock::const_iterator I = SuccBB->begin();
         const PHINode *PN = dyn_cast<PHINode>(I); ++I) {

      // Ignore dead phi's.
      if (PN->use_empty()) continue;

      // Only handle legal types. Two interesting things to note here. First,
      // by bailing out early, we may leave behind some dead instructions,
      // since SelectionDAG's HandlePHINodesInSuccessorBlocks will insert its
      // own moves. Second, this check is necessary because FastISel doesn't
      // use CreateRegs to create registers, so it always creates
      // exactly one register for each non-void instruction.
      EVT VT = TLI.getValueType(PN->getType(), /*AllowUnknown=*/true);
      if (VT == MVT::Other || !TLI.isTypeLegal(VT)) {
        // Promote MVT::i1.
        if (VT == MVT::i1)
          VT = TLI.getTypeToTransformTo(LLVMBB->getContext(), VT);
        else {
          // Unhandled type: undo any queued updates and bail.
          FuncInfo.PHINodesToUpdate.resize(OrigNumPHINodesToUpdate);
          return false;
        }
      }

      const Value *PHIOp = PN->getIncomingValueForBlock(LLVMBB);

      // Set the DebugLoc for the copy. Prefer the location of the operand
      // if there is one; use the location of the PHI otherwise.
      DL = PN->getDebugLoc();
      if (const Instruction *Inst = dyn_cast<Instruction>(PHIOp))
        DL = Inst->getDebugLoc();

      unsigned Reg = getRegForValue(PHIOp);
      if (Reg == 0) {
        // Could not materialize the operand: undo queued updates and bail.
        FuncInfo.PHINodesToUpdate.resize(OrigNumPHINodesToUpdate);
        return false;
      }
      FuncInfo.PHINodesToUpdate.push_back(std::make_pair(MBBI++, Reg));
      DL = DebugLoc();
    }
  }

  return true;
}