LiveIntervalAnalysis.cpp 76 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
7177817791780178117821783178417851786178717881789179017911792179317941795179617971798179918001801180218031804180518061807180818091810181118121813181418151816181718181819182018211822182318241825182618271828182918301831183218331834183518361837183818391840184118421843184418451846184718481849185018511852185318541855185618571858185918601861186218631864186518661867186818691870187118721873187418751876187718781879188018811882188318841885188618871888188918901891189218931894189518961897189818991900190119021903190419051906190719081909191019111912191319141915191619171918191919201921192219231924192519261927192819291930193119321933193419351936193719381939194019411942194319441945194619471948194919501951195219531954195519561957195819591960196119621963196419651966196719681969197019711972197319741975197619771978197919801981198219831984198519861987198819891990199119921993199419951996199719981999200020012002200320042005200620072008200920102011201220132014201520162017201820192020202120222023202420252026202720282029203020312032203320342035203620372038203920402041204220432044204520462047204820492050205120522053205420552056205720582059
  1. //===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
  2. //
  3. // The LLVM Compiler Infrastructure
  4. //
  5. // This file is distributed under the University of Illinois Open Source
  6. // License. See LICENSE.TXT for details.
  7. //
  8. //===----------------------------------------------------------------------===//
  9. //
  10. // This file implements the LiveInterval analysis pass which is used
  11. // by the Linear Scan Register allocator. This pass linearizes the
  12. // basic blocks of the function in DFS order and uses the
  13. // LiveVariables pass to conservatively compute live intervals for
  14. // each virtual and physical register.
  15. //
  16. //===----------------------------------------------------------------------===//
  17. #define DEBUG_TYPE "liveintervals"
  18. #include "llvm/CodeGen/LiveIntervalAnalysis.h"
  19. #include "VirtRegMap.h"
  20. #include "llvm/Value.h"
  21. #include "llvm/Analysis/AliasAnalysis.h"
  22. #include "llvm/CodeGen/LiveVariables.h"
  23. #include "llvm/CodeGen/MachineFrameInfo.h"
  24. #include "llvm/CodeGen/MachineInstr.h"
  25. #include "llvm/CodeGen/MachineInstrBuilder.h"
  26. #include "llvm/CodeGen/MachineLoopInfo.h"
  27. #include "llvm/CodeGen/MachineMemOperand.h"
  28. #include "llvm/CodeGen/MachineRegisterInfo.h"
  29. #include "llvm/CodeGen/Passes.h"
  30. #include "llvm/CodeGen/ProcessImplicitDefs.h"
  31. #include "llvm/Target/TargetRegisterInfo.h"
  32. #include "llvm/Target/TargetInstrInfo.h"
  33. #include "llvm/Target/TargetMachine.h"
  34. #include "llvm/Target/TargetOptions.h"
  35. #include "llvm/Support/CommandLine.h"
  36. #include "llvm/Support/Debug.h"
  37. #include "llvm/Support/ErrorHandling.h"
  38. #include "llvm/Support/raw_ostream.h"
  39. #include "llvm/ADT/DepthFirstIterator.h"
  40. #include "llvm/ADT/SmallSet.h"
  41. #include "llvm/ADT/Statistic.h"
  42. #include "llvm/ADT/STLExtras.h"
  43. #include <algorithm>
  44. #include <limits>
  45. #include <cmath>
  46. using namespace llvm;
// Hidden options for help debugging.
// -disable-rematerialization: turn off rematerialization entirely.
static cl::opt<bool> DisableReMat("disable-rematerialization",
                                  cl::init(false), cl::Hidden);
// -fast-spill: use the fast spilling path instead of the default one.
static cl::opt<bool> EnableFastSpilling("fast-spill",
                                        cl::init(false), cl::Hidden);

// Pass statistics, reported with -stats.
STATISTIC(numIntervals , "Number of original intervals");
STATISTIC(numFolds     , "Number of loads/stores folded into instructions");
STATISTIC(numSplits    , "Number of intervals split");

// Pass identification and registration with the pass manager.
char LiveIntervals::ID = 0;
static RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");
/// getAnalysisUsage - Declare which analyses this pass requires and which
/// ones it preserves, so the pass manager can schedule it correctly.
void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
  // This pass never adds or removes basic blocks.
  AU.setPreservesCFG();
  AU.addRequired<AliasAnalysis>();
  AU.addPreserved<AliasAnalysis>();
  AU.addPreserved<LiveVariables>();
  AU.addRequired<LiveVariables>();
  AU.addPreservedID(MachineLoopInfoID);
  AU.addPreservedID(MachineDominatorsID);

  // PHI elimination is only a prerequisite when the strong PHI elimination
  // pass is not in use (it handles PHIs itself).
  if (!StrongPHIElim) {
    AU.addPreservedID(PHIEliminationID);
    AU.addRequiredID(PHIEliminationID);
  }

  AU.addRequiredID(TwoAddressInstructionPassID);
  AU.addPreserved<ProcessImplicitDefs>();
  AU.addRequired<ProcessImplicitDefs>();
  AU.addPreserved<SlotIndexes>();
  // Transitive: users of LiveIntervals also need SlotIndexes to stay alive.
  AU.addRequiredTransitive<SlotIndexes>();
  MachineFunctionPass::getAnalysisUsage(AU);
}
  76. void LiveIntervals::releaseMemory() {
  77. // Free the live intervals themselves.
  78. for (DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.begin(),
  79. E = r2iMap_.end(); I != E; ++I)
  80. delete I->second;
  81. r2iMap_.clear();
  82. // Release VNInfo memroy regions after all VNInfo objects are dtor'd.
  83. VNInfoAllocator.Reset();
  84. while (!CloneMIs.empty()) {
  85. MachineInstr *MI = CloneMIs.back();
  86. CloneMIs.pop_back();
  87. mf_->DeleteMachineInstr(MI);
  88. }
  89. }
  90. /// runOnMachineFunction - Register allocate the whole function
  91. ///
  92. bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
  93. mf_ = &fn;
  94. mri_ = &mf_->getRegInfo();
  95. tm_ = &fn.getTarget();
  96. tri_ = tm_->getRegisterInfo();
  97. tii_ = tm_->getInstrInfo();
  98. aa_ = &getAnalysis<AliasAnalysis>();
  99. lv_ = &getAnalysis<LiveVariables>();
  100. indexes_ = &getAnalysis<SlotIndexes>();
  101. allocatableRegs_ = tri_->getAllocatableSet(fn);
  102. computeIntervals();
  103. numIntervals += getNumIntervals();
  104. DEBUG(dump());
  105. return true;
  106. }
  107. /// print - Implement the dump method.
  108. void LiveIntervals::print(raw_ostream &OS, const Module* ) const {
  109. OS << "********** INTERVALS **********\n";
  110. for (const_iterator I = begin(), E = end(); I != E; ++I) {
  111. I->second->print(OS, tri_);
  112. OS << "\n";
  113. }
  114. printInstrs(OS);
  115. }
  116. void LiveIntervals::printInstrs(raw_ostream &OS) const {
  117. OS << "********** MACHINEINSTRS **********\n";
  118. for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
  119. mbbi != mbbe; ++mbbi) {
  120. OS << "BB#" << mbbi->getNumber()
  121. << ":\t\t# derived from " << mbbi->getName() << "\n";
  122. for (MachineBasicBlock::iterator mii = mbbi->begin(),
  123. mie = mbbi->end(); mii != mie; ++mii) {
  124. OS << getInstructionIndex(mii) << '\t' << *mii;
  125. }
  126. }
  127. }
/// dumpInstrs - Convenience wrapper that prints all machine instructions
/// to stderr.
void LiveIntervals::dumpInstrs() const {
  printInstrs(errs());
}
  131. /// conflictsWithPhysRegDef - Returns true if the specified register
  132. /// is defined during the duration of the specified interval.
  133. bool LiveIntervals::conflictsWithPhysRegDef(const LiveInterval &li,
  134. VirtRegMap &vrm, unsigned reg) {
  135. for (LiveInterval::Ranges::const_iterator
  136. I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
  137. for (SlotIndex index = I->start.getBaseIndex(),
  138. end = I->end.getPrevSlot().getBaseIndex().getNextIndex();
  139. index != end;
  140. index = index.getNextIndex()) {
  141. MachineInstr *MI = getInstructionFromIndex(index);
  142. if (!MI)
  143. continue; // skip deleted instructions
  144. unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
  145. if (tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
  146. if (SrcReg == li.reg || DstReg == li.reg)
  147. continue;
  148. for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
  149. MachineOperand& mop = MI->getOperand(i);
  150. if (!mop.isReg())
  151. continue;
  152. unsigned PhysReg = mop.getReg();
  153. if (PhysReg == 0 || PhysReg == li.reg)
  154. continue;
  155. if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
  156. if (!vrm.hasPhys(PhysReg))
  157. continue;
  158. PhysReg = vrm.getPhys(PhysReg);
  159. }
  160. if (PhysReg && tri_->regsOverlap(PhysReg, reg))
  161. return true;
  162. }
  163. }
  164. }
  165. return false;
  166. }
/// conflictsWithPhysRegRef - Similar to conflictsWithPhysRegDef except
/// it can check use as well.
bool LiveIntervals::conflictsWithPhysRegRef(LiveInterval &li,
                                            unsigned Reg, bool CheckUse,
                                  SmallPtrSet<MachineInstr*,32> &JoinedCopies) {
  // Visit every instruction slot covered by the interval.
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    for (SlotIndex index = I->start.getBaseIndex(),
           end = I->end.getPrevSlot().getBaseIndex().getNextIndex();
         index != end;
         index = index.getNextIndex()) {
      MachineInstr *MI = getInstructionFromIndex(index);
      if (!MI)
        continue;               // skip deleted instructions
      // Copies scheduled for coalescing will go away; ignore them.
      if (JoinedCopies.count(MI))
        continue;
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand& MO = MI->getOperand(i);
        if (!MO.isReg())
          continue;
        // Only consider uses when the caller asked for them.
        if (MO.isUse() && !CheckUse)
          continue;
        unsigned PhysReg = MO.getReg();
        if (PhysReg == 0 || TargetRegisterInfo::isVirtualRegister(PhysReg))
          continue;
        // A reference to any sub-register of Reg is a conflict.
        if (tri_->isSubRegister(Reg, PhysReg))
          return true;
      }
    }
  }

  return false;
}
  199. #ifndef NDEBUG
  200. static void printRegName(unsigned reg, const TargetRegisterInfo* tri_) {
  201. if (TargetRegisterInfo::isPhysicalRegister(reg))
  202. errs() << tri_->getName(reg);
  203. else
  204. errs() << "%reg" << reg;
  205. }
  206. #endif
/// handleVirtualRegisterDef - Extend the live interval of a virtual register
/// for the definition at operand MOIdx of instruction mi (at index MIIdx),
/// using the LiveVariables kill information for the register.
void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
                                             MachineBasicBlock::iterator mi,
                                             SlotIndex MIIdx,
                                             MachineOperand& MO,
                                             unsigned MOIdx,
                                             LiveInterval &interval) {
  DEBUG({
      errs() << "\t\tregister: ";
      printRegName(interval.reg, tri_);
    });

  // Virtual registers may be defined multiple times (due to phi
  // elimination and 2-addr elimination). Much of what we do only has to be
  // done once for the vreg. We use an empty interval to detect the first
  // time we see a vreg.
  LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);
  if (interval.empty()) {
    // Get the Idx of the defining instructions.
    SlotIndex defIndex = MIIdx.getDefIndex();
    // Earlyclobbers move back one, so that they overlap the live range
    // of inputs.
    if (MO.isEarlyClobber())
      defIndex = MIIdx.getUseIndex();
    VNInfo *ValNo;
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    // Remember the defining instruction for subreg operations and register
    // copies; the coalescer uses this later.
    if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
        mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
        mi->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
        tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
      CopyMI = mi;
    // Earlyclobbers move back one.
    ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);

    assert(ValNo->id == 0 && "First value in interval is not 0?");

    // Loop over all of the blocks that the vreg is defined in. There are
    // two cases we have to handle here. The most common case is a vreg
    // whose lifetime is contained within a basic block. In this case there
    // will be a single kill, in MBB, which comes after the definition.
    if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
      // FIXME: what about dead vars?
      SlotIndex killIdx;
      if (vi.Kills[0] != mi)
        killIdx = getInstructionIndex(vi.Kills[0]).getDefIndex();
      else
        // The defining instruction itself is the kill: the value is dead at
        // its own store slot.
        killIdx = defIndex.getStoreIndex();

      // If the kill happens after the definition, we have an intra-block
      // live range.
      if (killIdx > defIndex) {
        assert(vi.AliveBlocks.empty() &&
               "Shouldn't be alive across any blocks!");
        LiveRange LR(defIndex, killIdx, ValNo);
        interval.addRange(LR);
        DEBUG(errs() << " +" << LR << "\n");
        ValNo->addKill(killIdx);
        return;
      }
    }

    // The other case we handle is when a virtual register lives to the end
    // of the defining block, potentially live across some blocks, then is
    // live into some number of blocks, but gets killed. Start by adding a
    // range that goes from this definition to the end of the defining block.
    LiveRange NewLR(defIndex, getMBBEndIdx(mbb).getNextIndex().getLoadIndex(),
                    ValNo);
    DEBUG(errs() << " +" << NewLR);
    interval.addRange(NewLR);

    // Iterate over all of the blocks that the variable is completely
    // live in, adding [insrtIndex(begin), instrIndex(end)+4) to the
    // live interval.
    for (SparseBitVector<>::iterator I = vi.AliveBlocks.begin(),
           E = vi.AliveBlocks.end(); I != E; ++I) {
      LiveRange LR(
          getMBBStartIdx(mf_->getBlockNumbered(*I)),
          getMBBEndIdx(mf_->getBlockNumbered(*I)).getNextIndex().getLoadIndex(),
          ValNo);
      interval.addRange(LR);
      DEBUG(errs() << " +" << LR);
    }

    // Finally, this virtual register is live from the start of any killing
    // block to the 'use' slot of the killing instruction.
    for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
      MachineInstr *Kill = vi.Kills[i];
      SlotIndex killIdx =
        getInstructionIndex(Kill).getDefIndex();
      LiveRange LR(getMBBStartIdx(Kill->getParent()), killIdx, ValNo);
      interval.addRange(LR);
      ValNo->addKill(killIdx);
      DEBUG(errs() << " +" << LR);
    }

  } else {
    // If this is the second time we see a virtual register definition, it
    // must be due to phi elimination or two addr elimination. If this is
    // the result of two address elimination, then the vreg is one of the
    // def-and-use register operand.
    if (mi->isRegTiedToUseOperand(MOIdx)) {
      // If this is a two-address definition, then we have already processed
      // the live range. The only problem is that we didn't realize there
      // are actually two values in the live interval. Because of this we
      // need to take the LiveRegion that defines this register and split it
      // into two values.
      assert(interval.containsOneValue());
      SlotIndex DefIndex = interval.getValNumInfo(0)->def.getDefIndex();
      SlotIndex RedefIndex = MIIdx.getDefIndex();
      if (MO.isEarlyClobber())
        RedefIndex = MIIdx.getUseIndex();

      const LiveRange *OldLR =
        interval.getLiveRangeContaining(RedefIndex.getUseIndex());
      VNInfo *OldValNo = OldLR->valno;

      // Delete the initial value, which should be short and continuous,
      // because the 2-addr copy must be in the same MBB as the redef.
      interval.removeRange(DefIndex, RedefIndex);

      // Two-address vregs should always only be redefined once. This means
      // that at this point, there should be exactly one value number in it.
      assert(interval.containsOneValue() && "Unexpected 2-addr liveint!");

      // The new value number (#1) is defined by the instruction we claimed
      // defined value #0.
      VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->getCopy(),
                                            false, // flags set below
                                            VNInfoAllocator);
      ValNo->setFlags(OldValNo->getFlags()); // inherit old value's flags

      // Value#0 is now defined by the 2-addr instruction.
      OldValNo->def  = RedefIndex;
      OldValNo->setCopy(0);

      // Add the new live interval which replaces the range for the input copy.
      LiveRange LR(DefIndex, RedefIndex, ValNo);
      DEBUG(errs() << " replace range with " << LR);
      interval.addRange(LR);
      ValNo->addKill(RedefIndex);

      // If this redefinition is dead, we need to add a dummy unit live
      // range covering the def slot.
      if (MO.isDead())
        interval.addRange(LiveRange(RedefIndex, RedefIndex.getStoreIndex(),
                                    OldValNo));

      DEBUG({
          errs() << " RESULT: ";
          interval.print(errs(), tri_);
        });
    } else {
      // Otherwise, this must be because of phi elimination. If this is the
      // first redefinition of the vreg that we have seen, go back and change
      // the live range in the PHI block to be a different value number.
      if (interval.containsOneValue()) {
        // Remove the old range that we now know has an incorrect number.
        VNInfo *VNI = interval.getValNumInfo(0);
        MachineInstr *Killer = vi.Kills[0];
        SlotIndex Start = getMBBStartIdx(Killer->getParent());
        SlotIndex End = getInstructionIndex(Killer).getDefIndex();
        DEBUG({
            errs() << " Removing [" << Start << "," << End << "] from: ";
            interval.print(errs(), tri_);
            errs() << "\n";
          });
        interval.removeRange(Start, End);
        assert(interval.ranges.size() == 1 &&
               "Newly discovered PHI interval has >1 ranges.");
        MachineBasicBlock *killMBB = getMBBFromIndex(VNI->def);
        VNI->addKill(indexes_->getTerminatorGap(killMBB));
        VNI->setHasPHIKill(true);
        DEBUG({
            errs() << " RESULT: ";
            interval.print(errs(), tri_);
          });

        // Replace the interval with one of a NEW value number. Note that this
        // value number isn't actually defined by an instruction, weird huh? :)
        LiveRange LR(Start, End,
                     interval.getNextValue(SlotIndex(getMBBStartIdx(Killer->getParent()), true),
                                           0, false, VNInfoAllocator));
        LR.valno->setIsPHIDef(true);
        DEBUG(errs() << " replace range with " << LR);
        interval.addRange(LR);
        LR.valno->addKill(End);
        DEBUG({
            errs() << " RESULT: ";
            interval.print(errs(), tri_);
          });
      }

      // In the case of PHI elimination, each variable definition is only
      // live until the end of the block. We've already taken care of the
      // rest of the live range.
      SlotIndex defIndex = MIIdx.getDefIndex();
      if (MO.isEarlyClobber())
        defIndex = MIIdx.getUseIndex();

      VNInfo *ValNo;
      MachineInstr *CopyMI = NULL;
      unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
      // As above, remember the defining instruction for subreg ops/copies.
      if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
          mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
          mi->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
          tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
        CopyMI = mi;
      ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);

      SlotIndex killIndex = getMBBEndIdx(mbb).getNextIndex().getLoadIndex();
      LiveRange LR(defIndex, killIndex, ValNo);
      interval.addRange(LR);
      ValNo->addKill(indexes_->getTerminatorGap(mbb));
      ValNo->setHasPHIKill(true);
      DEBUG(errs() << " +" << LR);
    }
  }

  DEBUG(errs() << '\n');
}
/// handlePhysicalRegisterDef - Extend the live interval of a physical
/// register for the definition at MIIdx. CopyMI, if non-null, is the
/// copy/subreg instruction that produced this def.
void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
                                              MachineBasicBlock::iterator mi,
                                              SlotIndex MIIdx,
                                              MachineOperand& MO,
                                              LiveInterval &interval,
                                              MachineInstr *CopyMI) {
  // A physical register cannot be live across basic block, so its
  // lifetime must end somewhere in its defining basic block.
  DEBUG({
      errs() << "\t\tregister: ";
      printRegName(interval.reg, tri_);
    });

  SlotIndex baseIndex = MIIdx;
  SlotIndex start = baseIndex.getDefIndex();
  // Earlyclobbers move back one.
  if (MO.isEarlyClobber())
    start = MIIdx.getUseIndex();
  SlotIndex end = start;

  // If it is not used after definition, it is considered dead at
  // the instruction defining it. Hence its interval is:
  // [defSlot(def), defSlot(def)+1)
  // For earlyclobbers, the defSlot was pushed back one; the extra
  // advance below compensates.
  if (MO.isDead()) {
    DEBUG(errs() << " dead");
    end = start.getStoreIndex();
    goto exit;
  }

  // If it is not dead on definition, it must be killed by a
  // subsequent instruction. Hence its interval is:
  // [defSlot(def), useSlot(kill)+1)
  baseIndex = baseIndex.getNextIndex();
  while (++mi != MBB->end()) {
    // Skip over index slots whose instruction has been deleted.
    if (getInstructionFromIndex(baseIndex) == 0)
      baseIndex = indexes_->getNextNonNullIndex(baseIndex);

    if (mi->killsRegister(interval.reg, tri_)) {
      DEBUG(errs() << " killed");
      end = baseIndex.getDefIndex();
      goto exit;
    } else {
      int DefIdx = mi->findRegisterDefOperandIdx(interval.reg, false, tri_);
      if (DefIdx != -1) {
        if (mi->isRegTiedToUseOperand(DefIdx)) {
          // Two-address instruction.
          end = baseIndex.getDefIndex();
        } else {
          // Another instruction redefines the register before it is ever read.
          // Then the register is essentially dead at the instruction that defines
          // it. Hence its interval is:
          // [defSlot(def), defSlot(def)+1)
          DEBUG(errs() << " dead");
          end = start.getStoreIndex();
        }
        goto exit;
      }
    }

    baseIndex = baseIndex.getNextIndex();
  }

  // The only case we should have a dead physreg here without a killing or
  // instruction where we know it's dead is if it is live-in to the function
  // and never used. Another possible case is the implicit use of the
  // physical register has been deleted by two-address pass.
  end = start.getStoreIndex();

exit:
  assert(start < end && "did not find end of interval?");

  // Already exists? Extend old live interval.
  LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
  bool Extend = OldLR != interval.end();
  VNInfo *ValNo = Extend
    ? OldLR->valno : interval.getNextValue(start, CopyMI, true, VNInfoAllocator);
  // Record that the old value was redefined by an earlyclobber operand.
  if (MO.isEarlyClobber() && Extend)
    ValNo->setHasRedefByEC(true);
  LiveRange LR(start, end, ValNo);
  interval.addRange(LR);
  LR.valno->addKill(end);
  DEBUG(errs() << " +" << LR << '\n');
}
  483. void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
  484. MachineBasicBlock::iterator MI,
  485. SlotIndex MIIdx,
  486. MachineOperand& MO,
  487. unsigned MOIdx) {
  488. if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
  489. handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
  490. getOrCreateInterval(MO.getReg()));
  491. else if (allocatableRegs_[MO.getReg()]) {
  492. MachineInstr *CopyMI = NULL;
  493. unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
  494. if (MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
  495. MI->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
  496. MI->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
  497. tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
  498. CopyMI = MI;
  499. handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
  500. getOrCreateInterval(MO.getReg()), CopyMI);
  501. // Def of a register also defines its sub-registers.
  502. for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
  503. // If MI also modifies the sub-register explicitly, avoid processing it
  504. // more than once. Do not pass in TRI here so it checks for exact match.
  505. if (!MI->modifiesRegister(*AS))
  506. handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
  507. getOrCreateInterval(*AS), 0);
  508. }
  509. }
/// handleLiveInRegister - Create a live range for a register that is live-in
/// to MBB, starting at the block entry (MIIdx) and running until its first
/// kill or redefinition. isAlias is true when the register is only live-in
/// as an alias of an explicit live-in.
void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
                                         SlotIndex MIIdx,
                                         LiveInterval &interval, bool isAlias) {
  DEBUG({
      errs() << "\t\tlivein register: ";
      printRegName(interval.reg, tri_);
    });

  // Look for kills, if it reaches a def before it's killed, then it shouldn't
  // be considered a livein.
  MachineBasicBlock::iterator mi = MBB->begin();
  SlotIndex baseIndex = MIIdx;
  SlotIndex start = baseIndex;
  // Skip over index slots whose instruction has been deleted.
  if (getInstructionFromIndex(baseIndex) == 0)
    baseIndex = indexes_->getNextNonNullIndex(baseIndex);

  SlotIndex end = baseIndex;
  bool SeenDefUse = false;

  while (mi != MBB->end()) {
    if (mi->killsRegister(interval.reg, tri_)) {
      DEBUG(errs() << " killed");
      end = baseIndex.getDefIndex();
      SeenDefUse = true;
      break;
    } else if (mi->modifiesRegister(interval.reg, tri_)) {
      // Another instruction redefines the register before it is ever read.
      // Then the register is essentially dead at the instruction that defines
      // it. Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
      DEBUG(errs() << " dead");
      end = start.getStoreIndex();
      SeenDefUse = true;
      break;
    }

    ++mi;
    if (mi != MBB->end()) {
      baseIndex = indexes_->getNextNonNullIndex(baseIndex);
    }
  }

  // Live-in register might not be used at all.
  if (!SeenDefUse) {
    if (isAlias) {
      // Alias live-ins with no reference are treated as dead at block entry.
      DEBUG(errs() << " dead");
      end = MIIdx.getStoreIndex();
    } else {
      // Otherwise the value flows through the whole block.
      DEBUG(errs() << " live through");
      end = baseIndex;
    }
  }

  // The live-in value is a PHI-like def at the start of the block: it has no
  // defining instruction of its own.
  VNInfo *vni =
    interval.getNextValue(SlotIndex(getMBBStartIdx(MBB), true),
                          0, false, VNInfoAllocator);
  vni->setIsPHIDef(true);
  LiveRange LR(start, end, vni);

  interval.addRange(LR);
  LR.valno->addKill(end);
  DEBUG(errs() << " +" << LR << '\n');
}
/// computeIntervals - computes the live intervals for virtual
/// registers. for some ordering of the machine instructions [1,N] a
/// live interval is an interval [i, j) where 1 <= i <= j < N for
/// which a variable is live
void LiveIntervals::computeIntervals() {
  DEBUG(errs() << "********** COMPUTING LIVE INTERVALS **********\n"
               << "********** Function: "
               << ((Value*)mf_->getFunction())->getName() << '\n');

  SmallVector<unsigned, 8> UndefUses;
  for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
       MBBI != E; ++MBBI) {
    MachineBasicBlock *MBB = MBBI;
    // Track the index of the current machine instr.
    SlotIndex MIIndex = getMBBStartIdx(MBB);
    DEBUG(errs() << MBB->getName() << ":\n");

    MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();

    // Create intervals for live-ins to this BB first.
    for (MachineBasicBlock::const_livein_iterator LI = MBB->livein_begin(),
           LE = MBB->livein_end(); LI != LE; ++LI) {
      handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
      // Multiple live-ins can alias the same register.
      for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
        if (!hasInterval(*AS))
          handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
                               true);
    }

    // Skip over empty initial indices.
    if (getInstructionFromIndex(MIIndex) == 0)
      MIIndex = indexes_->getNextNonNullIndex(MIIndex);

    for (; MI != miEnd; ++MI) {
      DEBUG(errs() << MIIndex << "\t" << *MI);

      // Handle defs. Iterate in reverse so that implicit operands (at the
      // end of the operand list) are processed first.
      for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
        MachineOperand &MO = MI->getOperand(i);
        if (!MO.isReg() || !MO.getReg())
          continue;

        // handle register defs - build intervals
        if (MO.isDef())
          handleRegisterDef(MBB, MI, MIIndex, MO, i);
        else if (MO.isUndef())
          UndefUses.push_back(MO.getReg());
      }

      // Move to the next instr slot.
      MIIndex = indexes_->getNextNonNullIndex(MIIndex);
    }
  }

  // Create empty intervals for registers defined by implicit_def's (except
  // for those implicit_def that define values which are liveout of their
  // blocks).
  for (unsigned i = 0, e = UndefUses.size(); i != e; ++i) {
    unsigned UndefReg = UndefUses[i];
    (void)getOrCreateInterval(UndefReg);
  }
}
  620. LiveInterval* LiveIntervals::createInterval(unsigned reg) {
  621. float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ? HUGE_VALF : 0.0F;
  622. return new LiveInterval(reg, Weight);
  623. }
  624. /// dupInterval - Duplicate a live interval. The caller is responsible for
  625. /// managing the allocated memory.
  626. LiveInterval* LiveIntervals::dupInterval(LiveInterval *li) {
  627. LiveInterval *NewLI = createInterval(li->reg);
  628. NewLI->Copy(*li, mri_, getVNInfoAllocator());
  629. return NewLI;
  630. }
  631. /// getVNInfoSourceReg - Helper function that parses the specified VNInfo
  632. /// copy field and returns the source register that defines it.
  633. unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
  634. if (!VNI->getCopy())
  635. return 0;
  636. if (VNI->getCopy()->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG) {
  637. // If it's extracting out of a physical register, return the sub-register.
  638. unsigned Reg = VNI->getCopy()->getOperand(1).getReg();
  639. if (TargetRegisterInfo::isPhysicalRegister(Reg))
  640. Reg = tri_->getSubReg(Reg, VNI->getCopy()->getOperand(2).getImm());
  641. return Reg;
  642. } else if (VNI->getCopy()->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
  643. VNI->getCopy()->getOpcode() == TargetInstrInfo::SUBREG_TO_REG)
  644. return VNI->getCopy()->getOperand(2).getReg();
  645. unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
  646. if (tii_->isMoveInstr(*VNI->getCopy(), SrcReg, DstReg, SrcSubReg, DstSubReg))
  647. return SrcReg;
  648. llvm_unreachable("Unrecognized copy instruction!");
  649. return 0;
  650. }
  651. //===----------------------------------------------------------------------===//
  652. // Register allocator hooks.
  653. //
/// getReMatImplicitUse - If the remat definition MI has one (for now, we only
/// allow one) virtual register operand, then its uses are implicitly using
/// the register. Returns the virtual register.
unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
                                            MachineInstr *MI) const {
  unsigned RegOp = 0;
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || !MO.isUse())
      continue;
    unsigned Reg = MO.getReg();
    // Skip the interval's own register; those uses are the value being
    // rematerialized, not an extra input dependency.
    if (Reg == 0 || Reg == li.reg)
      continue;

    // Unallocatable physical register uses (e.g. reserved registers) don't
    // constrain rematerialization.
    if (TargetRegisterInfo::isPhysicalRegister(Reg) &&
        !allocatableRegs_[Reg])
      continue;
    // FIXME: For now, only remat MI with at most one register operand.
    assert(!RegOp &&
           "Can't rematerialize instruction with multiple register operand!");
    RegOp = MO.getReg();
#ifndef NDEBUG
    // With assertions enabled the assert above already verified uniqueness,
    // so we can stop at the first qualifying operand. In NDEBUG builds the
    // scan continues (the assert compiles away) and the last operand wins.
    break;
#endif
  }
  return RegOp;
}
  680. /// isValNoAvailableAt - Return true if the val# of the specified interval
  681. /// which reaches the given instruction also reaches the specified use index.
  682. bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
  683. SlotIndex UseIdx) const {
  684. SlotIndex Index = getInstructionIndex(MI);
  685. VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
  686. LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
  687. return UI != li.end() && UI->valno == ValNo;
  688. }
/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI,
                                       SmallVectorImpl<LiveInterval*> &SpillIs,
                                       bool &isLoad) {
  if (DisableReMat)
    return false;

  if (!tii_->isTriviallyReMaterializable(MI, aa_))
    return false;

  // Target-specific code can mark an instruction as being rematerializable
  // if it has one virtual reg use, though it had better be something like
  // a PIC base register which is likely to be live everywhere.
  unsigned ImpUse = getReMatImplicitUse(li, MI);
  if (ImpUse) {
    const LiveInterval &ImpLi = getInterval(ImpUse);
    // Every use of li that is reached by this val# will be rewritten to the
    // remat'ed value, so the implicitly-used register's value at MI must
    // also be available at each such use point.
    for (MachineRegisterInfo::use_iterator ri = mri_->use_begin(li.reg),
           re = mri_->use_end(); ri != re; ++ri) {
      MachineInstr *UseMI = &*ri;
      SlotIndex UseIdx = getInstructionIndex(UseMI);
      if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
        continue;
      if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
        return false;
    }

    // If a register operand of the re-materialized instruction is going to
    // be spilled next, then it's not legal to re-materialize this instruction.
    for (unsigned i = 0, e = SpillIs.size(); i != e; ++i)
      if (ImpUse == SpillIs[i]->reg)
        return false;
  }
  return true;
}
  722. /// isReMaterializable - Returns true if the definition MI of the specified
  723. /// val# of the specified interval is re-materializable.
  724. bool LiveIntervals::isReMaterializable(const LiveInterval &li,
  725. const VNInfo *ValNo, MachineInstr *MI) {
  726. SmallVector<LiveInterval*, 4> Dummy1;
  727. bool Dummy2;
  728. return isReMaterializable(li, ValNo, MI, Dummy1, Dummy2);
  729. }
  730. /// isReMaterializable - Returns true if every definition of MI of every
  731. /// val# of the specified interval is re-materializable.
  732. bool LiveIntervals::isReMaterializable(const LiveInterval &li,
  733. SmallVectorImpl<LiveInterval*> &SpillIs,
  734. bool &isLoad) {
  735. isLoad = false;
  736. for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
  737. i != e; ++i) {
  738. const VNInfo *VNI = *i;
  739. if (VNI->isUnused())
  740. continue; // Dead val#.
  741. // Is the def for the val# rematerializable?
  742. if (!VNI->isDefAccurate())
  743. return false;
  744. MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
  745. bool DefIsLoad = false;
  746. if (!ReMatDefMI ||
  747. !isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad))
  748. return false;
  749. isLoad |= DefIsLoad;
  750. }
  751. return true;
  752. }
  753. /// FilterFoldedOps - Filter out two-address use operands. Return
  754. /// true if it finds any issue with the operands that ought to prevent
  755. /// folding.
  756. static bool FilterFoldedOps(MachineInstr *MI,
  757. SmallVector<unsigned, 2> &Ops,
  758. unsigned &MRInfo,
  759. SmallVector<unsigned, 2> &FoldOps) {
  760. MRInfo = 0;
  761. for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
  762. unsigned OpIdx = Ops[i];
  763. MachineOperand &MO = MI->getOperand(OpIdx);
  764. // FIXME: fold subreg use.
  765. if (MO.getSubReg())
  766. return true;
  767. if (MO.isDef())
  768. MRInfo |= (unsigned)VirtRegMap::isMod;
  769. else {
  770. // Filter out two-address use operand(s).
  771. if (MI->isRegTiedToDefOperand(OpIdx)) {
  772. MRInfo = VirtRegMap::isModRef;
  773. continue;
  774. }
  775. MRInfo |= (unsigned)VirtRegMap::isRef;
  776. }
  777. FoldOps.push_back(OpIdx);
  778. }
  779. return false;
  780. }
/// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
/// slot / to reg or any rematerialized load into ith operand of specified
/// MI. If it is successul, MI is updated with the newly created MI and
/// returns true.
bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
                                         VirtRegMap &vrm, MachineInstr *DefMI,
                                         SlotIndex InstrIdx,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool isSS, int Slot, unsigned Reg) {
  // If it is an implicit def instruction, just delete it.
  if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
    RemoveMachineInstrFromMaps(MI);
    vrm.RemoveMachineInstrFromMaps(MI);
    MI->eraseFromParent();
    ++numFolds;
    return true;
  }

  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // The only time it's safe to fold into a two address instruction is when
  // it's folding reload and spill from / into a spill stack slot.
  if (DefMI && (MRInfo & VirtRegMap::isMod))
    return false;

  // Fold from a stack slot (spill/reload) or fold the remat'ed def itself.
  MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
                           : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
  if (fmi) {
    // Remember this instruction uses the spill slot.
    if (isSS) vrm.addSpillSlotUse(Slot, fmi);

    // Attempt to fold the memory reference into the instruction. If
    // we can do this, we don't need to insert spill code.
    MachineBasicBlock &MBB = *MI->getParent();
    if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
      vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
    // Transfer all bookkeeping from the old instruction to the folded one,
    // then splice the new instruction in place of the old.
    vrm.transferSpillPts(MI, fmi);
    vrm.transferRestorePts(MI, fmi);
    vrm.transferEmergencySpills(MI, fmi);
    ReplaceMachineInstrInMaps(MI, fmi);
    MI = MBB.insert(MBB.erase(MI), fmi);
    ++numFolds;
    return true;
  }
  return false;
}
  828. /// canFoldMemoryOperand - Returns true if the specified load / store
  829. /// folding is possible.
  830. bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
  831. SmallVector<unsigned, 2> &Ops,
  832. bool ReMat) const {
  833. // Filter the list of operand indexes that are to be folded. Abort if
  834. // any operand will prevent folding.
  835. unsigned MRInfo = 0;
  836. SmallVector<unsigned, 2> FoldOps;
  837. if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
  838. return false;
  839. // It's only legal to remat for a use, not a def.
  840. if (ReMat && (MRInfo & VirtRegMap::isMod))
  841. return false;
  842. return tii_->canFoldMemoryOperand(MI, FoldOps);
  843. }
  844. bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
  845. LiveInterval::Ranges::const_iterator itr = li.ranges.begin();
  846. MachineBasicBlock *mbb = indexes_->getMBBCoveringRange(itr->start, itr->end);
  847. if (mbb == 0)
  848. return false;
  849. for (++itr; itr != li.ranges.end(); ++itr) {
  850. MachineBasicBlock *mbb2 =
  851. indexes_->getMBBCoveringRange(itr->start, itr->end);
  852. if (mbb2 != mbb)
  853. return false;
  854. }
  855. return true;
  856. }
  857. /// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
  858. /// interval on to-be re-materialized operands of MI) with new register.
  859. void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
  860. MachineInstr *MI, unsigned NewVReg,
  861. VirtRegMap &vrm) {
  862. // There is an implicit use. That means one of the other operand is
  863. // being remat'ed and the remat'ed instruction has li.reg as an
  864. // use operand. Make sure we rewrite that as well.
  865. for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
  866. MachineOperand &MO = MI->getOperand(i);
  867. if (!MO.isReg())
  868. continue;
  869. unsigned Reg = MO.getReg();
  870. if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
  871. continue;
  872. if (!vrm.isReMaterialized(Reg))
  873. continue;
  874. MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
  875. MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
  876. if (UseMO)
  877. UseMO->setReg(NewVReg);
  878. }
  879. }
/// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
/// for addIntervalsForSpills to rewrite uses / defs for the given live range.
/// Rewrites all operands of MI that reference li.reg to a (possibly freshly
/// created) NewVReg, attempting to fold loads/stores when not splitting, and
/// records the new spill interval's live ranges. Returns true if every
/// rewritten operand could (still) be folded.
bool LiveIntervals::
rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
                 bool TrySplit, SlotIndex index, SlotIndex end,
                 MachineInstr *MI,
                 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                 unsigned Slot, int LdSlot,
                 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                 VirtRegMap &vrm,
                 const TargetRegisterClass* rc,
                 SmallVector<int, 4> &ReMatIds,
                 const MachineLoopInfo *loopInfo,
                 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
                 DenseMap<unsigned,unsigned> &MBBVRegsMap,
                 std::vector<LiveInterval*> &NewLIs) {
  bool CanFold = false;
 RestartInstruction:
  for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
    MachineOperand& mop = MI->getOperand(i);
    if (!mop.isReg())
      continue;
    unsigned Reg = mop.getReg();
    unsigned RegI = Reg;
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (Reg != li.reg)
      continue;

    bool TryFold = !DefIsReMat;
    bool FoldSS = true; // Default behavior unless it's a remat.
    int FoldSlot = Slot;
    if (DefIsReMat) {
      // If this is the rematerializable definition MI itself and
      // all of its uses are rematerialized, simply delete it.
      if (MI == ReMatOrigDefMI && CanDelete) {
        DEBUG(errs() << "\t\t\t\tErasing re-materlizable def: "
                     << MI << '\n');
        RemoveMachineInstrFromMaps(MI);
        vrm.RemoveMachineInstrFromMaps(MI);
        MI->eraseFromParent();
        break;
      }

      // If def for this use can't be rematerialized, then try folding.
      // If def is rematerializable and it's a load, also try folding.
      TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
      if (isLoad) {
        // Try fold loads (from stack slot, constant pool, etc.) into uses.
        FoldSS = isLoadSS;
        FoldSlot = LdSlot;
      }
    }

    // Scan all of the operands of this instruction rewriting operands
    // to use NewVReg instead of li.reg as appropriate. We do this for
    // two reasons:
    //
    // 1. If the instr reads the same spilled vreg multiple times, we
    // want to reuse the NewVReg.
    // 2. If the instr is a two-addr instruction, we are required to
    // keep the src/dst regs pinned.
    //
    // Keep track of whether we replace a use and/or def so that we can
    // create the spill interval with the appropriate range.

    HasUse = mop.isUse();
    HasDef = mop.isDef();
    SmallVector<unsigned, 2> Ops;
    Ops.push_back(i);
    for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
      const MachineOperand &MOj = MI->getOperand(j);
      if (!MOj.isReg())
        continue;
      unsigned RegJ = MOj.getReg();
      if (RegJ == 0 || TargetRegisterInfo::isPhysicalRegister(RegJ))
        continue;
      if (RegJ == RegI) {
        Ops.push_back(j);
        // <undef> operands are grouped but don't contribute use/def-ness.
        if (!MOj.isUndef()) {
          HasUse |= MOj.isUse();
          HasDef |= MOj.isDef();
        }
      }
    }

    // Create a new virtual register for the spill interval.
    // Create the new register now so we can map the fold instruction
    // to the new register so when it is unfolded we get the correct
    // answer.
    bool CreatedNewVReg = false;
    if (NewVReg == 0) {
      NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      CreatedNewVReg = true;

      // The new virtual register should get the same allocation hints as the
      // old one.
      std::pair<unsigned, unsigned> Hint = mri_->getRegAllocationHint(Reg);
      if (Hint.first || Hint.second)
        mri_->setRegAllocationHint(NewVReg, Hint.first, Hint.second);
    }

    if (!TryFold)
      CanFold = false;
    else {
      // Do not fold load / store here if we are splitting. We'll find an
      // optimal point to insert a load / store later.
      if (!TrySplit) {
        if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                 Ops, FoldSS, FoldSlot, NewVReg)) {
          // Folding the load/store can completely change the instruction in
          // unpredictable ways, rescan it from the beginning.

          if (FoldSS) {
            // We need to give the new vreg the same stack slot as the
            // spilled interval.
            vrm.assignVirt2StackSlot(NewVReg, FoldSlot);
          }

          HasUse = false;
          HasDef = false;
          CanFold = false;
          // If the folded instruction was removed from the maps entirely,
          // there is nothing left to rewrite.
          if (isNotInMIMap(MI))
            break;
          goto RestartInstruction;
        }
      } else {
        // We'll try to fold it later if it's profitable.
        CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
      }
    }

    mop.setReg(NewVReg);
    if (mop.isImplicit())
      rewriteImplicitOps(li, MI, NewVReg, vrm);

    // Reuse NewVReg for other reads.
    for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
      MachineOperand &mopj = MI->getOperand(Ops[j]);
      mopj.setReg(NewVReg);
      if (mopj.isImplicit())
        rewriteImplicitOps(li, MI, NewVReg, vrm);
    }

    if (CreatedNewVReg) {
      if (DefIsReMat) {
        vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI);
        if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
          // Each valnum may have its own remat id.
          ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
        } else {
          vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
        }
        if (!CanDelete || (HasUse && HasDef)) {
          // If this is a two-addr instruction then its use operands are
          // rematerializable but its def is not. It should be assigned a
          // stack slot.
          vrm.assignVirt2StackSlot(NewVReg, Slot);
        }
      } else {
        vrm.assignVirt2StackSlot(NewVReg, Slot);
      }
    } else if (HasUse && HasDef &&
               vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
      // If this interval hasn't been assigned a stack slot (because earlier
      // def is a deleted remat def), do it now.
      assert(Slot != VirtRegMap::NO_STACK_SLOT);
      vrm.assignVirt2StackSlot(NewVReg, Slot);
    }

    // Re-matting an instruction with virtual register use. Add the
    // register as an implicit use on the use MI.
    if (DefIsReMat && ImpUse)
      MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));

    // Create a new register interval for this spill / remat.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (CreatedNewVReg) {
      NewLIs.push_back(&nI);
      MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
      if (TrySplit)
        vrm.setIsSplitFromReg(NewVReg, li.reg);
    }

    if (HasUse) {
      if (CreatedNewVReg) {
        // Fresh interval: cover [load-slot, def-slot) around this instr.
        LiveRange LR(index.getLoadIndex(), index.getDefIndex(),
                     nI.getNextValue(SlotIndex(), 0, false, VNInfoAllocator));
        DEBUG(errs() << " +" << LR);
        nI.addRange(LR);
      } else {
        // Extend the split live interval to this def / use.
        SlotIndex End = index.getDefIndex();
        LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
                     nI.getValNumInfo(nI.getNumValNums()-1));
        DEBUG(errs() << " +" << LR);
        nI.addRange(LR);
      }
    }
    if (HasDef) {
      LiveRange LR(index.getDefIndex(), index.getStoreIndex(),
                   nI.getNextValue(SlotIndex(), 0, false, VNInfoAllocator));
      DEBUG(errs() << " +" << LR);
      nI.addRange(LR);
    }

    DEBUG({
        errs() << "\t\t\t\tAdded new interval: ";
        nI.print(errs(), tri_);
        errs() << '\n';
      });
  }
  return CanFold;
}
  1079. bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
  1080. const VNInfo *VNI,
  1081. MachineBasicBlock *MBB,
  1082. SlotIndex Idx) const {
  1083. SlotIndex End = getMBBEndIdx(MBB);
  1084. for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
  1085. if (VNI->kills[j].isPHI())
  1086. continue;
  1087. SlotIndex KillIdx = VNI->kills[j];
  1088. if (KillIdx > Idx && KillIdx < End)
  1089. return true;
  1090. }
  1091. return false;
  1092. }
  1093. /// RewriteInfo - Keep track of machine instrs that will be rewritten
  1094. /// during spilling.
  1095. namespace {
  1096. struct RewriteInfo {
  1097. SlotIndex Index;
  1098. MachineInstr *MI;
  1099. bool HasUse;
  1100. bool HasDef;
  1101. RewriteInfo(SlotIndex i, MachineInstr *mi, bool u, bool d)
  1102. : Index(i), MI(mi), HasUse(u), HasDef(d) {}
  1103. };
  1104. struct RewriteInfoCompare {
  1105. bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
  1106. return LHS.Index < RHS.Index;
  1107. }
  1108. };
  1109. }
/// rewriteInstructionsForSpills - Rewrite every def / use of li.reg within
/// the live range [I->start, I->end), creating spill intervals and recording
/// per-block spill points (SpillIdxes/SpillMBBs) and restore points
/// (RestoreIdxes/RestoreMBBs) for addIntervalsForSpills.
void LiveIntervals::
rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
                    LiveInterval::Ranges::const_iterator &I,
                    MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                    unsigned Slot, int LdSlot,
                    bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                    VirtRegMap &vrm,
                    const TargetRegisterClass* rc,
                    SmallVector<int, 4> &ReMatIds,
                    const MachineLoopInfo *loopInfo,
                    BitVector &SpillMBBs,
                    DenseMap<unsigned, std::vector<SRInfo> > &SpillIdxes,
                    BitVector &RestoreMBBs,
                    DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes,
                    DenseMap<unsigned,unsigned> &MBBVRegsMap,
                    std::vector<LiveInterval*> &NewLIs) {
  bool AllCanFold = true;
  unsigned NewVReg = 0;
  SlotIndex start = I->start.getBaseIndex();
  // Round the range end up to the next base index so comparisons below work
  // on whole-instruction granularity.
  SlotIndex end = I->end.getPrevSlot().getBaseIndex().getNextIndex();

  // First collect all the def / use in this live range that will be rewritten.
  // Make sure they are sorted according to instruction index.
  std::vector<RewriteInfo> RewriteMIs;
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineInstr *MI = &*ri;
    MachineOperand &O = ri.getOperand();
    // Advance before any rewriting can invalidate the iterator.
    ++ri;
    assert(!O.isImplicit() && "Spilling register that's used as implicit use?");
    SlotIndex index = getInstructionIndex(MI);
    if (index < start || index >= end)
      continue;

    if (O.isUndef())
      // Must be defined by an implicit def. It should not be spilled. Note,
      // this is for correctness reason. e.g.
      // 8   %reg1024<def> = IMPLICIT_DEF
      // 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
      // The live range [12, 14) are not part of the r1024 live interval since
      // it's defined by an implicit def. It will not conflicts with live
      // interval of r1025. Now suppose both registers are spilled, you can
      // easily see a situation where both registers are reloaded before
      // the INSERT_SUBREG and both target registers that would overlap.
      continue;
    RewriteMIs.push_back(RewriteInfo(index, MI, O.isUse(), O.isDef()));
  }
  std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());

  unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
  // Now rewrite the defs and uses.
  for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
    RewriteInfo &rwi = RewriteMIs[i];
    ++i;
    SlotIndex index = rwi.Index;
    bool MIHasUse = rwi.HasUse;
    bool MIHasDef = rwi.HasDef;
    MachineInstr *MI = rwi.MI;
    // If MI def and/or use the same register multiple times, then there
    // are multiple entries.
    unsigned NumUses = MIHasUse;
    while (i != e && RewriteMIs[i].MI == MI) {
      assert(RewriteMIs[i].Index == index);
      bool isUse = RewriteMIs[i].HasUse;
      if (isUse) ++NumUses;
      MIHasUse |= isUse;
      MIHasDef |= RewriteMIs[i].HasDef;
      ++i;
    }
    MachineBasicBlock *MBB = MI->getParent();

    if (ImpUse && MI != ReMatDefMI) {
      // Re-matting an instruction with virtual register use. Update the
      // register interval's spill weight to HUGE_VALF to prevent it from
      // being spilled.
      LiveInterval &ImpLi = getInterval(ImpUse);
      ImpLi.weight = HUGE_VALF;
    }

    unsigned MBBId = MBB->getNumber();
    unsigned ThisVReg = 0;
    if (TrySplit) {
      // When splitting, reuse the vreg already created for this block,
      // unless this instr is a pure def (see below).
      DenseMap<unsigned,unsigned>::iterator NVI = MBBVRegsMap.find(MBBId);
      if (NVI != MBBVRegsMap.end()) {
        ThisVReg = NVI->second;
        // One common case:
        // x = use
        // ...
        // ...
        // def = ...
        //     = use
        // It's better to start a new interval to avoid artifically
        // extend the new interval.
        if (MIHasDef && !MIHasUse) {
          MBBVRegsMap.erase(MBB->getNumber());
          ThisVReg = 0;
        }
      }
    }

    bool IsNew = ThisVReg == 0;
    if (IsNew) {
      // This ends the previous live interval. If all of its def / use
      // can be folded, give it a low spill weight.
      if (NewVReg && TrySplit && AllCanFold) {
        LiveInterval &nI = getOrCreateInterval(NewVReg);
        nI.weight /= 10.0F;
      }
      AllCanFold = true;
    }
    NewVReg = ThisVReg;

    bool HasDef = false;
    bool HasUse = false;
    bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
                         index, end, MI, ReMatOrigDefMI, ReMatDefMI,
                         Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                         CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
                         ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs);
    if (!HasDef && !HasUse)
      continue;

    AllCanFold &= CanFold;

    // Update weight of spill interval.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (!TrySplit) {
      // The spill weight is now infinity as it cannot be spilled again.
      nI.weight = HUGE_VALF;
      continue;
    }

    // Keep track of the last def and first use in each MBB.
    if (HasDef) {
      if (MI != ReMatOrigDefMI || !CanDelete) {
        bool HasKill = false;
        if (!HasUse)
          HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, index.getDefIndex());
        else {
          // If this is a two-address code, then this index starts a new VNInfo.
          const VNInfo *VNI = li.findDefinedVNInfoForRegInt(index.getDefIndex());
          if (VNI)
            HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, index.getDefIndex());
        }
        DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
          SpillIdxes.find(MBBId);
        if (!HasKill) {
          // No later kill in this block: record (or update) a spill point.
          if (SII == SpillIdxes.end()) {
            std::vector<SRInfo> S;
            S.push_back(SRInfo(index, NewVReg, true));
            SpillIdxes.insert(std::make_pair(MBBId, S));
          } else if (SII->second.back().vreg != NewVReg) {
            SII->second.push_back(SRInfo(index, NewVReg, true));
          } else if (index > SII->second.back().index) {
            // If there is an earlier def and this is a two-address
            // instruction, then it's not possible to fold the store (which
            // would also fold the load).
            SRInfo &Info = SII->second.back();
            Info.index = index;
            Info.canFold = !HasUse;
          }
          SpillMBBs.set(MBBId);
        } else if (SII != SpillIdxes.end() &&
                   SII->second.back().vreg == NewVReg &&
                   index > SII->second.back().index) {
          // There is an earlier def that's not killed (must be two-address).
          // The spill is no longer needed.
          SII->second.pop_back();
          if (SII->second.empty()) {
            SpillIdxes.erase(MBBId);
            SpillMBBs.reset(MBBId);
          }
        }
      }
    }

    if (HasUse) {
      DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
        SpillIdxes.find(MBBId);
      if (SII != SpillIdxes.end() &&
          SII->second.back().vreg == NewVReg &&
          index > SII->second.back().index)
        // Use(s) following the last def, it's not safe to fold the spill.
        SII->second.back().canFold = false;
      DenseMap<unsigned, std::vector<SRInfo> >::iterator RII =
        RestoreIdxes.find(MBBId);
      if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
        // If we are splitting live intervals, only fold if it's the first
        // use and there isn't another use later in the MBB.
        RII->second.back().canFold = false;
      else if (IsNew) {
        // Only need a reload if there isn't an earlier def / use.
        if (RII == RestoreIdxes.end()) {
          std::vector<SRInfo> Infos;
          Infos.push_back(SRInfo(index, NewVReg, true));
          RestoreIdxes.insert(std::make_pair(MBBId, Infos));
        } else {
          RII->second.push_back(SRInfo(index, NewVReg, true));
        }
        RestoreMBBs.set(MBBId);
      }
    }

    // Update spill weight.
    unsigned loopDepth = loopInfo->getLoopDepth(MBB);
    nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
  }

  if (NewVReg && TrySplit && AllCanFold) {
    // If all of its def / use can be folded, give it a low spill weight.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    nI.weight /= 10.0F;
  }
}
  1311. bool LiveIntervals::alsoFoldARestore(int Id, SlotIndex index,
  1312. unsigned vr, BitVector &RestoreMBBs,
  1313. DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
  1314. if (!RestoreMBBs[Id])
  1315. return false;
  1316. std::vector<SRInfo> &Restores = RestoreIdxes[Id];
  1317. for (unsigned i = 0, e = Restores.size(); i != e; ++i)
  1318. if (Restores[i].index == index &&
  1319. Restores[i].vreg == vr &&
  1320. Restores[i].canFold)
  1321. return true;
  1322. return false;
  1323. }
  1324. void LiveIntervals::eraseRestoreInfo(int Id, SlotIndex index,
  1325. unsigned vr, BitVector &RestoreMBBs,
  1326. DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
  1327. if (!RestoreMBBs[Id])
  1328. return;
  1329. std::vector<SRInfo> &Restores = RestoreIdxes[Id];
  1330. for (unsigned i = 0, e = Restores.size(); i != e; ++i)
  1331. if (Restores[i].index == index && Restores[i].vreg)
  1332. Restores[i].index = SlotIndex();
  1333. }
/// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
/// spilled and create empty intervals for their uses.
void
LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
                                    const TargetRegisterClass* rc,
                                    std::vector<LiveInterval*> &NewLIs) {
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineOperand &O = ri.getOperand();
    MachineInstr *MI = &*ri;
    // Advance the iterator before erasing/rewriting MI invalidates it.
    ++ri;
    if (O.isDef()) {
      assert(MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF &&
             "Register def was not rewritten?");
      RemoveMachineInstrFromMaps(MI);
      vrm.RemoveMachineInstrFromMaps(MI);
      MI->eraseFromParent();
    } else {
      // This must be an use of an implicit_def so it's not part of the live
      // interval. Create a new empty live interval for it.
      // FIXME: Can we simply erase some of the instructions? e.g. Stores?
      unsigned NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      vrm.setIsImplicitlyDefined(NewVReg);
      NewLIs.push_back(&getOrCreateInterval(NewVReg));
      // Rewrite every operand of MI that referenced li.reg to the fresh
      // vreg, marking it <undef> since it has no real defining value.
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand &MO = MI->getOperand(i);
        if (MO.isReg() && MO.getReg() == li.reg) {
          MO.setReg(NewVReg);
          MO.setIsUndef();
        }
      }
    }
  }
}
/// addIntervalsForSpillsFast - Quick-and-dirty spilling: assign li.reg a
/// stack slot, then for every instruction touching it either fold the memory
/// access into the instruction or rewrite its operands onto a brand-new
/// virtual register whose interval covers only that instruction. The new
/// intervals get infinite weight so they are never spilled again.
std::vector<LiveInterval*> LiveIntervals::
addIntervalsForSpillsFast(const LiveInterval &li,
                          const MachineLoopInfo *loopInfo,
                          VirtRegMap &vrm) {
  unsigned slot = vrm.assignVirt2StackSlot(li.reg);

  // Intervals created for the rewritten operands; returned to the caller.
  std::vector<LiveInterval*> added;

  assert(li.weight != HUGE_VALF &&
         "attempt to spill already spilled interval!");

  DEBUG({
      errs() << "\t\t\t\tadding intervals for spills for interval: ";
      li.dump();
      errs() << '\n';
    });

  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);

  MachineRegisterInfo::reg_iterator RI = mri_->reg_begin(li.reg);
  while (RI != mri_->reg_end()) {
    MachineInstr* MI = &*RI;

    // Collect every operand index of MI referencing li.reg, and note
    // whether the instruction reads and/or writes the register.
    SmallVector<unsigned, 2> Indices;
    bool HasUse = false;
    bool HasDef = false;

    for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
      MachineOperand& mop = MI->getOperand(i);
      if (!mop.isReg() || mop.getReg() != li.reg) continue;

      HasUse |= MI->getOperand(i).isUse();
      HasDef |= MI->getOperand(i).isDef();

      Indices.push_back(i);
    }

    // Prefer folding the stack access directly into MI; only if that fails
    // do we materialize a reload/spill around the instruction.
    if (!tryFoldMemoryOperand(MI, vrm, NULL, getInstructionIndex(MI),
                              Indices, true, slot, li.reg)) {
      unsigned NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      vrm.assignVirt2StackSlot(NewVReg, slot);

      // create a new register for this spill
      LiveInterval &nI = getOrCreateInterval(NewVReg);

      // the spill weight is now infinity as it
      // cannot be spilled again
      nI.weight = HUGE_VALF;

      // Rewrite register operands to use the new vreg.
      for (SmallVectorImpl<unsigned>::iterator I = Indices.begin(),
             E = Indices.end(); I != E; ++I) {
        MI->getOperand(*I).setReg(NewVReg);

        // Uses of the new vreg die at this instruction.
        if (MI->getOperand(*I).isUse())
          MI->getOperand(*I).setIsKill(true);
      }

      // Fill in the new live interval.
      SlotIndex index = getInstructionIndex(MI);
      if (HasUse) {
        // Live from the reload (load slot) up to the use slot of MI.
        LiveRange LR(index.getLoadIndex(), index.getUseIndex(),
                     nI.getNextValue(SlotIndex(), 0, false,
                                     getVNInfoAllocator()));
        DEBUG(errs() << " +" << LR);
        nI.addRange(LR);
        vrm.addRestorePoint(NewVReg, MI);
      }
      if (HasDef) {
        // Live from the def slot of MI down to the spill (store slot).
        LiveRange LR(index.getDefIndex(), index.getStoreIndex(),
                     nI.getNextValue(SlotIndex(), 0, false,
                                     getVNInfoAllocator()));
        DEBUG(errs() << " +" << LR);
        nI.addRange(LR);
        vrm.addSpillPoint(NewVReg, true, MI);
      }

      added.push_back(&nI);

      DEBUG({
          errs() << "\t\t\t\tadded new interval: ";
          nI.dump();
          errs() << '\n';
        });
    }

    // Rewriting/folding removed uses of li.reg, invalidating RI; restart
    // from the (new) first remaining use. The loop terminates when no uses
    // of li.reg are left.
    RI = mri_->reg_begin(li.reg);
  }
  return added;
}
/// addIntervalsForSpills - Spill the given interval, returning the new
/// (small) intervals created for its rewritten defs and uses. Attempts, in
/// order: rematerialization of val#s, folding of loads/stores into the
/// using instructions, and — when the interval spans several blocks —
/// splitting, before falling back to plain spill/restore insertion.
std::vector<LiveInterval*> LiveIntervals::
addIntervalsForSpills(const LiveInterval &li,
                      SmallVectorImpl<LiveInterval*> &SpillIs,
                      const MachineLoopInfo *loopInfo, VirtRegMap &vrm) {
  // Optional fast path (-fast-spill style): no remat/split analysis.
  if (EnableFastSpilling)
    return addIntervalsForSpillsFast(li, loopInfo, vrm);

  assert(li.weight != HUGE_VALF &&
         "attempt to spill already spilled interval!");

  DEBUG({
      errs() << "\t\t\t\tadding intervals for spills for interval: ";
      li.print(errs(), tri_);
      errs() << '\n';
    });

  // Each bit specifies whether a spill is required in the MBB; SpillIdxes /
  // RestoreIdxes map a block ID to the spill/restore points recorded by
  // rewriteInstructionsForSpills.
  BitVector SpillMBBs(mf_->getNumBlockIDs());
  DenseMap<unsigned, std::vector<SRInfo> > SpillIdxes;
  BitVector RestoreMBBs(mf_->getNumBlockIDs());
  DenseMap<unsigned, std::vector<SRInfo> > RestoreIdxes;
  DenseMap<unsigned,unsigned> MBBVRegsMap;
  std::vector<LiveInterval*> NewLIs;
  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);

  // Per-val# rematerialization state, indexed by VNInfo id.
  unsigned NumValNums = li.getNumValNums();
  SmallVector<MachineInstr*, 4> ReMatDefs;      // cloned remat defs
  ReMatDefs.resize(NumValNums, NULL);
  SmallVector<MachineInstr*, 4> ReMatOrigDefs;  // original remat defs
  ReMatOrigDefs.resize(NumValNums, NULL);
  SmallVector<int, 4> ReMatIds;
  ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
  BitVector ReMatDelete(NumValNums);            // defs safe to delete
  unsigned Slot = VirtRegMap::MAX_STACK_SLOT;

  // Spilling a split live interval. It cannot be split any further. Also,
  // it's also guaranteed to be a single val# / range interval.
  if (vrm.getPreSplitReg(li.reg)) {
    vrm.setIsSplitFromReg(li.reg, 0);
    // Unset the split kill marker on the last use.
    SlotIndex KillIdx = vrm.getKillPoint(li.reg);
    if (KillIdx != SlotIndex()) {
      MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
      assert(KillMI && "Last use disappeared?");
      int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
      assert(KillOp != -1 && "Last use disappeared?");
      KillMI->getOperand(KillOp).setIsKill(false);
    }
    vrm.removeKillPoint(li.reg);
    bool DefIsReMat = vrm.isReMaterialized(li.reg);
    Slot = vrm.getStackSlot(li.reg);
    assert(Slot != VirtRegMap::MAX_STACK_SLOT);
    MachineInstr *ReMatDefMI = DefIsReMat ?
      vrm.getReMaterializedMI(li.reg) : NULL;
    int LdSlot = 0;
    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
    bool isLoad = isLoadSS ||
      (DefIsReMat && (ReMatDefMI->getDesc().canFoldAsLoad()));
    bool IsFirstRange = true;
    for (LiveInterval::Ranges::const_iterator
           I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
      // If this is a split live interval with multiple ranges, it means there
      // are two-address instructions that re-defined the value. Only the
      // first def can be rematerialized!
      if (IsFirstRange) {
        // Note ReMatOrigDefMI has already been deleted.
        rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
                             Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                             false, vrm, rc, ReMatIds, loopInfo,
                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                             MBBVRegsMap, NewLIs);
      } else {
        rewriteInstructionsForSpills(li, false, I, NULL, 0,
                             Slot, 0, false, false, false,
                             false, vrm, rc, ReMatIds, loopInfo,
                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                             MBBVRegsMap, NewLIs);
      }
      IsFirstRange = false;
    }

    handleSpilledImpDefs(li, vrm, rc, NewLIs);
    return NewLIs;
  }

  // Only try splitting when the interval crosses basic-block boundaries.
  bool TrySplit = !intervalIsInOneMBB(li);
  if (TrySplit)
    ++numSplits;
  bool NeedStackSlot = false;

  // Classify each val#: rematerializable (clone its def for later re-issue)
  // or not (it will need the stack slot).
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    unsigned VN = VNI->id;
    if (VNI->isUnused())
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    MachineInstr *ReMatDefMI = VNI->isDefAccurate()
      ? getInstructionFromIndex(VNI->def) : 0;
    bool dummy;
    if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, SpillIs, dummy)) {
      // Remember how to remat the def of this val#.
      ReMatOrigDefs[VN] = ReMatDefMI;
      // Original def may be modified so we have to make a copy here.
      MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
      CloneMIs.push_back(Clone);
      ReMatDefs[VN] = Clone;

      bool CanDelete = true;
      if (VNI->hasPHIKill()) {
        // A kill is a phi node, not all of its uses can be rematerialized.
        // It must not be deleted.
        CanDelete = false;
        // Need a stack slot if there is any live range where uses cannot be
        // rematerialized.
        NeedStackSlot = true;
      }
      if (CanDelete)
        ReMatDelete.set(VN);
    } else {
      // Need a stack slot if there is any live range where uses cannot be
      // rematerialized.
      NeedStackSlot = true;
    }
  }

  // One stack slot per live interval.
  if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0) {
    if (vrm.getStackSlot(li.reg) == VirtRegMap::NO_STACK_SLOT)
      Slot = vrm.assignVirt2StackSlot(li.reg);

    // This case only occurs when the prealloc splitter has already assigned
    // a stack slot to this vreg.
    else
      Slot = vrm.getStackSlot(li.reg);
  }

  // Create new intervals and rewrite defs and uses.
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
    MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
    bool DefIsReMat = ReMatDefMI != NULL;
    bool CanDelete = ReMatDelete[I->valno->id];
    int LdSlot = 0;
    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
    bool isLoad = isLoadSS ||
      (DefIsReMat && ReMatDefMI->getDesc().canFoldAsLoad());
    rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
                           Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                           CanDelete, vrm, rc, ReMatIds, loopInfo,
                           SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                           MBBVRegsMap, NewLIs);
  }

  // Insert spills / restores if we are splitting.
  if (!TrySplit) {
    handleSpilledImpDefs(li, vrm, rc, NewLIs);
    return NewLIs;
  }

  SmallPtrSet<LiveInterval*, 4> AddedKill;
  SmallVector<unsigned, 2> Ops;
  if (NeedStackSlot) {
    // Walk every block flagged as needing a spill and try to fold the store
    // into the defining instruction; otherwise record a spill point.
    int Id = SpillMBBs.find_first();
    while (Id != -1) {
      std::vector<SRInfo> &spills = SpillIdxes[Id];
      for (unsigned i = 0, e = spills.size(); i != e; ++i) {
        SlotIndex index = spills[i].index;
        unsigned VReg = spills[i].vreg;
        LiveInterval &nI = getOrCreateInterval(VReg);
        bool isReMat = vrm.isReMaterialized(VReg);
        MachineInstr *MI = getInstructionFromIndex(index);
        bool CanFold = false;
        bool FoundUse = false;
        Ops.clear();
        if (spills[i].canFold) {
          CanFold = true;
          for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
            MachineOperand &MO = MI->getOperand(j);
            if (!MO.isReg() || MO.getReg() != VReg)
              continue;

            Ops.push_back(j);
            if (MO.isDef())
              continue;
            if (isReMat ||
                (!FoundUse && !alsoFoldARestore(Id, index, VReg,
                                                RestoreMBBs, RestoreIdxes))) {
              // MI has two-address uses of the same register. If the use
              // isn't the first and only use in the BB, then we can't fold
              // it. FIXME: Move this to rewriteInstructionsForSpills.
              CanFold = false;
              break;
            }
            FoundUse = true;
          }
        }
        // Fold the store into the def if possible.
        bool Folded = false;
        if (CanFold && !Ops.empty()) {
          if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,VReg)){
            Folded = true;
            if (FoundUse) {
              // Also folded uses, do not issue a load.
              eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
              nI.removeRange(index.getLoadIndex(), index.getDefIndex());
            }
            nI.removeRange(index.getDefIndex(), index.getStoreIndex());
          }
        }

        // Otherwise tell the spiller to issue a spill.
        if (!Folded) {
          // The interval's last range ends at the store slot iff the value
          // dies here; if so, no restore follows and this is a kill.
          LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
          bool isKill = LR->end == index.getStoreIndex();
          if (!MI->registerDefIsDead(nI.reg))
            // No need to spill a dead def.
            vrm.addSpillPoint(VReg, isKill, MI);
          if (isKill)
            AddedKill.insert(&nI);
        }
      }
      Id = SpillMBBs.find_next(Id);
    }
  }

  // Walk every block flagged as needing a restore and try to fold the load
  // (or the rematerialized def) into the using instruction; otherwise record
  // a restore point.
  int Id = RestoreMBBs.find_first();
  while (Id != -1) {
    std::vector<SRInfo> &restores = RestoreIdxes[Id];
    for (unsigned i = 0, e = restores.size(); i != e; ++i) {
      SlotIndex index = restores[i].index;
      // A null index marks an entry erased by eraseRestoreInfo above.
      if (index == SlotIndex())
        continue;
      unsigned VReg = restores[i].vreg;
      LiveInterval &nI = getOrCreateInterval(VReg);
      bool isReMat = vrm.isReMaterialized(VReg);
      MachineInstr *MI = getInstructionFromIndex(index);
      bool CanFold = false;
      Ops.clear();
      if (restores[i].canFold) {
        CanFold = true;
        for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
          MachineOperand &MO = MI->getOperand(j);
          if (!MO.isReg() || MO.getReg() != VReg)
            continue;

          if (MO.isDef()) {
            // If this restore were to be folded, it would have been folded
            // already.
            CanFold = false;
            break;
          }
          Ops.push_back(j);
        }
      }

      // Fold the load into the use if possible.
      bool Folded = false;
      if (CanFold && !Ops.empty()) {
        if (!isReMat)
          Folded = tryFoldMemoryOperand(MI, vrm, NULL,index,Ops,true,Slot,VReg);
        else {
          MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
          int LdSlot = 0;
          bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
          // If the rematerializable def is a load, also try to fold it.
          if (isLoadSS || ReMatDefMI->getDesc().canFoldAsLoad())
            Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                          Ops, isLoadSS, LdSlot, VReg);
          if (!Folded) {
            unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
            if (ImpUse) {
              // Re-matting an instruction with virtual register use. Add the
              // register as an implicit use on the use MI and update the register
              // interval's spill weight to HUGE_VALF to prevent it from being
              // spilled.
              LiveInterval &ImpLi = getInterval(ImpUse);
              ImpLi.weight = HUGE_VALF;
              MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
            }
          }
        }
      }
      // If folding is not possible / failed, then tell the spiller to issue a
      // load / rematerialization for us.
      if (Folded)
        nI.removeRange(index.getLoadIndex(), index.getDefIndex());
      else
        vrm.addRestorePoint(VReg, MI);
    }
    Id = RestoreMBBs.find_next(Id);
  }

  // Finalize intervals: add kills, finalize spill weights, and filter out
  // dead intervals.
  std::vector<LiveInterval*> RetNewLIs;
  for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
    LiveInterval *LI = NewLIs[i];
    if (!LI->empty()) {
      // Normalize the weight by the interval's (approximate) size.
      LI->weight /= SlotIndex::NUM * getApproximateInstructionCount(*LI);
      if (!AddedKill.count(LI)) {
        // Mark the last use of the new vreg as a kill (unless it is tied to
        // a def, i.e. a two-address operand).
        LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
        SlotIndex LastUseIdx = LR->end.getBaseIndex();
        MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
        int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
        assert(UseIdx != -1);
        if (!LastUse->isRegTiedToDefOperand(UseIdx)) {
          LastUse->getOperand(UseIdx).setIsKill();
          vrm.addKillPoint(LI->reg, LastUseIdx);
        }
      }
      RetNewLIs.push_back(LI);
    }
  }

  handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
  return RetNewLIs;
}
  1740. /// hasAllocatableSuperReg - Return true if the specified physical register has
  1741. /// any super register that's allocatable.
  1742. bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
  1743. for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
  1744. if (allocatableRegs_[*AS] && hasInterval(*AS))
  1745. return true;
  1746. return false;
  1747. }
  1748. /// getRepresentativeReg - Find the largest super register of the specified
  1749. /// physical register.
  1750. unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
  1751. // Find the largest super-register that is allocatable.
  1752. unsigned BestReg = Reg;
  1753. for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
  1754. unsigned SuperReg = *AS;
  1755. if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
  1756. BestReg = SuperReg;
  1757. break;
  1758. }
  1759. }
  1760. return BestReg;
  1761. }
  1762. /// getNumConflictsWithPhysReg - Return the number of uses and defs of the
  1763. /// specified interval that conflicts with the specified physical register.
  1764. unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
  1765. unsigned PhysReg) const {
  1766. unsigned NumConflicts = 0;
  1767. const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
  1768. for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
  1769. E = mri_->reg_end(); I != E; ++I) {
  1770. MachineOperand &O = I.getOperand();
  1771. MachineInstr *MI = O.getParent();
  1772. SlotIndex Index = getInstructionIndex(MI);
  1773. if (pli.liveAt(Index))
  1774. ++NumConflicts;
  1775. }
  1776. return NumConflicts;
  1777. }
/// spillPhysRegAroundRegDefsUses - Spill the specified physical register
/// around all defs and uses of the specified interval. Return true if it
/// was able to cut its interval.
///
/// Used as an emergency measure when register allocation runs out of
/// registers; aborts with a diagnostic if a conflicting physreg interval
/// cannot be cut around an instruction.
bool LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
                                                  unsigned PhysReg, VirtRegMap &vrm) {
  unsigned SpillReg = getRepresentativeReg(PhysReg);

  for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
    // If there are registers which alias PhysReg, but which are not a
    // sub-register of the chosen representative super register. Assert
    // since we can't handle it yet.
    assert(*AS == SpillReg || !allocatableRegs_[*AS] || !hasInterval(*AS) ||
           tri_->isSuperRegister(*AS, SpillReg));

  bool Cut = false;
  // PRegs: the physreg intervals we must spill around li's instructions —
  // either the representative itself, or (if it has no interval) the set of
  // its maximal sub-registers that do.
  SmallVector<unsigned, 4> PRegs;
  if (hasInterval(SpillReg))
    PRegs.push_back(SpillReg);
  else {
    SmallSet<unsigned, 4> Added;
    for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS)
      if (Added.insert(*AS) && hasInterval(*AS)) {
        PRegs.push_back(*AS);
        // Mark this sub-register's own sub-registers as covered.
        for (const unsigned* ASS = tri_->getSubRegisters(*AS); *ASS; ++ASS)
          Added.insert(*ASS);
      }
  }

  // Visit each instruction touching li.reg once (an instruction may appear
  // multiple times in the use-def chains via several operands).
  SmallPtrSet<MachineInstr*, 8> SeenMIs;
  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
         E = mri_->reg_end(); I != E; ++I) {
    MachineOperand &O = I.getOperand();
    MachineInstr *MI = O.getParent();
    if (SeenMIs.count(MI))
      continue;
    SeenMIs.insert(MI);
    SlotIndex Index = getInstructionIndex(MI);
    for (unsigned i = 0, e = PRegs.size(); i != e; ++i) {
      unsigned PReg = PRegs[i];
      LiveInterval &pli = getInterval(PReg);
      if (!pli.liveAt(Index))
        continue;
      vrm.addEmergencySpill(PReg, MI);
      SlotIndex StartIdx = Index.getLoadIndex();
      SlotIndex EndIdx = Index.getNextIndex().getBaseIndex();
      if (pli.isInOneLiveRange(StartIdx, EndIdx)) {
        // Carve the instruction's slot span out of the physreg interval.
        pli.removeRange(StartIdx, EndIdx);
        Cut = true;
      } else {
        // The conflict straddles live-range boundaries; we cannot cut it.
        // Report a fatal allocation failure (commonly caused by inline asm
        // with impossible constraints).
        std::string msg;
        raw_string_ostream Msg(msg);
        Msg << "Ran out of registers during register allocation!";
        if (MI->getOpcode() == TargetInstrInfo::INLINEASM) {
          Msg << "\nPlease check your inline asm statement for invalid "
              << "constraints:\n";
          MI->print(Msg, tm_);
        }
        llvm_report_error(Msg.str());
      }
      // Also carve the span out of any live sub-register intervals.
      for (const unsigned* AS = tri_->getSubRegisters(PReg); *AS; ++AS) {
        if (!hasInterval(*AS))
          continue;
        LiveInterval &spli = getInterval(*AS);
        if (spli.liveAt(Index))
          spli.removeRange(Index.getLoadIndex(),
                           Index.getNextIndex().getBaseIndex());
      }
    }
  }
  return Cut;
}
  1846. LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
  1847. MachineInstr* startInst) {
  1848. LiveInterval& Interval = getOrCreateInterval(reg);
  1849. VNInfo* VN = Interval.getNextValue(
  1850. SlotIndex(getInstructionIndex(startInst).getDefIndex()),
  1851. startInst, true, getVNInfoAllocator());
  1852. VN->setHasPHIKill(true);
  1853. VN->kills.push_back(indexes_->getTerminatorGap(startInst->getParent()));
  1854. LiveRange LR(
  1855. SlotIndex(getInstructionIndex(startInst).getDefIndex()),
  1856. getMBBEndIdx(startInst->getParent()).getNextIndex().getBaseIndex(), VN);
  1857. Interval.addRange(LR);
  1858. return LR;
  1859. }