MachineCSE.cpp
  1. //===-- MachineCSE.cpp - Machine Common Subexpression Elimination Pass ----===//
  2. //
  3. // The LLVM Compiler Infrastructure
  4. //
  5. // This file is distributed under the University of Illinois Open Source
  6. // License. See LICENSE.TXT for details.
  7. //
  8. //===----------------------------------------------------------------------===//
  9. //
  10. // This pass performs global common subexpression elimination on machine
  11. // instructions using a scoped hash table based value numbering scheme. It
  12. // must be run while the machine function is still in SSA form.
  13. //
  14. //===----------------------------------------------------------------------===//
  15. #define DEBUG_TYPE "machine-cse"
  16. #include "llvm/CodeGen/Passes.h"
  17. #include "llvm/ADT/DenseMap.h"
  18. #include "llvm/ADT/ScopedHashTable.h"
  19. #include "llvm/ADT/SmallSet.h"
  20. #include "llvm/ADT/Statistic.h"
  21. #include "llvm/Analysis/AliasAnalysis.h"
  22. #include "llvm/CodeGen/MachineDominators.h"
  23. #include "llvm/CodeGen/MachineInstr.h"
  24. #include "llvm/CodeGen/MachineRegisterInfo.h"
  25. #include "llvm/Support/Debug.h"
  26. #include "llvm/Support/RecyclingAllocator.h"
  27. #include "llvm/Target/TargetInstrInfo.h"
  28. using namespace llvm;
// Pass-wide statistics, printed when -stats is enabled.
STATISTIC(NumCoalesces, "Number of copies coalesced");
STATISTIC(NumCSEs,      "Number of common subexpression eliminated");
STATISTIC(NumPhysCSEs,
          "Number of physreg referencing common subexpr eliminated");
STATISTIC(NumCrossBBCSEs,
          "Number of cross-MBB physreg referencing CS eliminated");
STATISTIC(NumCommutes,  "Number of copies coalesced after commuting");
namespace {
  /// MachineCSE - Global common subexpression elimination on machine
  /// instructions, driven by a scoped hash table (value numbering) that is
  /// pushed/popped while walking the dominator tree.  Must run while the
  /// machine function is still in SSA form (see file header).
  class MachineCSE : public MachineFunctionPass {
    // Target hooks and analyses, cached in runOnMachineFunction.
    const TargetInstrInfo *TII;
    const TargetRegisterInfo *TRI;
    AliasAnalysis *AA;
    MachineDominatorTree *DT;
    MachineRegisterInfo *MRI;
  public:
    static char ID; // Pass identification
    MachineCSE() : MachineFunctionPass(ID), LookAheadLimit(5), CurrVN(0) {
      initializeMachineCSEPass(*PassRegistry::getPassRegistry());
    }

    virtual bool runOnMachineFunction(MachineFunction &MF);

    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.setPreservesCFG();
      MachineFunctionPass::getAnalysisUsage(AU);
      AU.addRequired<AliasAnalysis>();
      AU.addPreservedID(MachineLoopInfoID);
      AU.addRequired<MachineDominatorTree>();
      AU.addPreserved<MachineDominatorTree>();
    }

    virtual void releaseMemory() {
      ScopeMap.clear();
      Exps.clear();
    }

  private:
    // Maximum number of instructions scanned forward when checking whether
    // a physreg def is trivially dead or whether physreg defs reach a use.
    const unsigned LookAheadLimit;
    typedef RecyclingAllocator<BumpPtrAllocator,
        ScopedHashTableVal<MachineInstr*, unsigned> > AllocatorTy;
    typedef ScopedHashTable<MachineInstr*, unsigned,
        MachineInstrExpressionTrait, AllocatorTy> ScopedHTType;
    typedef ScopedHTType::ScopeTy ScopeType;
    // Open scope for each basic block currently on the dominator-tree path.
    DenseMap<MachineBasicBlock*, ScopeType*> ScopeMap;
    ScopedHTType VNT;                    // Instruction -> value number.
    SmallVector<MachineInstr*, 64> Exps; // Value number -> defining instr.
    unsigned CurrVN;                     // Next value number to hand out.

    bool PerformTrivialCoalescing(MachineInstr *MI, MachineBasicBlock *MBB);
    bool isPhysDefTriviallyDead(unsigned Reg,
                                MachineBasicBlock::const_iterator I,
                                MachineBasicBlock::const_iterator E) const;
    bool hasLivePhysRegDefUses(const MachineInstr *MI,
                               const MachineBasicBlock *MBB,
                               SmallSet<unsigned,8> &PhysRefs,
                               SmallVectorImpl<unsigned> &PhysDefs,
                               bool &PhysUseDef) const;
    bool PhysRegDefsReach(MachineInstr *CSMI, MachineInstr *MI,
                          SmallSet<unsigned,8> &PhysRefs,
                          SmallVectorImpl<unsigned> &PhysDefs,
                          bool &NonLocal) const;
    bool isCSECandidate(MachineInstr *MI);
    bool isProfitableToCSE(unsigned CSReg, unsigned Reg,
                           MachineInstr *CSMI, MachineInstr *MI);
    void EnterScope(MachineBasicBlock *MBB);
    void ExitScope(MachineBasicBlock *MBB);
    bool ProcessBlock(MachineBasicBlock *MBB);
    void ExitScopeIfDone(MachineDomTreeNode *Node,
                         DenseMap<MachineDomTreeNode*, unsigned> &OpenChildren);
    bool PerformCSE(MachineDomTreeNode *Node);
  };
} // end anonymous namespace
char MachineCSE::ID = 0;
// Exported handle so other code can refer to this pass by ID.
char &llvm::MachineCSEID = MachineCSE::ID;
INITIALIZE_PASS_BEGIN(MachineCSE, "machine-cse",
                "Machine Common Subexpression Elimination", false, false)
INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
INITIALIZE_PASS_END(MachineCSE, "machine-cse",
                "Machine Common Subexpression Elimination", false, false)
  104. bool MachineCSE::PerformTrivialCoalescing(MachineInstr *MI,
  105. MachineBasicBlock *MBB) {
  106. bool Changed = false;
  107. for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
  108. MachineOperand &MO = MI->getOperand(i);
  109. if (!MO.isReg() || !MO.isUse())
  110. continue;
  111. unsigned Reg = MO.getReg();
  112. if (!TargetRegisterInfo::isVirtualRegister(Reg))
  113. continue;
  114. if (!MRI->hasOneNonDBGUse(Reg))
  115. // Only coalesce single use copies. This ensure the copy will be
  116. // deleted.
  117. continue;
  118. MachineInstr *DefMI = MRI->getVRegDef(Reg);
  119. if (!DefMI->isCopy())
  120. continue;
  121. unsigned SrcReg = DefMI->getOperand(1).getReg();
  122. if (!TargetRegisterInfo::isVirtualRegister(SrcReg))
  123. continue;
  124. if (DefMI->getOperand(0).getSubReg() || DefMI->getOperand(1).getSubReg())
  125. continue;
  126. if (!MRI->constrainRegClass(SrcReg, MRI->getRegClass(Reg)))
  127. continue;
  128. DEBUG(dbgs() << "Coalescing: " << *DefMI);
  129. DEBUG(dbgs() << "*** to: " << *MI);
  130. MO.setReg(SrcReg);
  131. MRI->clearKillFlags(SrcReg);
  132. DefMI->eraseFromParent();
  133. ++NumCoalesces;
  134. Changed = true;
  135. }
  136. return Changed;
  137. }
/// isPhysDefTriviallyDead - Return true if the physical register Reg, defined
/// just before iterator I, is obviously dead: scanning forward at most
/// LookAheadLimit instructions, a redefinition (or regmask clobber) of Reg or
/// the end of the block is reached before any use of Reg.
bool
MachineCSE::isPhysDefTriviallyDead(unsigned Reg,
                                   MachineBasicBlock::const_iterator I,
                                   MachineBasicBlock::const_iterator E) const {
  unsigned LookAheadLeft = LookAheadLimit;
  while (LookAheadLeft) {
    // Skip over dbg_value's.  (These do not count against the lookahead
    // budget.)
    while (I != E && I->isDebugValue())
      ++I;

    if (I == E)
      // Reached end of block, register is obviously dead.
      return true;

    bool SeenDef = false;
    for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
      const MachineOperand &MO = I->getOperand(i);
      // A regmask clobber (e.g. from a call) acts as a def of Reg.
      if (MO.isRegMask() && MO.clobbersPhysReg(Reg))
        SeenDef = true;
      if (!MO.isReg() || !MO.getReg())
        continue;
      if (!TRI->regsOverlap(MO.getReg(), Reg))
        continue;
      if (MO.isUse())
        // Found a use!
        return false;
      SeenDef = true;
    }
    if (SeenDef)
      // See a def of Reg (or an alias) before encountering any use, it's
      // trivially dead.
      return true;

    --LookAheadLeft;
    ++I;
  }
  // Lookahead exhausted without a verdict: conservatively assume live.
  return false;
}
  173. /// hasLivePhysRegDefUses - Return true if the specified instruction read/write
  174. /// physical registers (except for dead defs of physical registers). It also
  175. /// returns the physical register def by reference if it's the only one and the
  176. /// instruction does not uses a physical register.
  177. bool MachineCSE::hasLivePhysRegDefUses(const MachineInstr *MI,
  178. const MachineBasicBlock *MBB,
  179. SmallSet<unsigned,8> &PhysRefs,
  180. SmallVectorImpl<unsigned> &PhysDefs,
  181. bool &PhysUseDef) const{
  182. // First, add all uses to PhysRefs.
  183. for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
  184. const MachineOperand &MO = MI->getOperand(i);
  185. if (!MO.isReg() || MO.isDef())
  186. continue;
  187. unsigned Reg = MO.getReg();
  188. if (!Reg)
  189. continue;
  190. if (TargetRegisterInfo::isVirtualRegister(Reg))
  191. continue;
  192. // Reading constant physregs is ok.
  193. if (!MRI->isConstantPhysReg(Reg, *MBB->getParent()))
  194. for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI)
  195. PhysRefs.insert(*AI);
  196. }
  197. // Next, collect all defs into PhysDefs. If any is already in PhysRefs
  198. // (which currently contains only uses), set the PhysUseDef flag.
  199. PhysUseDef = false;
  200. MachineBasicBlock::const_iterator I = MI; I = llvm::next(I);
  201. for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
  202. const MachineOperand &MO = MI->getOperand(i);
  203. if (!MO.isReg() || !MO.isDef())
  204. continue;
  205. unsigned Reg = MO.getReg();
  206. if (!Reg)
  207. continue;
  208. if (TargetRegisterInfo::isVirtualRegister(Reg))
  209. continue;
  210. // Check against PhysRefs even if the def is "dead".
  211. if (PhysRefs.count(Reg))
  212. PhysUseDef = true;
  213. // If the def is dead, it's ok. But the def may not marked "dead". That's
  214. // common since this pass is run before livevariables. We can scan
  215. // forward a few instructions and check if it is obviously dead.
  216. if (!MO.isDead() && !isPhysDefTriviallyDead(Reg, I, MBB->end()))
  217. PhysDefs.push_back(Reg);
  218. }
  219. // Finally, add all defs to PhysRefs as well.
  220. for (unsigned i = 0, e = PhysDefs.size(); i != e; ++i)
  221. for (MCRegAliasIterator AI(PhysDefs[i], TRI, true); AI.isValid(); ++AI)
  222. PhysRefs.insert(*AI);
  223. return !PhysRefs.empty();
  224. }
/// PhysRegDefsReach - Return true if none of the physical registers in
/// PhysRefs is clobbered between CSMI (the earlier, equivalent instruction)
/// and MI, so MI may reuse CSMI's physreg defs.  Scans at most LookAheadLimit
/// instructions.  Sets NonLocal when the scan had to cross from CSMI's block
/// into MI's block.
bool MachineCSE::PhysRegDefsReach(MachineInstr *CSMI, MachineInstr *MI,
                                  SmallSet<unsigned,8> &PhysRefs,
                                  SmallVectorImpl<unsigned> &PhysDefs,
                                  bool &NonLocal) const {
  // For now conservatively returns false if the common subexpression is
  // not in the same basic block as the given instruction. The only exception
  // is if the common subexpression is in the sole predecessor block.
  const MachineBasicBlock *MBB = MI->getParent();
  const MachineBasicBlock *CSMBB = CSMI->getParent();

  bool CrossMBB = false;
  if (CSMBB != MBB) {
    if (MBB->pred_size() != 1 || *MBB->pred_begin() != CSMBB)
      return false;

    for (unsigned i = 0, e = PhysDefs.size(); i != e; ++i) {
      if (MRI->isAllocatable(PhysDefs[i]) || MRI->isReserved(PhysDefs[i]))
        // Avoid extending live range of physical registers if they are
        // allocatable or reserved.
        return false;
    }
    CrossMBB = true;
  }
  // Scan forward from just after CSMI.  E is the target (MI); EE is the end
  // of the block currently being scanned.
  MachineBasicBlock::const_iterator I = CSMI; I = llvm::next(I);
  MachineBasicBlock::const_iterator E = MI;
  MachineBasicBlock::const_iterator EE = CSMBB->end();
  unsigned LookAheadLeft = LookAheadLimit;
  while (LookAheadLeft) {
    // Skip over dbg_value's.
    while (I != E && I != EE && I->isDebugValue())
      ++I;

    // Fell off the end of CSMI's block: re-seat the scan at the top of MI's
    // block (only legal in the single-predecessor CrossMBB case).
    if (I == EE) {
      assert(CrossMBB && "Reaching end-of-MBB without finding MI?");
      (void)CrossMBB;
      CrossMBB = false;
      NonLocal = true;
      I = MBB->begin();
      EE = MBB->end();
      continue;
    }

    if (I == E)
      // Reached MI with no intervening clobber: the defs survive.
      return true;

    for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
      const MachineOperand &MO = I->getOperand(i);
      // RegMasks go on instructions like calls that clobber lots of physregs.
      // Don't attempt to CSE across such an instruction.
      if (MO.isRegMask())
        return false;
      if (!MO.isReg() || !MO.isDef())
        continue;
      unsigned MOReg = MO.getReg();
      if (TargetRegisterInfo::isVirtualRegister(MOReg))
        continue;
      if (PhysRefs.count(MOReg))
        return false;
    }

    --LookAheadLeft;
    ++I;
  }

  // Lookahead exhausted before reaching MI: conservatively give up.
  return false;
}
  284. bool MachineCSE::isCSECandidate(MachineInstr *MI) {
  285. if (MI->isLabel() || MI->isPHI() || MI->isImplicitDef() ||
  286. MI->isKill() || MI->isInlineAsm() || MI->isDebugValue())
  287. return false;
  288. // Ignore copies.
  289. if (MI->isCopyLike())
  290. return false;
  291. // Ignore stuff that we obviously can't move.
  292. if (MI->mayStore() || MI->isCall() || MI->isTerminator() ||
  293. MI->hasUnmodeledSideEffects())
  294. return false;
  295. if (MI->mayLoad()) {
  296. // Okay, this instruction does a load. As a refinement, we allow the target
  297. // to decide whether the loaded value is actually a constant. If so, we can
  298. // actually use it as a load.
  299. if (!MI->isInvariantLoad(AA))
  300. // FIXME: we should be able to hoist loads with no other side effects if
  301. // there are no other instructions which can change memory in this loop.
  302. // This is a trivial form of alias analysis.
  303. return false;
  304. }
  305. return true;
  306. }
/// isProfitableToCSE - Return true if it's profitable to eliminate MI with a
/// common expression that defines Reg.  CSReg is the equivalent register
/// defined by the earlier instruction CSMI.
bool MachineCSE::isProfitableToCSE(unsigned CSReg, unsigned Reg,
                                   MachineInstr *CSMI, MachineInstr *MI) {
  // FIXME: Heuristic that works around the lack of live range splitting.
  // If CSReg is used at all uses of Reg, CSE should not increase register
  // pressure of CSReg.
  bool MayIncreasePressure = true;
  if (TargetRegisterInfo::isVirtualRegister(CSReg) &&
      TargetRegisterInfo::isVirtualRegister(Reg)) {
    MayIncreasePressure = false;
    SmallPtrSet<MachineInstr*, 8> CSUses;
    for (MachineRegisterInfo::use_nodbg_iterator I =MRI->use_nodbg_begin(CSReg),
         E = MRI->use_nodbg_end(); I != E; ++I) {
      MachineInstr *Use = &*I;
      CSUses.insert(Use);
    }
    // If any use of Reg is not already a use of CSReg, replacing Reg with
    // CSReg extends CSReg's live range to a new instruction.
    for (MachineRegisterInfo::use_nodbg_iterator I = MRI->use_nodbg_begin(Reg),
         E = MRI->use_nodbg_end(); I != E; ++I) {
      MachineInstr *Use = &*I;
      if (!CSUses.count(Use)) {
        MayIncreasePressure = true;
        break;
      }
    }
  }
  if (!MayIncreasePressure) return true;

  // Heuristics #1: Don't CSE "cheap" computation if the def is not local or in
  // an immediate predecessor. We don't want to increase register pressure and
  // end up causing other computation to be spilled.
  if (MI->isAsCheapAsAMove()) {
    MachineBasicBlock *CSBB = CSMI->getParent();
    MachineBasicBlock *BB = MI->getParent();
    if (CSBB != BB && !CSBB->isSuccessor(BB))
      return false;
  }

  // Heuristics #2: If the expression doesn't use a vr and the only uses
  // of the redundant computation are copies, do not cse.
  bool HasVRegUse = false;
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if (MO.isReg() && MO.isUse() &&
        TargetRegisterInfo::isVirtualRegister(MO.getReg())) {
      HasVRegUse = true;
      break;
    }
  }
  if (!HasVRegUse) {
    bool HasNonCopyUse = false;
    for (MachineRegisterInfo::use_nodbg_iterator I = MRI->use_nodbg_begin(Reg),
         E = MRI->use_nodbg_end(); I != E; ++I) {
      MachineInstr *Use = &*I;
      // Ignore copies.
      if (!Use->isCopyLike()) {
        HasNonCopyUse = true;
        break;
      }
    }
    if (!HasNonCopyUse)
      return false;
  }

  // Heuristics #3: If the common subexpression is used by PHIs, do not reuse
  // it unless the defined value is already used in the BB of the new use.
  bool HasPHI = false;
  SmallPtrSet<MachineBasicBlock*, 4> CSBBs;
  for (MachineRegisterInfo::use_nodbg_iterator I = MRI->use_nodbg_begin(CSReg),
       E = MRI->use_nodbg_end(); I != E; ++I) {
    MachineInstr *Use = &*I;
    HasPHI |= Use->isPHI();
    CSBBs.insert(Use->getParent());
  }

  if (!HasPHI)
    return true;
  return CSBBs.count(MI->getParent());
}
  382. void MachineCSE::EnterScope(MachineBasicBlock *MBB) {
  383. DEBUG(dbgs() << "Entering: " << MBB->getName() << '\n');
  384. ScopeType *Scope = new ScopeType(VNT);
  385. ScopeMap[MBB] = Scope;
  386. }
  387. void MachineCSE::ExitScope(MachineBasicBlock *MBB) {
  388. DEBUG(dbgs() << "Exiting: " << MBB->getName() << '\n');
  389. DenseMap<MachineBasicBlock*, ScopeType*>::iterator SI = ScopeMap.find(MBB);
  390. assert(SI != ScopeMap.end());
  391. delete SI->second;
  392. ScopeMap.erase(SI);
  393. }
/// ProcessBlock - Value-number every candidate instruction in MBB, replacing
/// any instruction whose value number already has a defining instruction
/// (visible through the current scope chain).  Returns true if the block was
/// modified.
bool MachineCSE::ProcessBlock(MachineBasicBlock *MBB) {
  bool Changed = false;

  SmallVector<std::pair<unsigned, unsigned>, 8> CSEPairs;
  SmallVector<unsigned, 2> ImplicitDefsToUpdate;
  for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end(); I != E; ) {
    MachineInstr *MI = &*I;
    // Advance the iterator now: MI may be erased below.
    ++I;

    if (!isCSECandidate(MI))
      continue;

    bool FoundCSE = VNT.count(MI);
    if (!FoundCSE) {
      // Look for trivial copy coalescing opportunities.
      if (PerformTrivialCoalescing(MI, MBB)) {
        Changed = true;
        // After coalescing MI itself may become a copy.
        if (MI->isCopyLike())
          continue;
        // Coalescing rewrote MI's operands, so re-query the hash table.
        FoundCSE = VNT.count(MI);
      }
    }

    // Commute commutable instructions.
    bool Commuted = false;
    if (!FoundCSE && MI->isCommutable()) {
      MachineInstr *NewMI = TII->commuteInstruction(MI);
      if (NewMI) {
        Commuted = true;
        FoundCSE = VNT.count(NewMI);
        if (NewMI != MI) {
          // New instruction. It doesn't need to be kept.
          NewMI->eraseFromParent();
          Changed = true;
        } else if (!FoundCSE)
          // MI was changed but it didn't help, commute it back!
          (void)TII->commuteInstruction(MI);
      }
    }

    // If the instruction defines physical registers and the values *may* be
    // used, then it's not safe to replace it with a common subexpression.
    // It's also not safe if the instruction uses physical registers.
    bool CrossMBBPhysDef = false;
    SmallSet<unsigned, 8> PhysRefs;
    SmallVector<unsigned, 2> PhysDefs;
    bool PhysUseDef = false;
    if (FoundCSE && hasLivePhysRegDefUses(MI, MBB, PhysRefs,
                                          PhysDefs, PhysUseDef)) {
      FoundCSE = false;

      // ... Unless the CS is local or is in the sole predecessor block
      // and it also defines the physical register which is not clobbered
      // in between and the physical register uses were not clobbered.
      // This can never be the case if the instruction both uses and
      // defines the same physical register, which was detected above.
      if (!PhysUseDef) {
        unsigned CSVN = VNT.lookup(MI);
        MachineInstr *CSMI = Exps[CSVN];
        if (PhysRegDefsReach(CSMI, MI, PhysRefs, PhysDefs, CrossMBBPhysDef))
          FoundCSE = true;
      }
    }

    if (!FoundCSE) {
      // No equivalent seen yet: record MI as the canonical definition of a
      // fresh value number.
      VNT.insert(MI, CurrVN++);
      Exps.push_back(MI);
      continue;
    }

    // Found a common subexpression, eliminate it.
    unsigned CSVN = VNT.lookup(MI);
    MachineInstr *CSMI = Exps[CSVN];
    DEBUG(dbgs() << "Examining: " << *MI);
    DEBUG(dbgs() << "*** Found a common subexpression: " << *CSMI);

    // Check if it's profitable to perform this CSE.
    bool DoCSE = true;
    unsigned NumDefs = MI->getDesc().getNumDefs() +
                       MI->getDesc().getNumImplicitDefs();

    // Pair up each def of MI with the corresponding def of CSMI; NumDefs lets
    // us stop scanning operands once all defs have been handled.
    for (unsigned i = 0, e = MI->getNumOperands(); NumDefs && i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (!MO.isReg() || !MO.isDef())
        continue;
      unsigned OldReg = MO.getReg();
      unsigned NewReg = CSMI->getOperand(i).getReg();

      // Go through implicit defs of CSMI and MI, if a def is not dead at MI,
      // we should make sure it is not dead at CSMI.
      if (MO.isImplicit() && !MO.isDead() && CSMI->getOperand(i).isDead())
        ImplicitDefsToUpdate.push_back(i);
      if (OldReg == NewReg) {
        --NumDefs;
        continue;
      }

      assert(TargetRegisterInfo::isVirtualRegister(OldReg) &&
             TargetRegisterInfo::isVirtualRegister(NewReg) &&
             "Do not CSE physical register defs!");

      if (!isProfitableToCSE(NewReg, OldReg, CSMI, MI)) {
        DEBUG(dbgs() << "*** Not profitable, avoid CSE!\n");
        DoCSE = false;
        break;
      }

      // Don't perform CSE if the result of the old instruction cannot exist
      // within the register class of the new instruction.
      const TargetRegisterClass *OldRC = MRI->getRegClass(OldReg);
      if (!MRI->constrainRegClass(NewReg, OldRC)) {
        DEBUG(dbgs() << "*** Not the same register class, avoid CSE!\n");
        DoCSE = false;
        break;
      }

      CSEPairs.push_back(std::make_pair(OldReg, NewReg));
      --NumDefs;
    }

    // Actually perform the elimination.
    if (DoCSE) {
      for (unsigned i = 0, e = CSEPairs.size(); i != e; ++i) {
        MRI->replaceRegWith(CSEPairs[i].first, CSEPairs[i].second);
        MRI->clearKillFlags(CSEPairs[i].second);
      }

      // Go through implicit defs of CSMI and MI, if a def is not dead at MI,
      // we should make sure it is not dead at CSMI.
      for (unsigned i = 0, e = ImplicitDefsToUpdate.size(); i != e; ++i)
        CSMI->getOperand(ImplicitDefsToUpdate[i]).setIsDead(false);

      if (CrossMBBPhysDef) {
        // Add physical register defs now coming in from a predecessor to MBB
        // livein list.
        while (!PhysDefs.empty()) {
          unsigned LiveIn = PhysDefs.pop_back_val();
          if (!MBB->isLiveIn(LiveIn))
            MBB->addLiveIn(LiveIn);
        }
        ++NumCrossBBCSEs;
      }

      MI->eraseFromParent();
      ++NumCSEs;
      if (!PhysRefs.empty())
        ++NumPhysCSEs;
      if (Commuted)
        ++NumCommutes;
      Changed = true;
    } else {
      // CSE rejected: keep MI and give it its own value number.
      VNT.insert(MI, CurrVN++);
      Exps.push_back(MI);
    }
    CSEPairs.clear();
    ImplicitDefsToUpdate.clear();
  }

  return Changed;
}
  535. /// ExitScopeIfDone - Destroy scope for the MBB that corresponds to the given
  536. /// dominator tree node if its a leaf or all of its children are done. Walk
  537. /// up the dominator tree to destroy ancestors which are now done.
  538. void
  539. MachineCSE::ExitScopeIfDone(MachineDomTreeNode *Node,
  540. DenseMap<MachineDomTreeNode*, unsigned> &OpenChildren) {
  541. if (OpenChildren[Node])
  542. return;
  543. // Pop scope.
  544. ExitScope(Node->getBlock());
  545. // Now traverse upwards to pop ancestors whose offsprings are all done.
  546. while (MachineDomTreeNode *Parent = Node->getIDom()) {
  547. unsigned Left = --OpenChildren[Parent];
  548. if (Left != 0)
  549. break;
  550. ExitScope(Parent->getBlock());
  551. Node = Parent;
  552. }
  553. }
  554. bool MachineCSE::PerformCSE(MachineDomTreeNode *Node) {
  555. SmallVector<MachineDomTreeNode*, 32> Scopes;
  556. SmallVector<MachineDomTreeNode*, 8> WorkList;
  557. DenseMap<MachineDomTreeNode*, unsigned> OpenChildren;
  558. CurrVN = 0;
  559. // Perform a DFS walk to determine the order of visit.
  560. WorkList.push_back(Node);
  561. do {
  562. Node = WorkList.pop_back_val();
  563. Scopes.push_back(Node);
  564. const std::vector<MachineDomTreeNode*> &Children = Node->getChildren();
  565. unsigned NumChildren = Children.size();
  566. OpenChildren[Node] = NumChildren;
  567. for (unsigned i = 0; i != NumChildren; ++i) {
  568. MachineDomTreeNode *Child = Children[i];
  569. WorkList.push_back(Child);
  570. }
  571. } while (!WorkList.empty());
  572. // Now perform CSE.
  573. bool Changed = false;
  574. for (unsigned i = 0, e = Scopes.size(); i != e; ++i) {
  575. MachineDomTreeNode *Node = Scopes[i];
  576. MachineBasicBlock *MBB = Node->getBlock();
  577. EnterScope(MBB);
  578. Changed |= ProcessBlock(MBB);
  579. // If it's a leaf node, it's done. Traverse upwards to pop ancestors.
  580. ExitScopeIfDone(Node, OpenChildren);
  581. }
  582. return Changed;
  583. }
  584. bool MachineCSE::runOnMachineFunction(MachineFunction &MF) {
  585. TII = MF.getTarget().getInstrInfo();
  586. TRI = MF.getTarget().getRegisterInfo();
  587. MRI = &MF.getRegInfo();
  588. AA = &getAnalysis<AliasAnalysis>();
  589. DT = &getAnalysis<MachineDominatorTree>();
  590. return PerformCSE(DT->getRootNode());
  591. }