// MachineCSE.cpp

  1. //===-- MachineCSE.cpp - Machine Common Subexpression Elimination Pass ----===//
  2. //
  3. // The LLVM Compiler Infrastructure
  4. //
  5. // This file is distributed under the University of Illinois Open Source
  6. // License. See LICENSE.TXT for details.
  7. //
  8. //===----------------------------------------------------------------------===//
  9. //
  10. // This pass performs global common subexpression elimination on machine
  11. // instructions using a scoped hash table based value numbering scheme. It
  12. // must be run while the machine function is still in SSA form.
  13. //
  14. //===----------------------------------------------------------------------===//
  15. #define DEBUG_TYPE "machine-cse"
  16. #include "llvm/CodeGen/Passes.h"
  17. #include "llvm/ADT/DenseMap.h"
  18. #include "llvm/ADT/ScopedHashTable.h"
  19. #include "llvm/ADT/SmallSet.h"
  20. #include "llvm/ADT/Statistic.h"
  21. #include "llvm/Analysis/AliasAnalysis.h"
  22. #include "llvm/CodeGen/MachineDominators.h"
  23. #include "llvm/CodeGen/MachineInstr.h"
  24. #include "llvm/CodeGen/MachineRegisterInfo.h"
  25. #include "llvm/Support/Debug.h"
  26. #include "llvm/Support/RecyclingAllocator.h"
  27. #include "llvm/Target/TargetInstrInfo.h"
  28. using namespace llvm;
  29. STATISTIC(NumCoalesces, "Number of copies coalesced");
  30. STATISTIC(NumCSEs, "Number of common subexpression eliminated");
  31. STATISTIC(NumPhysCSEs,
  32. "Number of physreg referencing common subexpr eliminated");
  33. STATISTIC(NumCrossBBCSEs,
  34. "Number of cross-MBB physreg referencing CS eliminated");
  35. STATISTIC(NumCommutes, "Number of copies coalesced after commuting");
namespace {
/// MachineCSE - Performs global common subexpression elimination on machine
/// instructions while the function is still in SSA form.  Value numbering is
/// done with a ScopedHashTable keyed on instruction identity
/// (MachineInstrExpressionTrait); one scope is opened per dominator-tree
/// node, so an expression is only reused where its definition dominates the
/// redundant occurrence.
class MachineCSE : public MachineFunctionPass {
  const TargetInstrInfo *TII;     // Target hooks (e.g. commuteInstruction).
  const TargetRegisterInfo *TRI;  // Physreg alias/overlap queries.
  AliasAnalysis *AA;              // Used to prove loads invariant.
  MachineDominatorTree *DT;       // Drives the scoped dominator-tree walk.
  MachineRegisterInfo *MRI;       // Virtual register def/use information.
public:
  static char ID; // Pass identification
  MachineCSE() : MachineFunctionPass(ID), LookAheadLimit(5), CurrVN(0) {
    initializeMachineCSEPass(*PassRegistry::getPassRegistry());
  }

  virtual bool runOnMachineFunction(MachineFunction &MF);

  virtual void getAnalysisUsage(AnalysisUsage &AU) const {
    AU.setPreservesCFG();
    MachineFunctionPass::getAnalysisUsage(AU);
    AU.addRequired<AliasAnalysis>();
    AU.addPreservedID(MachineLoopInfoID);
    AU.addRequired<MachineDominatorTree>();
    AU.addPreserved<MachineDominatorTree>();
  }

  virtual void releaseMemory() {
    ScopeMap.clear();
    Exps.clear();
  }

private:
  /// LookAheadLimit - Number of instructions to scan when checking whether a
  /// physreg def is trivially dead or whether physreg defs reach a later
  /// redundant instruction.
  const unsigned LookAheadLimit;
  typedef RecyclingAllocator<BumpPtrAllocator,
      ScopedHashTableVal<MachineInstr*, unsigned> > AllocatorTy;
  typedef ScopedHashTable<MachineInstr*, unsigned,
      MachineInstrExpressionTrait, AllocatorTy> ScopedHTType;
  typedef ScopedHTType::ScopeTy ScopeType;

  DenseMap<MachineBasicBlock*, ScopeType*> ScopeMap; // Open scope per MBB.
  ScopedHTType VNT;                    // Value-number table: MI -> VN.
  SmallVector<MachineInstr*, 64> Exps; // VN -> defining instruction.
  unsigned CurrVN;                     // Next value number to hand out.

  bool PerformTrivialCoalescing(MachineInstr *MI, MachineBasicBlock *MBB);
  bool isPhysDefTriviallyDead(unsigned Reg,
                              MachineBasicBlock::const_iterator I,
                              MachineBasicBlock::const_iterator E) const;
  bool hasLivePhysRegDefUses(const MachineInstr *MI,
                             const MachineBasicBlock *MBB,
                             SmallSet<unsigned,8> &PhysRefs,
                             SmallVectorImpl<unsigned> &PhysDefs,
                             bool &PhysUseDef) const;
  bool PhysRegDefsReach(MachineInstr *CSMI, MachineInstr *MI,
                        SmallSet<unsigned,8> &PhysRefs,
                        SmallVectorImpl<unsigned> &PhysDefs,
                        bool &NonLocal) const;
  bool isCSECandidate(MachineInstr *MI);
  bool isProfitableToCSE(unsigned CSReg, unsigned Reg,
                         MachineInstr *CSMI, MachineInstr *MI);
  void EnterScope(MachineBasicBlock *MBB);
  void ExitScope(MachineBasicBlock *MBB);
  bool ProcessBlock(MachineBasicBlock *MBB);
  void ExitScopeIfDone(MachineDomTreeNode *Node,
                       DenseMap<MachineDomTreeNode*, unsigned> &OpenChildren);
  bool PerformCSE(MachineDomTreeNode *Node);
};
} // end anonymous namespace
char MachineCSE::ID = 0;
char &llvm::MachineCSEID = MachineCSE::ID;

// Register the pass and its analysis dependencies with the pass registry so
// it can be requested by name ("machine-cse") and by opt/llc pipelines.
INITIALIZE_PASS_BEGIN(MachineCSE, "machine-cse",
                "Machine Common Subexpression Elimination", false, false)
INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
INITIALIZE_PASS_END(MachineCSE, "machine-cse",
                "Machine Common Subexpression Elimination", false, false)
/// PerformTrivialCoalescing - For each virtual-register use operand of MI
/// whose defining instruction is a COPY from another virtual register (with
/// no sub-register index on the destination) and which has exactly one
/// non-debug use, rewrite the operand to read the copy's source directly and
/// delete the copy.  This makes otherwise-identical instructions hash equal
/// so they can be CSE'd.  Returns true if anything changed.
bool MachineCSE::PerformTrivialCoalescing(MachineInstr *MI,
                                          MachineBasicBlock *MBB) {
  bool Changed = false;
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || !MO.isUse())
      continue;
    unsigned Reg = MO.getReg();
    if (!TargetRegisterInfo::isVirtualRegister(Reg))
      continue;
    if (!MRI->hasOneNonDBGUse(Reg))
      // Only coalesce single use copies. This ensure the copy will be
      // deleted.
      continue;
    MachineInstr *DefMI = MRI->getVRegDef(Reg);
    if (!DefMI->isCopy())
      continue;
    unsigned SrcReg = DefMI->getOperand(1).getReg();
    if (!TargetRegisterInfo::isVirtualRegister(SrcReg))
      continue;
    if (DefMI->getOperand(0).getSubReg())
      continue;
    // If the copy reads a sub-register of SrcReg, the substituted use needs a
    // register class whose SrcSubReg sub-registers match Reg's class.
    unsigned SrcSubReg = DefMI->getOperand(1).getSubReg();
    const TargetRegisterClass *RC = MRI->getRegClass(Reg);
    if (SrcSubReg)
      RC = TRI->getMatchingSuperRegClass(MRI->getRegClass(SrcReg), RC,
                                         SrcSubReg);
    // Give up if SrcReg cannot be constrained to the class the use requires.
    if (!MRI->constrainRegClass(SrcReg, RC))
      continue;

    DEBUG(dbgs() << "Coalescing: " << *DefMI);
    DEBUG(dbgs() << "*** to: " << *MI);
    // Rewrite the use (carrying over any sub-register index); the copy is now
    // dead and can be erased.
    MO.substVirtReg(SrcReg, SrcSubReg, *TRI);
    // Extending SrcReg's live range may invalidate existing kill flags.
    MRI->clearKillFlags(SrcReg);
    DefMI->eraseFromParent();
    ++NumCoalesces;
    Changed = true;
  }

  return Changed;
}
/// isPhysDefTriviallyDead - Return true if the physical register Reg, defined
/// just before iterator I, is provably dead: within the next LookAheadLimit
/// instructions (skipping debug values) it is either redefined/clobbered
/// before any read, or the end of the block E is reached without any
/// reference to it.  Conservatively returns false when the scan budget runs
/// out.
bool
MachineCSE::isPhysDefTriviallyDead(unsigned Reg,
                                   MachineBasicBlock::const_iterator I,
                                   MachineBasicBlock::const_iterator E) const {
  unsigned LookAheadLeft = LookAheadLimit;
  while (LookAheadLeft) {
    // Skip over dbg_value's.
    while (I != E && I->isDebugValue())
      ++I;

    if (I == E)
      // Reached end of block, register is obviously dead.
      return true;

    bool SeenDef = false;
    for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
      const MachineOperand &MO = I->getOperand(i);
      // A register mask (e.g. on a call) that clobbers Reg counts as a def.
      if (MO.isRegMask() && MO.clobbersPhysReg(Reg))
        SeenDef = true;
      if (!MO.isReg() || !MO.getReg())
        continue;
      // Any operand on an overlapping physreg (alias/sub/super) is relevant.
      if (!TRI->regsOverlap(MO.getReg(), Reg))
        continue;
      if (MO.isUse())
        // Found a use!
        return false;
      SeenDef = true;
    }
    if (SeenDef)
      // See a def of Reg (or an alias) before encountering any use, it's
      // trivially dead.
      return true;

    --LookAheadLeft;
    ++I;
  }
  return false;
}
/// hasLivePhysRegDefUses - Return true if the specified instruction reads or
/// writes physical registers (except for dead defs of physical registers).
/// On return, PhysRefs holds every referenced physreg and its aliases
/// (constant physreg reads excluded), PhysDefs holds the physreg defs that
/// are not provably dead, and PhysUseDef is set when the instruction both
/// uses and defines the same physical register.
bool MachineCSE::hasLivePhysRegDefUses(const MachineInstr *MI,
                                       const MachineBasicBlock *MBB,
                                       SmallSet<unsigned,8> &PhysRefs,
                                       SmallVectorImpl<unsigned> &PhysDefs,
                                       bool &PhysUseDef) const {
  // First, add all uses to PhysRefs.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || MO.isDef())
      continue;
    unsigned Reg = MO.getReg();
    if (!Reg)
      continue;
    if (TargetRegisterInfo::isVirtualRegister(Reg))
      continue;
    // Reading constant physregs is ok.
    if (!MRI->isConstantPhysReg(Reg, *MBB->getParent()))
      for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI)
        PhysRefs.insert(*AI);
  }

  // Next, collect all defs into PhysDefs. If any is already in PhysRefs
  // (which currently contains only uses), set the PhysUseDef flag.
  PhysUseDef = false;
  MachineBasicBlock::const_iterator I = MI; I = llvm::next(I);
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || !MO.isDef())
      continue;
    unsigned Reg = MO.getReg();
    if (!Reg)
      continue;
    if (TargetRegisterInfo::isVirtualRegister(Reg))
      continue;
    // Check against PhysRefs even if the def is "dead".
    if (PhysRefs.count(Reg))
      PhysUseDef = true;
    // If the def is dead, it's ok. But the def may not marked "dead". That's
    // common since this pass is run before livevariables. We can scan
    // forward a few instructions and check if it is obviously dead.
    if (!MO.isDead() && !isPhysDefTriviallyDead(Reg, I, MBB->end()))
      PhysDefs.push_back(Reg);
  }

  // Finally, add all defs to PhysRefs as well.
  for (unsigned i = 0, e = PhysDefs.size(); i != e; ++i)
    for (MCRegAliasIterator AI(PhysDefs[i], TRI, true); AI.isValid(); ++AI)
      PhysRefs.insert(*AI);

  return !PhysRefs.empty();
}
/// PhysRegDefsReach - Return true if the physical register values in PhysRefs
/// produced at CSMI still reach MI, i.e. no instruction between the two
/// (within the LookAheadLimit scan budget, skipping debug values) redefines
/// any register in PhysRefs or carries a register mask.  NonLocal is set when
/// CSMI lives in the sole predecessor block rather than MI's own block.
bool MachineCSE::PhysRegDefsReach(MachineInstr *CSMI, MachineInstr *MI,
                                  SmallSet<unsigned,8> &PhysRefs,
                                  SmallVectorImpl<unsigned> &PhysDefs,
                                  bool &NonLocal) const {
  // For now conservatively returns false if the common subexpression is
  // not in the same basic block as the given instruction. The only exception
  // is if the common subexpression is in the sole predecessor block.
  const MachineBasicBlock *MBB = MI->getParent();
  const MachineBasicBlock *CSMBB = CSMI->getParent();

  bool CrossMBB = false;
  if (CSMBB != MBB) {
    if (MBB->pred_size() != 1 || *MBB->pred_begin() != CSMBB)
      return false;

    for (unsigned i = 0, e = PhysDefs.size(); i != e; ++i) {
      if (MRI->isAllocatable(PhysDefs[i]) || MRI->isReserved(PhysDefs[i]))
        // Avoid extending live range of physical registers if they are
        // allocatable or reserved.
        return false;
    }
    CrossMBB = true;
  }
  // Scan forward from just after CSMI.  E marks the target instruction; EE is
  // the end of the block currently being scanned (CSMBB first, then MBB when
  // the scan crosses the block boundary).
  MachineBasicBlock::const_iterator I = CSMI; I = llvm::next(I);
  MachineBasicBlock::const_iterator E = MI;
  MachineBasicBlock::const_iterator EE = CSMBB->end();
  unsigned LookAheadLeft = LookAheadLimit;
  while (LookAheadLeft) {
    // Skip over dbg_value's.
    while (I != E && I != EE && I->isDebugValue())
      ++I;

    if (I == EE) {
      // Fell off the end of CSMBB; resume at the top of MI's block.
      assert(CrossMBB && "Reaching end-of-MBB without finding MI?");
      (void)CrossMBB;
      CrossMBB = false;
      NonLocal = true;
      I = MBB->begin();
      EE = MBB->end();
      continue;
    }

    if (I == E)
      // Reached MI with no intervening clobber: the values reach.
      return true;

    for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
      const MachineOperand &MO = I->getOperand(i);
      // RegMasks go on instructions like calls that clobber lots of physregs.
      // Don't attempt to CSE across such an instruction.
      if (MO.isRegMask())
        return false;
      if (!MO.isReg() || !MO.isDef())
        continue;
      unsigned MOReg = MO.getReg();
      if (TargetRegisterInfo::isVirtualRegister(MOReg))
        continue;
      if (PhysRefs.count(MOReg))
        return false;
    }

    --LookAheadLeft;
    ++I;
  }

  return false;
}
  289. bool MachineCSE::isCSECandidate(MachineInstr *MI) {
  290. if (MI->isLabel() || MI->isPHI() || MI->isImplicitDef() ||
  291. MI->isKill() || MI->isInlineAsm() || MI->isDebugValue())
  292. return false;
  293. // Ignore copies.
  294. if (MI->isCopyLike())
  295. return false;
  296. // Ignore stuff that we obviously can't move.
  297. if (MI->mayStore() || MI->isCall() || MI->isTerminator() ||
  298. MI->hasUnmodeledSideEffects())
  299. return false;
  300. if (MI->mayLoad()) {
  301. // Okay, this instruction does a load. As a refinement, we allow the target
  302. // to decide whether the loaded value is actually a constant. If so, we can
  303. // actually use it as a load.
  304. if (!MI->isInvariantLoad(AA))
  305. // FIXME: we should be able to hoist loads with no other side effects if
  306. // there are no other instructions which can change memory in this loop.
  307. // This is a trivial form of alias analysis.
  308. return false;
  309. }
  310. return true;
  311. }
/// isProfitableToCSE - Return true if it's profitable to eliminate MI with a
/// common expression that defines Reg.  CSReg is the corresponding register
/// defined by the earlier, equivalent instruction CSMI.
bool MachineCSE::isProfitableToCSE(unsigned CSReg, unsigned Reg,
                                   MachineInstr *CSMI, MachineInstr *MI) {
  // FIXME: Heuristics that works around the lack of live range splitting.

  // If CSReg is used at all uses of Reg, CSE should not increase register
  // pressure of CSReg.
  bool MayIncreasePressure = true;
  if (TargetRegisterInfo::isVirtualRegister(CSReg) &&
      TargetRegisterInfo::isVirtualRegister(Reg)) {
    MayIncreasePressure = false;
    // Collect every instruction that already reads CSReg ...
    SmallPtrSet<MachineInstr*, 8> CSUses;
    for (MachineRegisterInfo::use_nodbg_iterator I =MRI->use_nodbg_begin(CSReg),
         E = MRI->use_nodbg_end(); I != E; ++I) {
      MachineInstr *Use = &*I;
      CSUses.insert(Use);
    }
    // ... and check whether any use of Reg falls outside that set.
    for (MachineRegisterInfo::use_nodbg_iterator I = MRI->use_nodbg_begin(Reg),
         E = MRI->use_nodbg_end(); I != E; ++I) {
      MachineInstr *Use = &*I;
      if (!CSUses.count(Use)) {
        MayIncreasePressure = true;
        break;
      }
    }
  }
  if (!MayIncreasePressure) return true;

  // Heuristics #1: Don't CSE "cheap" computation if the def is not local or in
  // an immediate predecessor. We don't want to increase register pressure and
  // end up causing other computation to be spilled.
  if (MI->isAsCheapAsAMove()) {
    MachineBasicBlock *CSBB = CSMI->getParent();
    MachineBasicBlock *BB = MI->getParent();
    if (CSBB != BB && !CSBB->isSuccessor(BB))
      return false;
  }

  // Heuristics #2: If the expression does not use a vr and the only uses
  // of the redundant computation are copies, do not cse.
  bool HasVRegUse = false;
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if (MO.isReg() && MO.isUse() &&
        TargetRegisterInfo::isVirtualRegister(MO.getReg())) {
      HasVRegUse = true;
      break;
    }
  }
  if (!HasVRegUse) {
    bool HasNonCopyUse = false;
    for (MachineRegisterInfo::use_nodbg_iterator I = MRI->use_nodbg_begin(Reg),
         E = MRI->use_nodbg_end(); I != E; ++I) {
      MachineInstr *Use = &*I;
      // Ignore copies.
      if (!Use->isCopyLike()) {
        HasNonCopyUse = true;
        break;
      }
    }
    if (!HasNonCopyUse)
      return false;
  }

  // Heuristics #3: If the common subexpression is used by PHIs, do not reuse
  // it unless the defined value is already used in the BB of the new use.
  bool HasPHI = false;
  SmallPtrSet<MachineBasicBlock*, 4> CSBBs;
  for (MachineRegisterInfo::use_nodbg_iterator I = MRI->use_nodbg_begin(CSReg),
       E = MRI->use_nodbg_end(); I != E; ++I) {
    MachineInstr *Use = &*I;
    HasPHI |= Use->isPHI();
    CSBBs.insert(Use->getParent());
  }

  if (!HasPHI)
    return true;
  return CSBBs.count(MI->getParent());
}
  387. void MachineCSE::EnterScope(MachineBasicBlock *MBB) {
  388. DEBUG(dbgs() << "Entering: " << MBB->getName() << '\n');
  389. ScopeType *Scope = new ScopeType(VNT);
  390. ScopeMap[MBB] = Scope;
  391. }
  392. void MachineCSE::ExitScope(MachineBasicBlock *MBB) {
  393. DEBUG(dbgs() << "Exiting: " << MBB->getName() << '\n');
  394. DenseMap<MachineBasicBlock*, ScopeType*>::iterator SI = ScopeMap.find(MBB);
  395. assert(SI != ScopeMap.end());
  396. delete SI->second;
  397. ScopeMap.erase(SI);
  398. }
/// ProcessBlock - Run CSE over a single basic block: try coalescing and
/// commuting to expose matches, look each candidate up in the value-number
/// table, and either record it as a new expression or replace its defs with
/// the earlier equivalent instruction's and erase it.  Returns true if the
/// block was changed.
bool MachineCSE::ProcessBlock(MachineBasicBlock *MBB) {
  bool Changed = false;

  SmallVector<std::pair<unsigned, unsigned>, 8> CSEPairs;
  SmallVector<unsigned, 2> ImplicitDefsToUpdate;
  for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end(); I != E; ) {
    MachineInstr *MI = &*I;
    ++I; // Advance before any mutation: MI may be erased below.

    if (!isCSECandidate(MI))
      continue;

    bool FoundCSE = VNT.count(MI);
    if (!FoundCSE) {
      // Look for trivial copy coalescing opportunities.
      if (PerformTrivialCoalescing(MI, MBB)) {
        Changed = true;
        // After coalescing MI itself may become a copy.
        if (MI->isCopyLike())
          continue;
        // Coalescing may have made MI hash equal to an earlier expression.
        FoundCSE = VNT.count(MI);
      }
    }

    // Commute commutable instructions.
    bool Commuted = false;
    if (!FoundCSE && MI->isCommutable()) {
      MachineInstr *NewMI = TII->commuteInstruction(MI);
      if (NewMI) {
        Commuted = true;
        FoundCSE = VNT.count(NewMI);
        if (NewMI != MI) {
          // New instruction. It doesn't need to be kept.
          NewMI->eraseFromParent();
          Changed = true;
        } else if (!FoundCSE)
          // MI was changed but it didn't help, commute it back!
          (void)TII->commuteInstruction(MI);
      }
    }

    // If the instruction defines physical registers and the values *may* be
    // used, then it's not safe to replace it with a common subexpression.
    // It's also not safe if the instruction uses physical registers.
    bool CrossMBBPhysDef = false;
    SmallSet<unsigned, 8> PhysRefs;
    SmallVector<unsigned, 2> PhysDefs;
    bool PhysUseDef = false;
    if (FoundCSE && hasLivePhysRegDefUses(MI, MBB, PhysRefs,
                                          PhysDefs, PhysUseDef)) {
      FoundCSE = false;

      // ... Unless the CS is local or is in the sole predecessor block
      // and it also defines the physical register which is not clobbered
      // in between and the physical register uses were not clobbered.
      // This can never be the case if the instruction both uses and
      // defines the same physical register, which was detected above.
      if (!PhysUseDef) {
        unsigned CSVN = VNT.lookup(MI);
        MachineInstr *CSMI = Exps[CSVN];
        if (PhysRegDefsReach(CSMI, MI, PhysRefs, PhysDefs, CrossMBBPhysDef))
          FoundCSE = true;
      }
    }

    if (!FoundCSE) {
      // No match: record MI as a new expression with a fresh value number.
      VNT.insert(MI, CurrVN++);
      Exps.push_back(MI);
      continue;
    }

    // Found a common subexpression, eliminate it.
    unsigned CSVN = VNT.lookup(MI);
    MachineInstr *CSMI = Exps[CSVN];
    DEBUG(dbgs() << "Examining: " << *MI);
    DEBUG(dbgs() << "*** Found a common subexpression: " << *CSMI);

    // Check if it's profitable to perform this CSE.
    bool DoCSE = true;
    unsigned NumDefs = MI->getDesc().getNumDefs() +
                       MI->getDesc().getNumImplicitDefs();

    // Pair up each def of MI with the corresponding def of CSMI; NumDefs lets
    // us stop early once all defs have been seen.
    for (unsigned i = 0, e = MI->getNumOperands(); NumDefs && i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (!MO.isReg() || !MO.isDef())
        continue;
      unsigned OldReg = MO.getReg();
      unsigned NewReg = CSMI->getOperand(i).getReg();

      // Go through implicit defs of CSMI and MI, if a def is not dead at MI,
      // we should make sure it is not dead at CSMI.
      if (MO.isImplicit() && !MO.isDead() && CSMI->getOperand(i).isDead())
        ImplicitDefsToUpdate.push_back(i);
      if (OldReg == NewReg) {
        --NumDefs;
        continue;
      }

      assert(TargetRegisterInfo::isVirtualRegister(OldReg) &&
             TargetRegisterInfo::isVirtualRegister(NewReg) &&
             "Do not CSE physical register defs!");

      if (!isProfitableToCSE(NewReg, OldReg, CSMI, MI)) {
        DEBUG(dbgs() << "*** Not profitable, avoid CSE!\n");
        DoCSE = false;
        break;
      }

      // Don't perform CSE if the result of the old instruction cannot exist
      // within the register class of the new instruction.
      const TargetRegisterClass *OldRC = MRI->getRegClass(OldReg);
      if (!MRI->constrainRegClass(NewReg, OldRC)) {
        DEBUG(dbgs() << "*** Not the same register class, avoid CSE!\n");
        DoCSE = false;
        break;
      }

      CSEPairs.push_back(std::make_pair(OldReg, NewReg));
      --NumDefs;
    }

    // Actually perform the elimination.
    if (DoCSE) {
      for (unsigned i = 0, e = CSEPairs.size(); i != e; ++i) {
        MRI->replaceRegWith(CSEPairs[i].first, CSEPairs[i].second);
        MRI->clearKillFlags(CSEPairs[i].second);
      }

      // Go through implicit defs of CSMI and MI, if a def is not dead at MI,
      // we should make sure it is not dead at CSMI.
      for (unsigned i = 0, e = ImplicitDefsToUpdate.size(); i != e; ++i)
        CSMI->getOperand(ImplicitDefsToUpdate[i]).setIsDead(false);

      if (CrossMBBPhysDef) {
        // Add physical register defs now coming in from a predecessor to MBB
        // livein list.
        while (!PhysDefs.empty()) {
          unsigned LiveIn = PhysDefs.pop_back_val();
          if (!MBB->isLiveIn(LiveIn))
            MBB->addLiveIn(LiveIn);
        }
        ++NumCrossBBCSEs;
      }

      MI->eraseFromParent();
      ++NumCSEs;
      if (!PhysRefs.empty())
        ++NumPhysCSEs;
      if (Commuted)
        ++NumCommutes;
      Changed = true;
    } else {
      // Not worth eliminating; keep MI and give it its own value number.
      VNT.insert(MI, CurrVN++);
      Exps.push_back(MI);
    }
    // These are per-instruction scratch vectors; reset for the next MI.
    CSEPairs.clear();
    ImplicitDefsToUpdate.clear();
  }

  return Changed;
}
  540. /// ExitScopeIfDone - Destroy scope for the MBB that corresponds to the given
  541. /// dominator tree node if its a leaf or all of its children are done. Walk
  542. /// up the dominator tree to destroy ancestors which are now done.
  543. void
  544. MachineCSE::ExitScopeIfDone(MachineDomTreeNode *Node,
  545. DenseMap<MachineDomTreeNode*, unsigned> &OpenChildren) {
  546. if (OpenChildren[Node])
  547. return;
  548. // Pop scope.
  549. ExitScope(Node->getBlock());
  550. // Now traverse upwards to pop ancestors whose offsprings are all done.
  551. while (MachineDomTreeNode *Parent = Node->getIDom()) {
  552. unsigned Left = --OpenChildren[Parent];
  553. if (Left != 0)
  554. break;
  555. ExitScope(Parent->getBlock());
  556. Node = Parent;
  557. }
  558. }
  559. bool MachineCSE::PerformCSE(MachineDomTreeNode *Node) {
  560. SmallVector<MachineDomTreeNode*, 32> Scopes;
  561. SmallVector<MachineDomTreeNode*, 8> WorkList;
  562. DenseMap<MachineDomTreeNode*, unsigned> OpenChildren;
  563. CurrVN = 0;
  564. // Perform a DFS walk to determine the order of visit.
  565. WorkList.push_back(Node);
  566. do {
  567. Node = WorkList.pop_back_val();
  568. Scopes.push_back(Node);
  569. const std::vector<MachineDomTreeNode*> &Children = Node->getChildren();
  570. unsigned NumChildren = Children.size();
  571. OpenChildren[Node] = NumChildren;
  572. for (unsigned i = 0; i != NumChildren; ++i) {
  573. MachineDomTreeNode *Child = Children[i];
  574. WorkList.push_back(Child);
  575. }
  576. } while (!WorkList.empty());
  577. // Now perform CSE.
  578. bool Changed = false;
  579. for (unsigned i = 0, e = Scopes.size(); i != e; ++i) {
  580. MachineDomTreeNode *Node = Scopes[i];
  581. MachineBasicBlock *MBB = Node->getBlock();
  582. EnterScope(MBB);
  583. Changed |= ProcessBlock(MBB);
  584. // If it's a leaf node, it's done. Traverse upwards to pop ancestors.
  585. ExitScopeIfDone(Node, OpenChildren);
  586. }
  587. return Changed;
  588. }
/// runOnMachineFunction - Entry point.  Caches the target hooks and required
/// analyses, then runs CSE over the dominator tree starting at the root.
bool MachineCSE::runOnMachineFunction(MachineFunction &MF) {
  TII = MF.getTarget().getInstrInfo();
  TRI = MF.getTarget().getRegisterInfo();
  MRI = &MF.getRegInfo();
  AA = &getAnalysis<AliasAnalysis>();
  DT = &getAnalysis<MachineDominatorTree>();
  return PerformCSE(DT->getRootNode());
}