MachineSink.cpp

//===-- MachineSink.cpp - Sinking for machine instructions ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass moves instructions into successor blocks when possible, so that
// they aren't executed on paths where their results aren't needed.
//
// This pass is not intended to be a replacement or a complete alternative
// for an LLVM-IR-level sinking pass. It is only designed to sink simple
// constructs that are not exposed before lowering and instruction selection.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/Passes.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SparseBitVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/MachineBlockFrequencyInfo.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachinePostDominators.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
using namespace llvm;

#define DEBUG_TYPE "machine-sink"

static cl::opt<bool>
SplitEdges("machine-sink-split",
           cl::desc("Split critical edges during machine sinking"),
           cl::init(true), cl::Hidden);

static cl::opt<bool>
UseBlockFreqInfo("machine-sink-bfi",
                 cl::desc("Use block frequency info to find successors to sink"),
                 cl::init(true), cl::Hidden);

STATISTIC(NumSunk,      "Number of machine instructions sunk");
STATISTIC(NumSplit,     "Number of critical edges split");
STATISTIC(NumCoalesces, "Number of copies coalesced");

namespace {
  class MachineSinking : public MachineFunctionPass {
    const TargetInstrInfo *TII;
    const TargetRegisterInfo *TRI;
    MachineRegisterInfo  *MRI;     // Machine register information
    MachineDominatorTree *DT;      // Machine dominator tree
    MachinePostDominatorTree *PDT; // Machine post dominator tree
    MachineLoopInfo *LI;
    const MachineBlockFrequencyInfo *MBFI;
    AliasAnalysis *AA;

    // Remember which edges have been considered for breaking.
    SmallSet<std::pair<MachineBasicBlock*, MachineBasicBlock*>, 8>
    CEBCandidates;
    // Remember which edges we are about to split.
    // Unlike CEBCandidates, which only records edges that have been considered,
    // the edges recorded here will actually be split.
    SetVector<std::pair<MachineBasicBlock*, MachineBasicBlock*> > ToSplit;
    SparseBitVector<> RegsToClearKillFlags;

    typedef std::map<MachineBasicBlock *, SmallVector<MachineBasicBlock *, 4>>
        AllSuccsCache;

  public:
    static char ID; // Pass identification
    MachineSinking() : MachineFunctionPass(ID) {
      initializeMachineSinkingPass(*PassRegistry::getPassRegistry());
    }

    bool runOnMachineFunction(MachineFunction &MF) override;

    void getAnalysisUsage(AnalysisUsage &AU) const override {
      AU.setPreservesCFG();
      MachineFunctionPass::getAnalysisUsage(AU);
      AU.addRequired<AAResultsWrapperPass>();
      AU.addRequired<MachineDominatorTree>();
      AU.addRequired<MachinePostDominatorTree>();
      AU.addRequired<MachineLoopInfo>();
      AU.addPreserved<MachineDominatorTree>();
      AU.addPreserved<MachinePostDominatorTree>();
      AU.addPreserved<MachineLoopInfo>();
      if (UseBlockFreqInfo)
        AU.addRequired<MachineBlockFrequencyInfo>();
    }

    void releaseMemory() override {
      CEBCandidates.clear();
    }

  private:
    bool ProcessBlock(MachineBasicBlock &MBB);
    bool isWorthBreakingCriticalEdge(MachineInstr *MI,
                                     MachineBasicBlock *From,
                                     MachineBasicBlock *To);
    /// \brief Postpone the splitting of the given critical
    /// edge (\p From, \p To).
    ///
    /// We do not split the edges on the fly. Indeed, this invalidates
    /// the dominance information and thus triggers a lot of updates
    /// of that information underneath.
    /// Instead, we postpone all the splits after each iteration of
    /// the main loop. That way, the information is at least valid
    /// for the lifetime of an iteration.
    ///
    /// \return True if the edge is marked as toSplit, false otherwise.
    /// False can be returned if, for instance, this is not profitable.
    bool PostponeSplitCriticalEdge(MachineInstr *MI,
                                   MachineBasicBlock *From,
                                   MachineBasicBlock *To,
                                   bool BreakPHIEdge);
    bool SinkInstruction(MachineInstr *MI, bool &SawStore,
                         AllSuccsCache &AllSuccessors);
    bool AllUsesDominatedByBlock(unsigned Reg, MachineBasicBlock *MBB,
                                 MachineBasicBlock *DefMBB,
                                 bool &BreakPHIEdge, bool &LocalUse) const;
    MachineBasicBlock *FindSuccToSinkTo(MachineInstr *MI, MachineBasicBlock *MBB,
                                        bool &BreakPHIEdge,
                                        AllSuccsCache &AllSuccessors);
    bool isProfitableToSinkTo(unsigned Reg, MachineInstr *MI,
                              MachineBasicBlock *MBB,
                              MachineBasicBlock *SuccToSinkTo,
                              AllSuccsCache &AllSuccessors);

    bool PerformTrivialForwardCoalescing(MachineInstr *MI,
                                         MachineBasicBlock *MBB);

    SmallVector<MachineBasicBlock *, 4> &
    GetAllSortedSuccessors(MachineInstr *MI, MachineBasicBlock *MBB,
                           AllSuccsCache &AllSuccessors) const;
  };
} // end anonymous namespace

char MachineSinking::ID = 0;
char &llvm::MachineSinkingID = MachineSinking::ID;
INITIALIZE_PASS_BEGIN(MachineSinking, "machine-sink",
                      "Machine code sinking", false, false)
INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_END(MachineSinking, "machine-sink",
                    "Machine code sinking", false, false)
bool MachineSinking::PerformTrivialForwardCoalescing(MachineInstr *MI,
                                                     MachineBasicBlock *MBB) {
  if (!MI->isCopy())
    return false;

  unsigned SrcReg = MI->getOperand(1).getReg();
  unsigned DstReg = MI->getOperand(0).getReg();
  if (!TargetRegisterInfo::isVirtualRegister(SrcReg) ||
      !TargetRegisterInfo::isVirtualRegister(DstReg) ||
      !MRI->hasOneNonDBGUse(SrcReg))
    return false;

  const TargetRegisterClass *SRC = MRI->getRegClass(SrcReg);
  const TargetRegisterClass *DRC = MRI->getRegClass(DstReg);
  if (SRC != DRC)
    return false;

  MachineInstr *DefMI = MRI->getVRegDef(SrcReg);
  if (DefMI->isCopyLike())
    return false;
  DEBUG(dbgs() << "Coalescing: " << *DefMI);
  DEBUG(dbgs() << "*** to: " << *MI);
  MRI->replaceRegWith(DstReg, SrcReg);
  MI->eraseFromParent();

  // Conservatively, clear any kill flags, since it's possible that they are no
  // longer correct.
  MRI->clearKillFlags(SrcReg);

  ++NumCoalesces;
  return true;
}

/// AllUsesDominatedByBlock - Return true if all uses of the specified register
/// occur in blocks dominated by the specified block. If any use is in the
/// definition block, then return false since it is never legal to move def
/// after uses.
bool
MachineSinking::AllUsesDominatedByBlock(unsigned Reg,
                                        MachineBasicBlock *MBB,
                                        MachineBasicBlock *DefMBB,
                                        bool &BreakPHIEdge,
                                        bool &LocalUse) const {
  assert(TargetRegisterInfo::isVirtualRegister(Reg) &&
         "Only makes sense for vregs");

  // Ignore debug uses because debug info doesn't affect the code.
  if (MRI->use_nodbg_empty(Reg))
    return true;

  // BreakPHIEdge is true if all the uses are in the successor MBB being sunk
  // into and they are all PHI nodes. In this case, machine-sink must break
  // the critical edge first. e.g.
  //
  // BB#1: derived from LLVM BB %bb4.preheader
  //   Predecessors according to CFG: BB#0
  //     ...
  //     %reg16385<def> = DEC64_32r %reg16437, %EFLAGS<imp-def,dead>
  //     ...
  //     JE_4 <BB#37>, %EFLAGS<imp-use>
  //   Successors according to CFG: BB#37 BB#2
  //
  // BB#2: derived from LLVM BB %bb.nph
  //   Predecessors according to CFG: BB#0 BB#1
  //     %reg16386<def> = PHI %reg16434, <BB#0>, %reg16385, <BB#1>
  BreakPHIEdge = true;
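  // First, check whether every non-debug use of Reg is a PHI operand in MBB
  // whose incoming value comes from DefMBB.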
  for (MachineOperand &MO : MRI->use_nodbg_operands(Reg)) {
    MachineInstr *UseInst = MO.getParent();
    unsigned OpNo = &MO - &UseInst->getOperand(0);
    MachineBasicBlock *UseBlock = UseInst->getParent();
    if (!(UseBlock == MBB && UseInst->isPHI() &&
          UseInst->getOperand(OpNo+1).getMBB() == DefMBB)) {
      BreakPHIEdge = false;
      break;
    }
  }
  if (BreakPHIEdge)
    return true;

  for (MachineOperand &MO : MRI->use_nodbg_operands(Reg)) {
    // Determine the block of the use.
    MachineInstr *UseInst = MO.getParent();
    unsigned OpNo = &MO - &UseInst->getOperand(0);
    MachineBasicBlock *UseBlock = UseInst->getParent();
    if (UseInst->isPHI()) {
      // PHI nodes use the operand in the predecessor block, not the block with
      // the PHI.
      UseBlock = UseInst->getOperand(OpNo+1).getMBB();
    } else if (UseBlock == DefMBB) {
      LocalUse = true;
      return false;
    }

    // Check that it dominates.
    if (!DT->dominates(MBB, UseBlock))
      return false;
  }

  return true;
}

bool MachineSinking::runOnMachineFunction(MachineFunction &MF) {
  if (skipOptnoneFunction(*MF.getFunction()))
    return false;

  DEBUG(dbgs() << "******** Machine Sinking ********\n");

  TII = MF.getSubtarget().getInstrInfo();
  TRI = MF.getSubtarget().getRegisterInfo();
  MRI = &MF.getRegInfo();
  DT = &getAnalysis<MachineDominatorTree>();
  PDT = &getAnalysis<MachinePostDominatorTree>();
  LI = &getAnalysis<MachineLoopInfo>();
  MBFI = UseBlockFreqInfo ? &getAnalysis<MachineBlockFrequencyInfo>() : nullptr;
  AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();

  bool EverMadeChange = false;
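
  // Iterate until no further changes are made; splitting critical edges in one
  // iteration can expose additional sinking opportunities in the next.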
  while (1) {
    bool MadeChange = false;

    // Process all basic blocks.
    CEBCandidates.clear();
    ToSplit.clear();
    for (auto &MBB: MF)
      MadeChange |= ProcessBlock(MBB);

    // If we have anything we marked as toSplit, split it now.
    for (auto &Pair : ToSplit) {
      auto NewSucc = Pair.first->SplitCriticalEdge(Pair.second, this);
      if (NewSucc != nullptr) {
        DEBUG(dbgs() << " *** Splitting critical edge:"
              " BB#" << Pair.first->getNumber()
              << " -- BB#" << NewSucc->getNumber()
              << " -- BB#" << Pair.second->getNumber() << '\n');
        MadeChange = true;
        ++NumSplit;
      } else
        DEBUG(dbgs() << " *** Not legal to break critical edge\n");
    }
    // If this iteration over the code changed anything, keep iterating.
    if (!MadeChange) break;
    EverMadeChange = true;
  }

  // Now clear any kill flags for recorded registers.
  for (auto I : RegsToClearKillFlags)
    MRI->clearKillFlags(I);
  RegsToClearKillFlags.clear();

  return EverMadeChange;
}

bool MachineSinking::ProcessBlock(MachineBasicBlock &MBB) {
  // Can't sink anything out of a block that has less than two successors.
  if (MBB.succ_size() <= 1 || MBB.empty()) return false;

  // Don't bother sinking code out of unreachable blocks. In addition to being
  // unprofitable, it can also lead to infinite looping, because in an
  // unreachable loop there may be nowhere to stop.
  if (!DT->isReachableFromEntry(&MBB)) return false;

  bool MadeChange = false;

  // Cache all successors, sorted by frequency info and loop depth.
  AllSuccsCache AllSuccessors;

  // Walk the basic block bottom-up.  Remember if we saw a store.
  MachineBasicBlock::iterator I = MBB.end();
  --I;
  bool ProcessedBegin, SawStore = false;
  do {
    MachineInstr *MI = I;  // The instruction to sink.

    // Predecrement I (if it's not begin) so that it isn't invalidated by
    // sinking.
    ProcessedBegin = I == MBB.begin();
    if (!ProcessedBegin)
      --I;

    if (MI->isDebugValue())
      continue;

    bool Joined = PerformTrivialForwardCoalescing(MI, &MBB);
    if (Joined) {
      MadeChange = true;
      continue;
    }

    if (SinkInstruction(MI, SawStore, AllSuccessors)) {
      ++NumSunk;
      MadeChange = true;
    }

    // If we just processed the first instruction in the block, we're done.
  } while (!ProcessedBegin);

  return MadeChange;
}

bool MachineSinking::isWorthBreakingCriticalEdge(MachineInstr *MI,
                                                 MachineBasicBlock *From,
                                                 MachineBasicBlock *To) {
  // FIXME: Need much better heuristics.

  // If the pass has already considered breaking this edge (during this pass
  // through the function), then let's go ahead and break it. This means
  // sinking multiple "cheap" instructions into the same block.
  if (!CEBCandidates.insert(std::make_pair(From, To)).second)
    return true;

  if (!MI->isCopy() && !TII->isAsCheapAsAMove(MI))
    return true;

  // MI is cheap, we probably don't want to break the critical edge for it.
  // However, if this would allow some definitions of its source operands
  // to be sunk then it's probably worth it.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || !MO.isUse())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0)
      continue;

    // We don't move live definitions of physical registers,
    // so sinking their uses won't enable any opportunities.
    if (TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;

    // If this instruction is the only user of a virtual register,
    // check if breaking the edge will enable sinking
    // both this instruction and the defining instruction.
    if (MRI->hasOneNonDBGUse(Reg)) {
      // If the definition resides in same MBB,
      // claim it's likely we can sink these together.
      // If definition resides elsewhere, we aren't
      // blocking it from being sunk so don't break the edge.
      MachineInstr *DefMI = MRI->getVRegDef(Reg);
      if (DefMI->getParent() == MI->getParent())
        return true;
    }
  }

  return false;
}

bool MachineSinking::PostponeSplitCriticalEdge(MachineInstr *MI,
                                               MachineBasicBlock *FromBB,
                                               MachineBasicBlock *ToBB,
                                               bool BreakPHIEdge) {
  if (!isWorthBreakingCriticalEdge(MI, FromBB, ToBB))
    return false;

  // Avoid breaking back edge. From == To means backedge for single BB loop.
  if (!SplitEdges || FromBB == ToBB)
    return false;

  // Check for backedges of more "complex" loops.
  if (LI->getLoopFor(FromBB) == LI->getLoopFor(ToBB) &&
      LI->isLoopHeader(ToBB))
    return false;

  // It's not always legal to break critical edges and sink the computation
  // to the edge.
  //
  // BB#1:
  // v1024
  // Beq BB#3
  // <fallthrough>
  // BB#2:
  // ... no uses of v1024
  // <fallthrough>
  // BB#3:
  // ...
  //   = v1024
  //
  // If BB#1 -> BB#3 edge is broken and computation of v1024 is inserted:
  //
  // BB#1:
  // ...
  // Bne BB#2
  // BB#4:
  // v1024 =
  // B BB#3
  // BB#2:
  // ... no uses of v1024
  // <fallthrough>
  // BB#3:
  // ...
  //   = v1024
  //
  // This is incorrect since v1024 is not computed along the BB#1->BB#2->BB#3
  // flow. We need to ensure the new basic block where the computation is
  // sunk to dominates all the uses.
  // It's only legal to break critical edge and sink the computation to the
  // new block if all the predecessors of "To", except for "From", are
  // not dominated by "From". Given SSA property, this means these
  // predecessors are dominated by "To".
  //
  // There is no need to do this check if all the uses are PHI nodes. PHI
  // sources are only defined on the specific predecessor edges.
  if (!BreakPHIEdge) {
    for (MachineBasicBlock::pred_iterator PI = ToBB->pred_begin(),
           E = ToBB->pred_end(); PI != E; ++PI) {
      if (*PI == FromBB)
        continue;
      if (!DT->dominates(ToBB, *PI))
        return false;
    }
  }

  ToSplit.insert(std::make_pair(FromBB, ToBB));

  return true;
}
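
// Return true if MI is an instruction we should not sink: insert_subreg,
// subreg_to_reg, and reg_sequence are meant to stay close to their source
// value to make coalescing easier.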
static bool AvoidsSinking(MachineInstr *MI, MachineRegisterInfo *MRI) {
  return MI->isInsertSubreg() || MI->isSubregToReg() || MI->isRegSequence();
}

/// collectDebugValues - Scan instructions following MI and collect any
/// matching DBG_VALUEs.
static void collectDebugValues(MachineInstr *MI,
                               SmallVectorImpl<MachineInstr *> &DbgValues) {
  DbgValues.clear();
  if (!MI->getOperand(0).isReg())
    return;

  MachineBasicBlock::iterator DI = MI; ++DI;
  for (MachineBasicBlock::iterator DE = MI->getParent()->end();
       DI != DE; ++DI) {
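    // Matching DBG_VALUEs are expected to immediately follow MI, so stop at
    // the first non-debug instruction.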
    if (!DI->isDebugValue())
      return;
    if (DI->getOperand(0).isReg() &&
        DI->getOperand(0).getReg() == MI->getOperand(0).getReg())
      DbgValues.push_back(DI);
  }
}

/// isProfitableToSinkTo - Return true if it is profitable to sink MI.
bool MachineSinking::isProfitableToSinkTo(unsigned Reg, MachineInstr *MI,
                                          MachineBasicBlock *MBB,
                                          MachineBasicBlock *SuccToSinkTo,
                                          AllSuccsCache &AllSuccessors) {
  assert (MI && "Invalid MachineInstr!");
  assert (SuccToSinkTo && "Invalid SinkTo Candidate BB");

  if (MBB == SuccToSinkTo)
    return false;

  // It is profitable if SuccToSinkTo does not post dominate current block.
  if (!PDT->dominates(SuccToSinkTo, MBB))
    return true;

  // It is profitable to sink an instruction from a deeper loop to a shallower
  // loop, even if the latter post-dominates the former (PR21115).
  if (LI->getLoopDepth(MBB) > LI->getLoopDepth(SuccToSinkTo))
    return true;

  // Check whether the only use in the post-dominated block is a PHI
  // instruction.
  bool NonPHIUse = false;
  for (MachineInstr &UseInst : MRI->use_nodbg_instructions(Reg)) {
    MachineBasicBlock *UseBlock = UseInst.getParent();
    if (UseBlock == SuccToSinkTo && !UseInst.isPHI())
      NonPHIUse = true;
  }
  if (!NonPHIUse)
    return true;

  // Even if SuccToSinkTo post-dominates MBB, sinking may still be profitable
  // if MI can be profitably sunk further into another block in the next round.
  bool BreakPHIEdge = false;
  // FIXME - If finding successor is compile time expensive then cache results.
  if (MachineBasicBlock *MBB2 =
          FindSuccToSinkTo(MI, SuccToSinkTo, BreakPHIEdge, AllSuccessors))
    return isProfitableToSinkTo(Reg, MI, SuccToSinkTo, MBB2, AllSuccessors);

  // If SuccToSinkTo is final destination and it is a post dominator of current
  // block then it is not profitable to sink MI into SuccToSinkTo block.
  return false;
}

/// Get the sorted sequence of successors for this MachineBasicBlock, possibly
/// computing it if it was not already cached.
SmallVector<MachineBasicBlock *, 4> &
MachineSinking::GetAllSortedSuccessors(MachineInstr *MI, MachineBasicBlock *MBB,
                                       AllSuccsCache &AllSuccessors) const {
  // Do we have the sorted successors in cache ?
  auto Succs = AllSuccessors.find(MBB);
  if (Succs != AllSuccessors.end())
    return Succs->second;

  SmallVector<MachineBasicBlock *, 4> AllSuccs(MBB->succ_begin(),
                                               MBB->succ_end());

  // Handle cases where sinking can happen but where the sink point isn't a
  // successor. For example:
  //
  //   x = computation
  //   if () {} else {}
  //   use x
  //
  const std::vector<MachineDomTreeNode *> &Children =
    DT->getNode(MBB)->getChildren();
  for (const auto &DTChild : Children)
    // DomTree children of MBB that have MBB as immediate dominator are added.
    if (DTChild->getIDom()->getBlock() == MI->getParent() &&
        // Skip MBBs already added to the AllSuccs vector above.
        !MBB->isSuccessor(DTChild->getBlock()))
      AllSuccs.push_back(DTChild->getBlock());

  // Sort Successors according to their loop depth or block frequency info.
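  // When block frequency info is available for both blocks, prefer the colder
  // one; otherwise fall back to the shallower loop depth.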
  std::stable_sort(
      AllSuccs.begin(), AllSuccs.end(),
      [this](const MachineBasicBlock *L, const MachineBasicBlock *R) {
        uint64_t LHSFreq = MBFI ? MBFI->getBlockFreq(L).getFrequency() : 0;
        uint64_t RHSFreq = MBFI ? MBFI->getBlockFreq(R).getFrequency() : 0;
        bool HasBlockFreq = LHSFreq != 0 && RHSFreq != 0;
        return HasBlockFreq ? LHSFreq < RHSFreq
                            : LI->getLoopDepth(L) < LI->getLoopDepth(R);
      });

  auto it = AllSuccessors.insert(std::make_pair(MBB, AllSuccs));

  return it.first->second;
}

/// FindSuccToSinkTo - Find a successor to sink this instruction to.
MachineBasicBlock *MachineSinking::FindSuccToSinkTo(MachineInstr *MI,
                                                    MachineBasicBlock *MBB,
                                                    bool &BreakPHIEdge,
                                                    AllSuccsCache &AllSuccessors) {
  assert (MI && "Invalid MachineInstr!");
  assert (MBB && "Invalid MachineBasicBlock!");

  // Loop over all the operands of the specified instruction.  If there is
  // anything we can't handle, bail out.

  // SuccToSinkTo - This is the successor to sink this instruction to, once we
  // decide.
  MachineBasicBlock *SuccToSinkTo = nullptr;
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg()) continue;  // Ignore non-register operands.

    unsigned Reg = MO.getReg();
    if (Reg == 0) continue;

    if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
      if (MO.isUse()) {
        // If the physreg has no defs anywhere, it's just an ambient register
        // and we can freely move its uses. Alternatively, if it's allocatable,
        // it could get allocated to something with a def during allocation.
        if (!MRI->isConstantPhysReg(Reg, *MBB->getParent()))
          return nullptr;
      } else if (!MO.isDead()) {
        // A def that isn't dead. We can't move it.
        return nullptr;
      }
    } else {
      // Virtual register uses are always safe to sink.
      if (MO.isUse()) continue;

      // If it's not safe to move defs of the register class, then abort.
      if (!TII->isSafeToMoveRegClassDefs(MRI->getRegClass(Reg)))
        return nullptr;

      // Virtual register defs can only be sunk if all their uses are in blocks
      // dominated by one of the successors.
      if (SuccToSinkTo) {
        // If a previous operand picked a block to sink to, then this operand
        // must be sinkable to the same block.
        bool LocalUse = false;
        if (!AllUsesDominatedByBlock(Reg, SuccToSinkTo, MBB,
                                     BreakPHIEdge, LocalUse))
          return nullptr;

        continue;
      }

      // Otherwise, we should look at all the successors and decide which one
      // we should sink to. If we have reliable block frequency information
      // (frequency != 0) available, give successors with smaller frequencies
      // higher priority, otherwise prioritize smaller loop depths.
      for (MachineBasicBlock *SuccBlock :
           GetAllSortedSuccessors(MI, MBB, AllSuccessors)) {
        bool LocalUse = false;
        if (AllUsesDominatedByBlock(Reg, SuccBlock, MBB,
                                    BreakPHIEdge, LocalUse)) {
          SuccToSinkTo = SuccBlock;
          break;
        }
        if (LocalUse)
          // Def is used locally, it's never safe to move this def.
          return nullptr;
      }

      // If we couldn't find a block to sink to, ignore this instruction.
      if (!SuccToSinkTo)
        return nullptr;
      if (!isProfitableToSinkTo(Reg, MI, MBB, SuccToSinkTo, AllSuccessors))
        return nullptr;
    }
  }

  // It is not possible to sink an instruction into its own block.  This can
  // happen with loops.
  if (MBB == SuccToSinkTo)
    return nullptr;

  // It's not safe to sink instructions to EH landing pad. Control flow into
  // landing pad is implicitly defined.
  if (SuccToSinkTo && SuccToSinkTo->isEHPad())
    return nullptr;

  return SuccToSinkTo;
}

/// \brief Return true if MI is likely to be usable as a memory operation by the
/// implicit null check optimization.
///
/// This is a "best effort" heuristic, and should not be relied upon for
/// correctness.  This returning true does not guarantee that the implicit null
/// check optimization is legal over MI, and this returning false does not
/// guarantee MI cannot possibly be used to do a null check.
static bool SinkingPreventsImplicitNullCheck(MachineInstr *MI,
                                             const TargetInstrInfo *TII,
                                             const TargetRegisterInfo *TRI) {
  typedef TargetInstrInfo::MachineBranchPredicate MachineBranchPredicate;

  auto *MBB = MI->getParent();
  if (MBB->pred_size() != 1)
    return false;

  auto *PredMBB = *MBB->pred_begin();
  auto *PredBB = PredMBB->getBasicBlock();

  // Frontends that don't use implicit null checks have no reason to emit
  // branches with make.implicit metadata, and this function should always
  // return false for them.
  if (!PredBB ||
      !PredBB->getTerminator()->getMetadata(LLVMContext::MD_make_implicit))
    return false;

  unsigned BaseReg;
  int64_t Offset;
  if (!TII->getMemOpBaseRegImmOfs(MI, BaseReg, Offset, TRI))
    return false;

  if (!(MI->mayLoad() && !MI->isPredicable()))
    return false;

  MachineBranchPredicate MBP;
  if (TII->AnalyzeBranchPredicate(*PredMBB, MBP, false))
    return false;

  return MBP.LHS.isReg() && MBP.RHS.isImm() && MBP.RHS.getImm() == 0 &&
         (MBP.Predicate == MachineBranchPredicate::PRED_NE ||
          MBP.Predicate == MachineBranchPredicate::PRED_EQ) &&
         MBP.LHS.getReg() == BaseReg;
}

/// SinkInstruction - Determine whether it is safe to sink the specified machine
/// instruction out of its current block into a successor.
bool MachineSinking::SinkInstruction(MachineInstr *MI, bool &SawStore,
                                     AllSuccsCache &AllSuccessors) {
  // Don't sink insert_subreg, subreg_to_reg, reg_sequence. These are meant to
  // be close to the source to make it easier to coalesce.
  if (AvoidsSinking(MI, MRI))
    return false;

  // Check if it's safe to move the instruction.
  if (!MI->isSafeToMove(AA, SawStore))
    return false;

  // Convergent operations may not be made control-dependent on additional
  // values.
  if (MI->isConvergent())
    return false;

  // Don't break implicit null checks.  This is a performance heuristic, and not
  // required for correctness.
  if (SinkingPreventsImplicitNullCheck(MI, TII, TRI))
    return false;

  // FIXME: This should include support for sinking instructions within the
  // block they are currently in to shorten the live ranges.  We often get
  // instructions sunk into the top of a large block, but it would be better to
  // also sink them down before their first use in the block.  This xform has to
  // be careful not to *increase* register pressure though, e.g. sinking
  // "x = y + z" down if it kills y and z would increase the live ranges of y
  // and z and only shrink the live range of x.

  bool BreakPHIEdge = false;
  MachineBasicBlock *ParentBlock = MI->getParent();
  MachineBasicBlock *SuccToSinkTo =
      FindSuccToSinkTo(MI, ParentBlock, BreakPHIEdge, AllSuccessors);

  // If there are no outputs, it must have side-effects.
  if (!SuccToSinkTo)
    return false;

  // If the instruction to move defines a dead physical register which is live
  // when leaving the basic block, don't move it because it could turn into a
  // "zombie" define of that preg. E.g., EFLAGS. (<rdar://problem/8030636>)
  for (unsigned I = 0, E = MI->getNumOperands(); I != E; ++I) {
    const MachineOperand &MO = MI->getOperand(I);
    if (!MO.isReg()) continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || !TargetRegisterInfo::isPhysicalRegister(Reg)) continue;
    if (SuccToSinkTo->isLiveIn(Reg))
      return false;
  }

  DEBUG(dbgs() << "Sink instr " << *MI << "\tinto block " << *SuccToSinkTo);

  // If the block has multiple predecessors, this is a critical edge.
  // Decide if we can sink along it or need to break the edge.
  if (SuccToSinkTo->pred_size() > 1) {
    // We cannot sink a load across a critical edge - there may be stores in
    // other code paths.
    bool TryBreak = false;
    bool store = true;
    if (!MI->isSafeToMove(AA, store)) {
      DEBUG(dbgs() << " *** NOTE: Won't sink load along critical edge.\n");
      TryBreak = true;
    }

    // We don't want to sink across a critical edge if we don't dominate the
    // successor. We could be introducing calculations to new code paths.
    if (!TryBreak && !DT->dominates(ParentBlock, SuccToSinkTo)) {
      DEBUG(dbgs() << " *** NOTE: Critical edge found\n");
      TryBreak = true;
    }

    // Don't sink instructions into a loop.
    if (!TryBreak && LI->isLoopHeader(SuccToSinkTo)) {
      DEBUG(dbgs() << " *** NOTE: Loop header found\n");
      TryBreak = true;
    }

    // Otherwise we are OK with sinking along a critical edge.
    if (!TryBreak)
      DEBUG(dbgs() << "Sinking along critical edge.\n");
    else {
      // Mark this edge as to be split.
      // If the edge can actually be split, the next iteration of the main loop
      // will sink MI in the newly created block.
      bool Status =
          PostponeSplitCriticalEdge(MI, ParentBlock, SuccToSinkTo, BreakPHIEdge);
      if (!Status)
        DEBUG(dbgs() << " *** PUNTING: Not legal or profitable to "
              "break critical edge\n");
      // The instruction will not be sunk this time.
      return false;
    }
  }

  if (BreakPHIEdge) {
    // BreakPHIEdge is true if all the uses are in the successor MBB being
    // sunk into and they are all PHI nodes. In this case, machine-sink must
    // break the critical edge first.
    bool Status = PostponeSplitCriticalEdge(MI, ParentBlock,
                                            SuccToSinkTo, BreakPHIEdge);
    if (!Status)
      DEBUG(dbgs() << " *** PUNTING: Not legal or profitable to "
            "break critical edge\n");
    // The instruction will not be sunk this time.
    return false;
  }

  // Determine where to insert into. Skip phi nodes.
  MachineBasicBlock::iterator InsertPos = SuccToSinkTo->begin();
  while (InsertPos != SuccToSinkTo->end() && InsertPos->isPHI())
    ++InsertPos;

  // collect matching debug values.
  SmallVector<MachineInstr *, 2> DbgValuesToSink;
  collectDebugValues(MI, DbgValuesToSink);

  // Move the instruction.
  SuccToSinkTo->splice(InsertPos, ParentBlock, MI,
                       ++MachineBasicBlock::iterator(MI));

  // Move debug values.
  for (SmallVectorImpl<MachineInstr *>::iterator DBI = DbgValuesToSink.begin(),
         DBE = DbgValuesToSink.end(); DBI != DBE; ++DBI) {
    MachineInstr *DbgMI = *DBI;
    SuccToSinkTo->splice(InsertPos, ParentBlock, DbgMI,
                         ++MachineBasicBlock::iterator(DbgMI));
  }

  // Conservatively, clear any kill flags, since it's possible that they are no
  // longer correct.
  // Note that we have to clear the kill flags for any register this instruction
  // uses as we may sink over another instruction which currently kills the
  // used registers.
  for (MachineOperand &MO : MI->operands()) {
    if (MO.isReg() && MO.isUse())
      RegsToClearKillFlags.set(MO.getReg()); // Remember to clear kill flags.
  }

  return true;
}