  1. //===- MachineSink.cpp - Sinking for machine instructions -----------------===//
  2. //
  3. // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
  4. // See https://llvm.org/LICENSE.txt for license information.
  5. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
  6. //
  7. //===----------------------------------------------------------------------===//
  8. //
  9. // This pass moves instructions into successor blocks when possible, so that
  10. // they aren't executed on paths where their results aren't needed.
  11. //
  12. // This pass is not intended to be a replacement or a complete alternative
  13. // for an LLVM-IR-level sinking pass. It is only designed to sink simple
  14. // constructs that are not exposed before lowering and instruction selection.
  15. //
  16. //===----------------------------------------------------------------------===//
  17. #include "llvm/ADT/SetVector.h"
  18. #include "llvm/ADT/SmallSet.h"
  19. #include "llvm/ADT/SmallVector.h"
  20. #include "llvm/ADT/SparseBitVector.h"
  21. #include "llvm/ADT/Statistic.h"
  22. #include "llvm/Analysis/AliasAnalysis.h"
  23. #include "llvm/CodeGen/MachineBasicBlock.h"
  24. #include "llvm/CodeGen/MachineBlockFrequencyInfo.h"
  25. #include "llvm/CodeGen/MachineBranchProbabilityInfo.h"
  26. #include "llvm/CodeGen/MachineDominators.h"
  27. #include "llvm/CodeGen/MachineFunction.h"
  28. #include "llvm/CodeGen/MachineFunctionPass.h"
  29. #include "llvm/CodeGen/MachineInstr.h"
  30. #include "llvm/CodeGen/MachineLoopInfo.h"
  31. #include "llvm/CodeGen/MachineOperand.h"
  32. #include "llvm/CodeGen/MachinePostDominators.h"
  33. #include "llvm/CodeGen/MachineRegisterInfo.h"
  34. #include "llvm/CodeGen/TargetInstrInfo.h"
  35. #include "llvm/CodeGen/TargetRegisterInfo.h"
  36. #include "llvm/CodeGen/TargetSubtargetInfo.h"
  37. #include "llvm/IR/BasicBlock.h"
  38. #include "llvm/IR/LLVMContext.h"
  39. #include "llvm/IR/DebugInfoMetadata.h"
  40. #include "llvm/Pass.h"
  41. #include "llvm/Support/BranchProbability.h"
  42. #include "llvm/Support/CommandLine.h"
  43. #include "llvm/Support/Debug.h"
  44. #include "llvm/Support/raw_ostream.h"
  45. #include <algorithm>
  46. #include <cassert>
  47. #include <cstdint>
  48. #include <map>
  49. #include <utility>
  50. #include <vector>
  51. using namespace llvm;
  52. #define DEBUG_TYPE "machine-sink"
// Command-line knobs controlling the sinking heuristics.

// When true (the default), the pass may split critical edges to create a
// legal sink destination.
static cl::opt<bool>
SplitEdges("machine-sink-split",
           cl::desc("Split critical edges during machine sinking"),
           cl::init(true), cl::Hidden);

// When true (the default), successors are prioritized by block frequency
// rather than loop depth when picking a sink destination.
static cl::opt<bool>
UseBlockFreqInfo("machine-sink-bfi",
                 cl::desc("Use block frequency info to find successors to sink"),
                 cl::init(true), cl::Hidden);

// Edge-probability cutoff (percent) below which splitting a critical edge is
// considered worthwhile even for a cheap single instruction.
static cl::opt<unsigned> SplitEdgeProbabilityThreshold(
    "machine-sink-split-probability-threshold",
    cl::desc(
        "Percentage threshold for splitting single-instruction critical edge. "
        "If the branch threshold is higher than this threshold, we allow "
        "speculative execution of up to 1 instruction to avoid branching to "
        "splitted critical edge"),
    cl::init(40), cl::Hidden);

// Pass statistics (reported with -stats).
STATISTIC(NumSunk,           "Number of machine instructions sunk");
STATISTIC(NumSplit,          "Number of critical edges split");
STATISTIC(NumCoalesces,      "Number of copies coalesced");
STATISTIC(NumPostRACopySink, "Number of copies sunk after RA");
namespace {

/// Machine-code sinking pass: moves instructions into successor blocks so
/// they do not execute on paths that never use their results.
class MachineSinking : public MachineFunctionPass {
  const TargetInstrInfo *TII;
  const TargetRegisterInfo *TRI;
  MachineRegisterInfo *MRI;      // Machine register information
  MachineDominatorTree *DT;      // Machine dominator tree
  MachinePostDominatorTree *PDT; // Machine post dominator tree
  MachineLoopInfo *LI;           // Loop info, for depth/backedge queries
  const MachineBlockFrequencyInfo *MBFI;  // Non-null only when UseBlockFreqInfo
  const MachineBranchProbabilityInfo *MBPI;
  AliasAnalysis *AA;             // Used by MI.isSafeToMove()

  // Remember which edges have been considered for breaking.
  SmallSet<std::pair<MachineBasicBlock*, MachineBasicBlock*>, 8>
  CEBCandidates;

  // Remember which edges we are about to split.
  // This is different from CEBCandidates since those edges
  // will be split.
  SetVector<std::pair<MachineBasicBlock *, MachineBasicBlock *>> ToSplit;

  // Registers whose kill flags must be cleared after all sinking is done
  // (see runOnMachineFunction).
  SparseBitVector<> RegsToClearKillFlags;

  // Per-block cache of sorted sink-candidate successors.
  using AllSuccsCache =
      std::map<MachineBasicBlock *, SmallVector<MachineBasicBlock *, 4>>;

public:
  static char ID; // Pass identification

  MachineSinking() : MachineFunctionPass(ID) {
    initializeMachineSinkingPass(*PassRegistry::getPassRegistry());
  }

  bool runOnMachineFunction(MachineFunction &MF) override;

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    MachineFunctionPass::getAnalysisUsage(AU);
    AU.addRequired<AAResultsWrapperPass>();
    AU.addRequired<MachineDominatorTree>();
    AU.addRequired<MachinePostDominatorTree>();
    AU.addRequired<MachineLoopInfo>();
    AU.addRequired<MachineBranchProbabilityInfo>();
    AU.addPreserved<MachineDominatorTree>();
    AU.addPreserved<MachinePostDominatorTree>();
    AU.addPreserved<MachineLoopInfo>();
    if (UseBlockFreqInfo)
      AU.addRequired<MachineBlockFrequencyInfo>();
  }

  void releaseMemory() override {
    CEBCandidates.clear();
  }

private:
  /// Sink eligible instructions out of \p MBB, walking it bottom-up.
  bool ProcessBlock(MachineBasicBlock &MBB);

  /// Heuristic: is splitting (\p From, \p To) likely to pay off for \p MI?
  bool isWorthBreakingCriticalEdge(MachineInstr &MI,
                                   MachineBasicBlock *From,
                                   MachineBasicBlock *To);

  /// Postpone the splitting of the given critical
  /// edge (\p From, \p To).
  ///
  /// We do not split the edges on the fly. Indeed, this invalidates
  /// the dominance information and thus triggers a lot of updates
  /// of that information underneath.
  /// Instead, we postpone all the splits after each iteration of
  /// the main loop. That way, the information is at least valid
  /// for the lifetime of an iteration.
  ///
  /// \return True if the edge is marked as toSplit, false otherwise.
  /// False can be returned if, for instance, this is not profitable.
  bool PostponeSplitCriticalEdge(MachineInstr &MI,
                                 MachineBasicBlock *From,
                                 MachineBasicBlock *To,
                                 bool BreakPHIEdge);

  /// Try to sink \p MI out of its block; \p SawStore tracks stores already
  /// seen below MI during the bottom-up walk.
  bool SinkInstruction(MachineInstr &MI, bool &SawStore,
                       AllSuccsCache &AllSuccessors);

  /// True if every non-debug use of \p Reg is dominated by \p MBB (see the
  /// definition for the PHI special case reported via \p BreakPHIEdge).
  bool AllUsesDominatedByBlock(unsigned Reg, MachineBasicBlock *MBB,
                               MachineBasicBlock *DefMBB,
                               bool &BreakPHIEdge, bool &LocalUse) const;

  /// Pick a legal successor (or dom-tree child) to sink \p MI into, or
  /// nullptr if none qualifies.
  MachineBasicBlock *FindSuccToSinkTo(MachineInstr &MI, MachineBasicBlock *MBB,
               bool &BreakPHIEdge, AllSuccsCache &AllSuccessors);

  /// Profitability check for sinking \p MI (defining \p Reg) into
  /// \p SuccToSinkTo.
  bool isProfitableToSinkTo(unsigned Reg, MachineInstr &MI,
                            MachineBasicBlock *MBB,
                            MachineBasicBlock *SuccToSinkTo,
                            AllSuccsCache &AllSuccessors);

  /// Fold a trivial vreg-to-vreg COPY into its single use.
  bool PerformTrivialForwardCoalescing(MachineInstr &MI,
                                       MachineBasicBlock *MBB);

  /// Successors of \p MBB (plus eligible dom-tree children), sorted by
  /// frequency/loop depth and cached in \p AllSuccessors.
  SmallVector<MachineBasicBlock *, 4> &
  GetAllSortedSuccessors(MachineInstr &MI, MachineBasicBlock *MBB,
                         AllSuccsCache &AllSuccessors) const;
};

} // end anonymous namespace
// Pass registration boilerplate: the pass ID, the public handle used by the
// pass manager, and the analysis dependencies declared to INITIALIZE_PASS.
char MachineSinking::ID = 0;

char &llvm::MachineSinkingID = MachineSinking::ID;

INITIALIZE_PASS_BEGIN(MachineSinking, DEBUG_TYPE,
                      "Machine code sinking", false, false)
INITIALIZE_PASS_DEPENDENCY(MachineBranchProbabilityInfo)
INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_END(MachineSinking, DEBUG_TYPE,
                    "Machine code sinking", false, false)
  166. bool MachineSinking::PerformTrivialForwardCoalescing(MachineInstr &MI,
  167. MachineBasicBlock *MBB) {
  168. if (!MI.isCopy())
  169. return false;
  170. unsigned SrcReg = MI.getOperand(1).getReg();
  171. unsigned DstReg = MI.getOperand(0).getReg();
  172. if (!TargetRegisterInfo::isVirtualRegister(SrcReg) ||
  173. !TargetRegisterInfo::isVirtualRegister(DstReg) ||
  174. !MRI->hasOneNonDBGUse(SrcReg))
  175. return false;
  176. const TargetRegisterClass *SRC = MRI->getRegClass(SrcReg);
  177. const TargetRegisterClass *DRC = MRI->getRegClass(DstReg);
  178. if (SRC != DRC)
  179. return false;
  180. MachineInstr *DefMI = MRI->getVRegDef(SrcReg);
  181. if (DefMI->isCopyLike())
  182. return false;
  183. LLVM_DEBUG(dbgs() << "Coalescing: " << *DefMI);
  184. LLVM_DEBUG(dbgs() << "*** to: " << MI);
  185. MRI->replaceRegWith(DstReg, SrcReg);
  186. MI.eraseFromParent();
  187. // Conservatively, clear any kill flags, since it's possible that they are no
  188. // longer correct.
  189. MRI->clearKillFlags(SrcReg);
  190. ++NumCoalesces;
  191. return true;
  192. }
/// AllUsesDominatedByBlock - Return true if all uses of the specified register
/// occur in blocks dominated by the specified block. If any use is in the
/// definition block, then return false since it is never legal to move def
/// after uses.
///
/// \param Reg           Virtual register whose uses are inspected.
/// \param MBB           Candidate sink destination that must dominate the uses.
/// \param DefMBB        Block currently containing the definition.
/// \param BreakPHIEdge  [out] Set to true when every use is a PHI operand in
///                      \p MBB flowing in along the \p DefMBB edge, so the
///                      edge must be split before sinking.
/// \param LocalUse      [out] Set to true when a non-PHI use is found inside
///                      \p DefMBB itself (sinking is then illegal).
bool
MachineSinking::AllUsesDominatedByBlock(unsigned Reg,
                                        MachineBasicBlock *MBB,
                                        MachineBasicBlock *DefMBB,
                                        bool &BreakPHIEdge,
                                        bool &LocalUse) const {
  assert(TargetRegisterInfo::isVirtualRegister(Reg) &&
         "Only makes sense for vregs");

  // Ignore debug uses because debug info doesn't affect the code.
  if (MRI->use_nodbg_empty(Reg))
    return true;

  // BreakPHIEdge is true if all the uses are in the successor MBB being sunken
  // into and they are all PHI nodes. In this case, machine-sink must break
  // the critical edge first. e.g.
  //
  // %bb.1: derived from LLVM BB %bb4.preheader
  //   Predecessors according to CFG: %bb.0
  //     ...
  //     %reg16385 = DEC64_32r %reg16437, implicit-def dead %eflags
  //     ...
  //     JE_4 <%bb.37>, implicit %eflags
  //   Successors according to CFG: %bb.37 %bb.2
  //
  // %bb.2: derived from LLVM BB %bb.nph
  //   Predecessors according to CFG: %bb.0 %bb.1
  //     %reg16386 = PHI %reg16434, %bb.0, %reg16385, %bb.1
  BreakPHIEdge = true;
  for (MachineOperand &MO : MRI->use_nodbg_operands(Reg)) {
    MachineInstr *UseInst = MO.getParent();
    // Operand index, recovered by pointer arithmetic into the operand array;
    // a PHI's block operand immediately follows its value operand (OpNo+1).
    unsigned OpNo = &MO - &UseInst->getOperand(0);
    MachineBasicBlock *UseBlock = UseInst->getParent();
    if (!(UseBlock == MBB && UseInst->isPHI() &&
          UseInst->getOperand(OpNo+1).getMBB() == DefMBB)) {
      BreakPHIEdge = false;
      break;
    }
  }
  if (BreakPHIEdge)
    return true;

  // Second pass: verify dominance for the general (non-all-PHI) case.
  for (MachineOperand &MO : MRI->use_nodbg_operands(Reg)) {
    // Determine the block of the use.
    MachineInstr *UseInst = MO.getParent();
    unsigned OpNo = &MO - &UseInst->getOperand(0);
    MachineBasicBlock *UseBlock = UseInst->getParent();
    if (UseInst->isPHI()) {
      // PHI nodes use the operand in the predecessor block, not the block with
      // the PHI.
      UseBlock = UseInst->getOperand(OpNo+1).getMBB();
    } else if (UseBlock == DefMBB) {
      LocalUse = true;
      return false;
    }

    // Check that it dominates.
    if (!DT->dominates(MBB, UseBlock))
      return false;
  }

  return true;
}
/// Pass entry point: repeatedly sink instructions until a fixed point, then
/// perform any critical-edge splits that were postponed during the iteration
/// (splitting eagerly would invalidate the dominator trees mid-walk).
bool MachineSinking::runOnMachineFunction(MachineFunction &MF) {
  if (skipFunction(MF.getFunction()))
    return false;

  LLVM_DEBUG(dbgs() << "******** Machine Sinking ********\n");

  // Cache the analyses for the helpers.
  TII = MF.getSubtarget().getInstrInfo();
  TRI = MF.getSubtarget().getRegisterInfo();
  MRI = &MF.getRegInfo();
  DT = &getAnalysis<MachineDominatorTree>();
  PDT = &getAnalysis<MachinePostDominatorTree>();
  LI = &getAnalysis<MachineLoopInfo>();
  MBFI = UseBlockFreqInfo ? &getAnalysis<MachineBlockFrequencyInfo>() : nullptr;
  MBPI = &getAnalysis<MachineBranchProbabilityInfo>();
  AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();

  bool EverMadeChange = false;

  // Iterate to a fixed point: sinking one instruction can enable sinking
  // another, and edge splits create new blocks worth revisiting.
  while (true) {
    bool MadeChange = false;

    // Process all basic blocks.
    CEBCandidates.clear();
    ToSplit.clear();
    for (auto &MBB: MF)
      MadeChange |= ProcessBlock(MBB);

    // If we have anything we marked as toSplit, split it now.
    for (auto &Pair : ToSplit) {
      auto NewSucc = Pair.first->SplitCriticalEdge(Pair.second, *this);
      if (NewSucc != nullptr) {
        LLVM_DEBUG(dbgs() << " *** Splitting critical edge: "
                          << printMBBReference(*Pair.first) << " -- "
                          << printMBBReference(*NewSucc) << " -- "
                          << printMBBReference(*Pair.second) << '\n');
        MadeChange = true;
        ++NumSplit;
      } else
        LLVM_DEBUG(dbgs() << " *** Not legal to break critical edge\n");
    }
    // If this iteration over the code changed anything, keep iterating.
    if (!MadeChange) break;
    EverMadeChange = true;
  }

  // Now clear any kill flags for recorded registers.
  for (auto I : RegsToClearKillFlags)
    MRI->clearKillFlags(I);
  RegsToClearKillFlags.clear();

  return EverMadeChange;
}
  299. bool MachineSinking::ProcessBlock(MachineBasicBlock &MBB) {
  300. // Can't sink anything out of a block that has less than two successors.
  301. if (MBB.succ_size() <= 1 || MBB.empty()) return false;
  302. // Don't bother sinking code out of unreachable blocks. In addition to being
  303. // unprofitable, it can also lead to infinite looping, because in an
  304. // unreachable loop there may be nowhere to stop.
  305. if (!DT->isReachableFromEntry(&MBB)) return false;
  306. bool MadeChange = false;
  307. // Cache all successors, sorted by frequency info and loop depth.
  308. AllSuccsCache AllSuccessors;
  309. // Walk the basic block bottom-up. Remember if we saw a store.
  310. MachineBasicBlock::iterator I = MBB.end();
  311. --I;
  312. bool ProcessedBegin, SawStore = false;
  313. do {
  314. MachineInstr &MI = *I; // The instruction to sink.
  315. // Predecrement I (if it's not begin) so that it isn't invalidated by
  316. // sinking.
  317. ProcessedBegin = I == MBB.begin();
  318. if (!ProcessedBegin)
  319. --I;
  320. if (MI.isDebugInstr())
  321. continue;
  322. bool Joined = PerformTrivialForwardCoalescing(MI, &MBB);
  323. if (Joined) {
  324. MadeChange = true;
  325. continue;
  326. }
  327. if (SinkInstruction(MI, SawStore, AllSuccessors)) {
  328. ++NumSunk;
  329. MadeChange = true;
  330. }
  331. // If we just processed the first instruction in the block, we're done.
  332. } while (!ProcessedBegin);
  333. return MadeChange;
  334. }
  335. bool MachineSinking::isWorthBreakingCriticalEdge(MachineInstr &MI,
  336. MachineBasicBlock *From,
  337. MachineBasicBlock *To) {
  338. // FIXME: Need much better heuristics.
  339. // If the pass has already considered breaking this edge (during this pass
  340. // through the function), then let's go ahead and break it. This means
  341. // sinking multiple "cheap" instructions into the same block.
  342. if (!CEBCandidates.insert(std::make_pair(From, To)).second)
  343. return true;
  344. if (!MI.isCopy() && !TII->isAsCheapAsAMove(MI))
  345. return true;
  346. if (From->isSuccessor(To) && MBPI->getEdgeProbability(From, To) <=
  347. BranchProbability(SplitEdgeProbabilityThreshold, 100))
  348. return true;
  349. // MI is cheap, we probably don't want to break the critical edge for it.
  350. // However, if this would allow some definitions of its source operands
  351. // to be sunk then it's probably worth it.
  352. for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
  353. const MachineOperand &MO = MI.getOperand(i);
  354. if (!MO.isReg() || !MO.isUse())
  355. continue;
  356. unsigned Reg = MO.getReg();
  357. if (Reg == 0)
  358. continue;
  359. // We don't move live definitions of physical registers,
  360. // so sinking their uses won't enable any opportunities.
  361. if (TargetRegisterInfo::isPhysicalRegister(Reg))
  362. continue;
  363. // If this instruction is the only user of a virtual register,
  364. // check if breaking the edge will enable sinking
  365. // both this instruction and the defining instruction.
  366. if (MRI->hasOneNonDBGUse(Reg)) {
  367. // If the definition resides in same MBB,
  368. // claim it's likely we can sink these together.
  369. // If definition resides elsewhere, we aren't
  370. // blocking it from being sunk so don't break the edge.
  371. MachineInstr *DefMI = MRI->getVRegDef(Reg);
  372. if (DefMI->getParent() == MI.getParent())
  373. return true;
  374. }
  375. }
  376. return false;
  377. }
  378. bool MachineSinking::PostponeSplitCriticalEdge(MachineInstr &MI,
  379. MachineBasicBlock *FromBB,
  380. MachineBasicBlock *ToBB,
  381. bool BreakPHIEdge) {
  382. if (!isWorthBreakingCriticalEdge(MI, FromBB, ToBB))
  383. return false;
  384. // Avoid breaking back edge. From == To means backedge for single BB loop.
  385. if (!SplitEdges || FromBB == ToBB)
  386. return false;
  387. // Check for backedges of more "complex" loops.
  388. if (LI->getLoopFor(FromBB) == LI->getLoopFor(ToBB) &&
  389. LI->isLoopHeader(ToBB))
  390. return false;
  391. // It's not always legal to break critical edges and sink the computation
  392. // to the edge.
  393. //
  394. // %bb.1:
  395. // v1024
  396. // Beq %bb.3
  397. // <fallthrough>
  398. // %bb.2:
  399. // ... no uses of v1024
  400. // <fallthrough>
  401. // %bb.3:
  402. // ...
  403. // = v1024
  404. //
  405. // If %bb.1 -> %bb.3 edge is broken and computation of v1024 is inserted:
  406. //
  407. // %bb.1:
  408. // ...
  409. // Bne %bb.2
  410. // %bb.4:
  411. // v1024 =
  412. // B %bb.3
  413. // %bb.2:
  414. // ... no uses of v1024
  415. // <fallthrough>
  416. // %bb.3:
  417. // ...
  418. // = v1024
  419. //
  420. // This is incorrect since v1024 is not computed along the %bb.1->%bb.2->%bb.3
  421. // flow. We need to ensure the new basic block where the computation is
  422. // sunk to dominates all the uses.
  423. // It's only legal to break critical edge and sink the computation to the
  424. // new block if all the predecessors of "To", except for "From", are
  425. // not dominated by "From". Given SSA property, this means these
  426. // predecessors are dominated by "To".
  427. //
  428. // There is no need to do this check if all the uses are PHI nodes. PHI
  429. // sources are only defined on the specific predecessor edges.
  430. if (!BreakPHIEdge) {
  431. for (MachineBasicBlock::pred_iterator PI = ToBB->pred_begin(),
  432. E = ToBB->pred_end(); PI != E; ++PI) {
  433. if (*PI == FromBB)
  434. continue;
  435. if (!DT->dominates(ToBB, *PI))
  436. return false;
  437. }
  438. }
  439. ToSplit.insert(std::make_pair(FromBB, ToBB));
  440. return true;
  441. }
/// isProfitableToSinkTo - Return true if it is profitable to sink MI.
///
/// \param Reg            Register defined by \p MI whose uses drive the check.
/// \param MI             The candidate instruction.
/// \param MBB            MI's current block.
/// \param SuccToSinkTo   The proposed destination (must be non-null).
/// \param AllSuccessors  Cache shared with FindSuccToSinkTo.
bool MachineSinking::isProfitableToSinkTo(unsigned Reg, MachineInstr &MI,
                                          MachineBasicBlock *MBB,
                                          MachineBasicBlock *SuccToSinkTo,
                                          AllSuccsCache &AllSuccessors) {
  assert (SuccToSinkTo && "Invalid SinkTo Candidate BB");

  if (MBB == SuccToSinkTo)
    return false;

  // It is profitable if SuccToSinkTo does not post dominate current block.
  if (!PDT->dominates(SuccToSinkTo, MBB))
    return true;

  // It is profitable to sink an instruction from a deeper loop to a shallower
  // loop, even if the latter post-dominates the former (PR21115).
  if (LI->getLoopDepth(MBB) > LI->getLoopDepth(SuccToSinkTo))
    return true;

  // Check if only use in post dominated block is PHI instruction.
  bool NonPHIUse = false;
  for (MachineInstr &UseInst : MRI->use_nodbg_instructions(Reg)) {
    MachineBasicBlock *UseBlock = UseInst.getParent();
    if (UseBlock == SuccToSinkTo && !UseInst.isPHI())
      NonPHIUse = true;
  }
  if (!NonPHIUse)
    return true;

  // If SuccToSinkTo post dominates then also it may be profitable if MI
  // can further profitably sinked into another block in next round.
  bool BreakPHIEdge = false;
  // FIXME - If finding successor is compile time expensive then cache results.
  if (MachineBasicBlock *MBB2 =
          FindSuccToSinkTo(MI, SuccToSinkTo, BreakPHIEdge, AllSuccessors))
    return isProfitableToSinkTo(Reg, MI, SuccToSinkTo, MBB2, AllSuccessors);

  // If SuccToSinkTo is final destination and it is a post dominator of current
  // block then it is not profitable to sink MI into SuccToSinkTo block.
  return false;
}
/// Get the sorted sequence of successors for this MachineBasicBlock, possibly
/// computing it if it was not already cached.
///
/// The result contains the CFG successors of \p MBB plus dom-tree children of
/// \p MBB that are not CFG successors, sorted so the best sink candidate
/// (lowest frequency, or shallowest loop when frequency data is missing)
/// comes first.
SmallVector<MachineBasicBlock *, 4> &
MachineSinking::GetAllSortedSuccessors(MachineInstr &MI, MachineBasicBlock *MBB,
                                       AllSuccsCache &AllSuccessors) const {
  // Do we have the sorted successors in cache ?
  auto Succs = AllSuccessors.find(MBB);
  if (Succs != AllSuccessors.end())
    return Succs->second;

  SmallVector<MachineBasicBlock *, 4> AllSuccs(MBB->succ_begin(),
                                               MBB->succ_end());

  // Handle cases where sinking can happen but where the sink point isn't a
  // successor. For example:
  //
  //   x = computation
  //   if () {} else {}
  //   use x
  //
  const std::vector<MachineDomTreeNode *> &Children =
    DT->getNode(MBB)->getChildren();
  for (const auto &DTChild : Children)
    // DomTree children of MBB that have MBB as immediate dominator are added.
    // NOTE(review): the check compares against MI.getParent() rather than
    // MBB; callers appear to always pass MI's own block here — confirm.
    if (DTChild->getIDom()->getBlock() == MI.getParent() &&
        // Skip MBBs already added to the AllSuccs vector above.
        !MBB->isSuccessor(DTChild->getBlock()))
      AllSuccs.push_back(DTChild->getBlock());

  // Sort Successors according to their loop depth or block frequency info.
  std::stable_sort(
      AllSuccs.begin(), AllSuccs.end(),
      [this](const MachineBasicBlock *L, const MachineBasicBlock *R) {
        uint64_t LHSFreq = MBFI ? MBFI->getBlockFreq(L).getFrequency() : 0;
        uint64_t RHSFreq = MBFI ? MBFI->getBlockFreq(R).getFrequency() : 0;
        // Fall back to loop depth whenever either frequency is unknown (0).
        bool HasBlockFreq = LHSFreq != 0 && RHSFreq != 0;
        return HasBlockFreq ? LHSFreq < RHSFreq
                            : LI->getLoopDepth(L) < LI->getLoopDepth(R);
      });

  auto it = AllSuccessors.insert(std::make_pair(MBB, AllSuccs));

  return it.first->second;
}
/// FindSuccToSinkTo - Find a successor to sink this instruction to.
///
/// Returns nullptr when no legal/profitable destination exists. On success,
/// \p BreakPHIEdge may be set (via AllUsesDominatedByBlock) to indicate the
/// edge into the chosen block must be split first.
MachineBasicBlock *
MachineSinking::FindSuccToSinkTo(MachineInstr &MI, MachineBasicBlock *MBB,
                                 bool &BreakPHIEdge,
                                 AllSuccsCache &AllSuccessors) {
  assert (MBB && "Invalid MachineBasicBlock!");

  // Loop over all the operands of the specified instruction. If there is
  // anything we can't handle, bail out.

  // SuccToSinkTo - This is the successor to sink this instruction to, once we
  // decide.
  MachineBasicBlock *SuccToSinkTo = nullptr;
  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI.getOperand(i);
    if (!MO.isReg()) continue;  // Ignore non-register operands.

    unsigned Reg = MO.getReg();
    if (Reg == 0) continue;

    if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
      if (MO.isUse()) {
        // If the physreg has no defs anywhere, it's just an ambient register
        // and we can freely move its uses. Alternatively, if it's allocatable,
        // it could get allocated to something with a def during allocation.
        if (!MRI->isConstantPhysReg(Reg))
          return nullptr;
      } else if (!MO.isDead()) {
        // A def that isn't dead. We can't move it.
        return nullptr;
      }
    } else {
      // Virtual register uses are always safe to sink.
      if (MO.isUse()) continue;

      // If it's not safe to move defs of the register class, then abort.
      if (!TII->isSafeToMoveRegClassDefs(MRI->getRegClass(Reg)))
        return nullptr;

      // Virtual register defs can only be sunk if all their uses are in blocks
      // dominated by one of the successors.
      if (SuccToSinkTo) {
        // If a previous operand picked a block to sink to, then this operand
        // must be sinkable to the same block.
        bool LocalUse = false;
        if (!AllUsesDominatedByBlock(Reg, SuccToSinkTo, MBB,
                                     BreakPHIEdge, LocalUse))
          return nullptr;

        continue;
      }

      // Otherwise, we should look at all the successors and decide which one
      // we should sink to. If we have reliable block frequency information
      // (frequency != 0) available, give successors with smaller frequencies
      // higher priority, otherwise prioritize smaller loop depths.
      for (MachineBasicBlock *SuccBlock :
           GetAllSortedSuccessors(MI, MBB, AllSuccessors)) {
        bool LocalUse = false;
        if (AllUsesDominatedByBlock(Reg, SuccBlock, MBB,
                                    BreakPHIEdge, LocalUse)) {
          SuccToSinkTo = SuccBlock;
          break;
        }
        if (LocalUse)
          // Def is used locally, it's never safe to move this def.
          return nullptr;
      }

      // If we couldn't find a block to sink to, ignore this instruction.
      if (!SuccToSinkTo)
        return nullptr;
      if (!isProfitableToSinkTo(Reg, MI, MBB, SuccToSinkTo, AllSuccessors))
        return nullptr;
    }
  }

  // It is not possible to sink an instruction into its own block.  This can
  // happen with loops.
  if (MBB == SuccToSinkTo)
    return nullptr;

  // It's not safe to sink instructions to EH landing pad. Control flow into
  // landing pad is implicitly defined.
  if (SuccToSinkTo && SuccToSinkTo->isEHPad())
    return nullptr;

  return SuccToSinkTo;
}
  593. /// Return true if MI is likely to be usable as a memory operation by the
  594. /// implicit null check optimization.
  595. ///
  596. /// This is a "best effort" heuristic, and should not be relied upon for
  597. /// correctness. This returning true does not guarantee that the implicit null
  598. /// check optimization is legal over MI, and this returning false does not
  599. /// guarantee MI cannot possibly be used to do a null check.
  600. static bool SinkingPreventsImplicitNullCheck(MachineInstr &MI,
  601. const TargetInstrInfo *TII,
  602. const TargetRegisterInfo *TRI) {
  603. using MachineBranchPredicate = TargetInstrInfo::MachineBranchPredicate;
  604. auto *MBB = MI.getParent();
  605. if (MBB->pred_size() != 1)
  606. return false;
  607. auto *PredMBB = *MBB->pred_begin();
  608. auto *PredBB = PredMBB->getBasicBlock();
  609. // Frontends that don't use implicit null checks have no reason to emit
  610. // branches with make.implicit metadata, and this function should always
  611. // return false for them.
  612. if (!PredBB ||
  613. !PredBB->getTerminator()->getMetadata(LLVMContext::MD_make_implicit))
  614. return false;
  615. const MachineOperand *BaseOp;
  616. int64_t Offset;
  617. if (!TII->getMemOperandWithOffset(MI, BaseOp, Offset, TRI))
  618. return false;
  619. if (!BaseOp->isReg())
  620. return false;
  621. if (!(MI.mayLoad() && !MI.isPredicable()))
  622. return false;
  623. MachineBranchPredicate MBP;
  624. if (TII->analyzeBranchPredicate(*PredMBB, MBP, false))
  625. return false;
  626. return MBP.LHS.isReg() && MBP.RHS.isImm() && MBP.RHS.getImm() == 0 &&
  627. (MBP.Predicate == MachineBranchPredicate::PRED_NE ||
  628. MBP.Predicate == MachineBranchPredicate::PRED_EQ) &&
  629. MBP.LHS.getReg() == BaseOp->getReg();
  630. }
  631. /// Sink an instruction and its associated debug instructions. If the debug
  632. /// instructions to be sunk are already known, they can be provided in DbgVals.
  633. static void performSink(MachineInstr &MI, MachineBasicBlock &SuccToSinkTo,
  634. MachineBasicBlock::iterator InsertPos,
  635. SmallVectorImpl<MachineInstr *> *DbgVals = nullptr) {
  636. // If debug values are provided use those, otherwise call collectDebugValues.
  637. SmallVector<MachineInstr *, 2> DbgValuesToSink;
  638. if (DbgVals)
  639. DbgValuesToSink.insert(DbgValuesToSink.begin(),
  640. DbgVals->begin(), DbgVals->end());
  641. else
  642. MI.collectDebugValues(DbgValuesToSink);
  643. // If we cannot find a location to use (merge with), then we erase the debug
  644. // location to prevent debug-info driven tools from potentially reporting
  645. // wrong location information.
  646. if (!SuccToSinkTo.empty() && InsertPos != SuccToSinkTo.end())
  647. MI.setDebugLoc(DILocation::getMergedLocation(MI.getDebugLoc(),
  648. InsertPos->getDebugLoc()));
  649. else
  650. MI.setDebugLoc(DebugLoc());
  651. // Move the instruction.
  652. MachineBasicBlock *ParentBlock = MI.getParent();
  653. SuccToSinkTo.splice(InsertPos, ParentBlock, MI,
  654. ++MachineBasicBlock::iterator(MI));
  655. // Move previously adjacent debug value instructions to the insert position.
  656. for (SmallVectorImpl<MachineInstr *>::iterator DBI = DbgValuesToSink.begin(),
  657. DBE = DbgValuesToSink.end();
  658. DBI != DBE; ++DBI) {
  659. MachineInstr *DbgMI = *DBI;
  660. SuccToSinkTo.splice(InsertPos, ParentBlock, DbgMI,
  661. ++MachineBasicBlock::iterator(DbgMI));
  662. }
  663. }
/// SinkInstruction - Determine whether it is safe to sink the specified machine
/// instruction out of its current block into a successor, and do so if it is.
/// Returns true only when MI has actually been moved.
bool MachineSinking::SinkInstruction(MachineInstr &MI, bool &SawStore,
                                     AllSuccsCache &AllSuccessors) {
  // Don't sink instructions that the target prefers not to sink.
  if (!TII->shouldSink(MI))
    return false;

  // Check if it's safe to move the instruction.
  if (!MI.isSafeToMove(AA, SawStore))
    return false;

  // Convergent operations may not be made control-dependent on additional
  // values.
  if (MI.isConvergent())
    return false;

  // Don't break implicit null checks. This is a performance heuristic, and not
  // required for correctness.
  if (SinkingPreventsImplicitNullCheck(MI, TII, TRI))
    return false;

  // FIXME: This should include support for sinking instructions within the
  // block they are currently in to shorten the live ranges. We often get
  // instructions sunk into the top of a large block, but it would be better to
  // also sink them down before their first use in the block. This xform has to
  // be careful not to *increase* register pressure though, e.g. sinking
  // "x = y + z" down if it kills y and z would increase the live ranges of y
  // and z and only shrink the live range of x.

  // Pick the single successor block to sink into; BreakPHIEdge is set when all
  // uses are PHIs in that successor, which forces edge splitting below.
  bool BreakPHIEdge = false;
  MachineBasicBlock *ParentBlock = MI.getParent();
  MachineBasicBlock *SuccToSinkTo =
      FindSuccToSinkTo(MI, ParentBlock, BreakPHIEdge, AllSuccessors);

  // If there are no outputs, it must have side-effects.
  if (!SuccToSinkTo)
    return false;

  // If the instruction to move defines a dead physical register which is live
  // when leaving the basic block, don't move it because it could turn into a
  // "zombie" define of that preg. E.g., EFLAGS. (<rdar://problem/8030636>)
  for (unsigned I = 0, E = MI.getNumOperands(); I != E; ++I) {
    const MachineOperand &MO = MI.getOperand(I);
    if (!MO.isReg()) continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || !TargetRegisterInfo::isPhysicalRegister(Reg)) continue;
    if (SuccToSinkTo->isLiveIn(Reg))
      return false;
  }

  LLVM_DEBUG(dbgs() << "Sink instr " << MI << "\tinto block " << *SuccToSinkTo);

  // If the block has multiple predecessors, this is a critical edge.
  // Decide if we can sink along it or need to break the edge.
  if (SuccToSinkTo->pred_size() > 1) {
    // We cannot sink a load across a critical edge - there may be stores in
    // other code paths.
    bool TryBreak = false;
    // Re-check safety assuming a store was seen, to conservatively reject
    // loads on this path.
    bool store = true;
    if (!MI.isSafeToMove(AA, store)) {
      LLVM_DEBUG(dbgs() << " *** NOTE: Won't sink load along critical edge.\n");
      TryBreak = true;
    }

    // We don't want to sink across a critical edge if we don't dominate the
    // successor. We could be introducing calculations to new code paths.
    if (!TryBreak && !DT->dominates(ParentBlock, SuccToSinkTo)) {
      LLVM_DEBUG(dbgs() << " *** NOTE: Critical edge found\n");
      TryBreak = true;
    }

    // Don't sink instructions into a loop.
    if (!TryBreak && LI->isLoopHeader(SuccToSinkTo)) {
      LLVM_DEBUG(dbgs() << " *** NOTE: Loop header found\n");
      TryBreak = true;
    }

    // Otherwise we are OK with sinking along a critical edge.
    if (!TryBreak)
      LLVM_DEBUG(dbgs() << "Sinking along critical edge.\n");
    else {
      // Mark this edge as to be split.
      // If the edge can actually be split, the next iteration of the main loop
      // will sink MI in the newly created block.
      bool Status =
        PostponeSplitCriticalEdge(MI, ParentBlock, SuccToSinkTo, BreakPHIEdge);
      if (!Status)
        LLVM_DEBUG(dbgs() << " *** PUNTING: Not legal or profitable to "
                             "break critical edge\n");
      // The instruction will not be sunk this time.
      return false;
    }
  }

  if (BreakPHIEdge) {
    // BreakPHIEdge is true if all the uses are in the successor MBB being
    // sunk into and they are all PHI nodes. In this case, machine-sink must
    // break the critical edge first.
    bool Status = PostponeSplitCriticalEdge(MI, ParentBlock,
                                            SuccToSinkTo, BreakPHIEdge);
    if (!Status)
      LLVM_DEBUG(dbgs() << " *** PUNTING: Not legal or profitable to "
                           "break critical edge\n");
    // The instruction will not be sunk this time.
    return false;
  }

  // Determine where to insert into. Skip phi nodes.
  MachineBasicBlock::iterator InsertPos = SuccToSinkTo->begin();
  while (InsertPos != SuccToSinkTo->end() && InsertPos->isPHI())
    ++InsertPos;

  performSink(MI, *SuccToSinkTo, InsertPos);

  // Conservatively, clear any kill flags, since it's possible that they are no
  // longer correct.
  // Note that we have to clear the kill flags for any register this instruction
  // uses as we may sink over another instruction which currently kills the
  // used registers.
  for (MachineOperand &MO : MI.operands()) {
    if (MO.isReg() && MO.isUse())
      RegsToClearKillFlags.set(MO.getReg()); // Remember to clear kill flags.
  }

  return true;
}
  774. //===----------------------------------------------------------------------===//
  775. // This pass is not intended to be a replacement or a complete alternative
  776. // for the pre-ra machine sink pass. It is only designed to sink COPY
  777. // instructions which should be handled after RA.
  778. //
  779. // This pass sinks COPY instructions into a successor block, if the COPY is not
  780. // used in the current block and the COPY is live-in to a single successor
  781. // (i.e., doesn't require the COPY to be duplicated). This avoids executing the
  782. // copy on paths where their results aren't needed. This also exposes
// additional opportunities for dead copy elimination and shrink wrapping.
  784. //
  785. // These copies were either not handled by or are inserted after the MachineSink
  786. // pass. As an example of the former case, the MachineSink pass cannot sink
  787. // COPY instructions with allocatable source registers; for AArch64 these type
  788. // of copy instructions are frequently used to move function parameters (PhyReg)
  789. // into virtual registers in the entry block.
  790. //
  791. // For the machine IR below, this pass will sink %w19 in the entry into its
  792. // successor (%bb.1) because %w19 is only live-in in %bb.1.
  793. // %bb.0:
  794. // %wzr = SUBSWri %w1, 1
  795. // %w19 = COPY %w0
  796. // Bcc 11, %bb.2
  797. // %bb.1:
  798. // Live Ins: %w19
  799. // BL @fun
  800. // %w0 = ADDWrr %w0, %w19
  801. // RET %w0
  802. // %bb.2:
  803. // %w0 = COPY %wzr
  804. // RET %w0
  805. // As we sink %w19 (CSR in AArch64) into %bb.1, the shrink-wrapping pass will be
  806. // able to see %bb.0 as a candidate.
  807. //===----------------------------------------------------------------------===//
namespace {

/// Pass that sinks post-RA COPY instructions into the single successor that
/// uses their result; see the file comment above for the motivation.
class PostRAMachineSinking : public MachineFunctionPass {
public:
  bool runOnMachineFunction(MachineFunction &MF) override;

  /// Pass identification; the address of ID uniquely identifies the pass.
  static char ID;

  PostRAMachineSinking() : MachineFunctionPass(ID) {}

  StringRef getPassName() const override { return "PostRA Machine Sink"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    // The pass only moves instructions between existing blocks; it never
    // changes the CFG.
    AU.setPreservesCFG();
    MachineFunctionPass::getAnalysisUsage(AU);
  }

  MachineFunctionProperties getRequiredProperties() const override {
    // Runs after register allocation, so no virtual registers may remain.
    return MachineFunctionProperties().set(
        MachineFunctionProperties::Property::NoVRegs);
  }

private:
  /// Track which register units have been modified and used.
  LiveRegUnits ModifiedRegUnits, UsedRegUnits;

  /// Track DBG_VALUEs of (unmodified) register units.
  DenseMap<unsigned, TinyPtrVector<MachineInstr*>> SeenDbgInstrs;

  /// Sink Copy instructions unused in the same block close to their uses in
  /// successors.
  bool tryToSinkCopy(MachineBasicBlock &BB, MachineFunction &MF,
                     const TargetRegisterInfo *TRI, const TargetInstrInfo *TII);
};
} // namespace
// Static pass identifier and the externally visible alias used by
// llvm::createPassFromID-style lookup.
char PostRAMachineSinking::ID = 0;

char &llvm::PostRAMachineSinkingID = PostRAMachineSinking::ID;

// Register the pass with the pass registry under "postra-machine-sink".
INITIALIZE_PASS(PostRAMachineSinking, "postra-machine-sink",
                "PostRA Machine Sink", false, false)
  838. static bool aliasWithRegsInLiveIn(MachineBasicBlock &MBB, unsigned Reg,
  839. const TargetRegisterInfo *TRI) {
  840. LiveRegUnits LiveInRegUnits(*TRI);
  841. LiveInRegUnits.addLiveIns(MBB);
  842. return !LiveInRegUnits.available(Reg);
  843. }
  844. static MachineBasicBlock *
  845. getSingleLiveInSuccBB(MachineBasicBlock &CurBB,
  846. const SmallPtrSetImpl<MachineBasicBlock *> &SinkableBBs,
  847. unsigned Reg, const TargetRegisterInfo *TRI) {
  848. // Try to find a single sinkable successor in which Reg is live-in.
  849. MachineBasicBlock *BB = nullptr;
  850. for (auto *SI : SinkableBBs) {
  851. if (aliasWithRegsInLiveIn(*SI, Reg, TRI)) {
  852. // If BB is set here, Reg is live-in to at least two sinkable successors,
  853. // so quit.
  854. if (BB)
  855. return nullptr;
  856. BB = SI;
  857. }
  858. }
  859. // Reg is not live-in to any sinkable successors.
  860. if (!BB)
  861. return nullptr;
  862. // Check if any register aliased with Reg is live-in in other successors.
  863. for (auto *SI : CurBB.successors()) {
  864. if (!SinkableBBs.count(SI) && aliasWithRegsInLiveIn(*SI, Reg, TRI))
  865. return nullptr;
  866. }
  867. return BB;
  868. }
  869. static MachineBasicBlock *
  870. getSingleLiveInSuccBB(MachineBasicBlock &CurBB,
  871. const SmallPtrSetImpl<MachineBasicBlock *> &SinkableBBs,
  872. ArrayRef<unsigned> DefedRegsInCopy,
  873. const TargetRegisterInfo *TRI) {
  874. MachineBasicBlock *SingleBB = nullptr;
  875. for (auto DefReg : DefedRegsInCopy) {
  876. MachineBasicBlock *BB =
  877. getSingleLiveInSuccBB(CurBB, SinkableBBs, DefReg, TRI);
  878. if (!BB || (SingleBB && SingleBB != BB))
  879. return nullptr;
  880. SingleBB = BB;
  881. }
  882. return SingleBB;
  883. }
  884. static void clearKillFlags(MachineInstr *MI, MachineBasicBlock &CurBB,
  885. SmallVectorImpl<unsigned> &UsedOpsInCopy,
  886. LiveRegUnits &UsedRegUnits,
  887. const TargetRegisterInfo *TRI) {
  888. for (auto U : UsedOpsInCopy) {
  889. MachineOperand &MO = MI->getOperand(U);
  890. unsigned SrcReg = MO.getReg();
  891. if (!UsedRegUnits.available(SrcReg)) {
  892. MachineBasicBlock::iterator NI = std::next(MI->getIterator());
  893. for (MachineInstr &UI : make_range(NI, CurBB.end())) {
  894. if (UI.killsRegister(SrcReg, TRI)) {
  895. UI.clearRegisterKills(SrcReg, TRI);
  896. MO.setIsKill(true);
  897. break;
  898. }
  899. }
  900. }
  901. }
  902. }
  903. static void updateLiveIn(MachineInstr *MI, MachineBasicBlock *SuccBB,
  904. SmallVectorImpl<unsigned> &UsedOpsInCopy,
  905. SmallVectorImpl<unsigned> &DefedRegsInCopy) {
  906. MachineFunction &MF = *SuccBB->getParent();
  907. const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
  908. for (unsigned DefReg : DefedRegsInCopy)
  909. for (MCSubRegIterator S(DefReg, TRI, true); S.isValid(); ++S)
  910. SuccBB->removeLiveIn(*S);
  911. for (auto U : UsedOpsInCopy) {
  912. unsigned Reg = MI->getOperand(U).getReg();
  913. if (!SuccBB->isLiveIn(Reg))
  914. SuccBB->addLiveIn(Reg);
  915. }
  916. }
  917. static bool hasRegisterDependency(MachineInstr *MI,
  918. SmallVectorImpl<unsigned> &UsedOpsInCopy,
  919. SmallVectorImpl<unsigned> &DefedRegsInCopy,
  920. LiveRegUnits &ModifiedRegUnits,
  921. LiveRegUnits &UsedRegUnits) {
  922. bool HasRegDependency = false;
  923. for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
  924. MachineOperand &MO = MI->getOperand(i);
  925. if (!MO.isReg())
  926. continue;
  927. unsigned Reg = MO.getReg();
  928. if (!Reg)
  929. continue;
  930. if (MO.isDef()) {
  931. if (!ModifiedRegUnits.available(Reg) || !UsedRegUnits.available(Reg)) {
  932. HasRegDependency = true;
  933. break;
  934. }
  935. DefedRegsInCopy.push_back(Reg);
  936. // FIXME: instead of isUse(), readsReg() would be a better fix here,
  937. // For example, we can ignore modifications in reg with undef. However,
  938. // it's not perfectly clear if skipping the internal read is safe in all
  939. // other targets.
  940. } else if (MO.isUse()) {
  941. if (!ModifiedRegUnits.available(Reg)) {
  942. HasRegDependency = true;
  943. break;
  944. }
  945. UsedOpsInCopy.push_back(i);
  946. }
  947. }
  948. return HasRegDependency;
  949. }
/// Walk CurBB bottom-up and sink renamable COPY instructions (plus any
/// DBG_VALUEs of their results) into the single successor that has their
/// result live-in. Returns true if anything was moved.
bool PostRAMachineSinking::tryToSinkCopy(MachineBasicBlock &CurBB,
                                         MachineFunction &MF,
                                         const TargetRegisterInfo *TRI,
                                         const TargetInstrInfo *TII) {
  SmallPtrSet<MachineBasicBlock *, 2> SinkableBBs;
  // FIXME: For now, we sink only to a successor which has a single predecessor
  // so that we can directly sink COPY instructions to the successor without
  // adding any new block or branch instruction.
  for (MachineBasicBlock *SI : CurBB.successors())
    if (!SI->livein_empty() && SI->pred_size() == 1)
      SinkableBBs.insert(SI);

  if (SinkableBBs.empty())
    return false;

  bool Changed = false;

  // Track which registers have been modified and used between the end of the
  // block and the current instruction.
  ModifiedRegUnits.clear();
  UsedRegUnits.clear();
  SeenDbgInstrs.clear();

  // Iterate in reverse so the live-unit sets always summarize the
  // instructions *below* the current one.
  for (auto I = CurBB.rbegin(), E = CurBB.rend(); I != E;) {
    MachineInstr *MI = &*I;
    // Advance the iterator first: MI may be spliced out of CurBB below.
    ++I;

    // Track the operand index for use in Copy.
    SmallVector<unsigned, 2> UsedOpsInCopy;
    // Track the register number defed in Copy.
    SmallVector<unsigned, 2> DefedRegsInCopy;

    // We must sink this DBG_VALUE if its operand is sunk. To avoid searching
    // for DBG_VALUEs later, record them when they're encountered.
    if (MI->isDebugValue()) {
      auto &MO = MI->getOperand(0);
      if (MO.isReg() && TRI->isPhysicalRegister(MO.getReg())) {
        // Bail if we can already tell the sink would be rejected, rather
        // than needlessly accumulating lots of DBG_VALUEs.
        if (hasRegisterDependency(MI, UsedOpsInCopy, DefedRegsInCopy,
                                  ModifiedRegUnits, UsedRegUnits))
          continue;

        // Record debug use of this register.
        SeenDbgInstrs[MO.getReg()].push_back(MI);
      }
      continue;
    }

    if (MI->isDebugInstr())
      continue;

    // Do not move any instruction across function call.
    if (MI->isCall())
      return false;

    // Non-copies (and non-renamable copies) are never sunk; just fold their
    // defs/uses into the tracking sets.
    if (!MI->isCopy() || !MI->getOperand(0).isRenamable()) {
      LiveRegUnits::accumulateUsedDefed(*MI, ModifiedRegUnits, UsedRegUnits,
                                        TRI);
      continue;
    }

    // Don't sink the COPY if it would violate a register dependency.
    if (hasRegisterDependency(MI, UsedOpsInCopy, DefedRegsInCopy,
                              ModifiedRegUnits, UsedRegUnits)) {
      LiveRegUnits::accumulateUsedDefed(*MI, ModifiedRegUnits, UsedRegUnits,
                                        TRI);
      continue;
    }
    assert((!UsedOpsInCopy.empty() && !DefedRegsInCopy.empty()) &&
           "Unexpect SrcReg or DefReg");
    MachineBasicBlock *SuccBB =
        getSingleLiveInSuccBB(CurBB, SinkableBBs, DefedRegsInCopy, TRI);
    // Don't sink if we cannot find a single sinkable successor in which Reg
    // is live-in.
    if (!SuccBB) {
      LiveRegUnits::accumulateUsedDefed(*MI, ModifiedRegUnits, UsedRegUnits,
                                        TRI);
      continue;
    }
    assert((SuccBB->pred_size() == 1 && *SuccBB->pred_begin() == &CurBB) &&
           "Unexpected predecessor");

    // Collect the DBG_VALUEs recorded earlier that describe a register this
    // copy defines; they must move along with it.
    SmallVector<MachineInstr *, 4> DbgValsToSink;
    for (auto &MO : MI->operands()) {
      if (!MO.isReg() || !MO.isDef())
        continue;
      unsigned reg = MO.getReg();
      for (auto *MI : SeenDbgInstrs.lookup(reg))
        DbgValsToSink.push_back(MI);
    }

    // Clear the kill flag if SrcReg is killed between MI and the end of the
    // block.
    clearKillFlags(MI, CurBB, UsedOpsInCopy, UsedRegUnits, TRI);
    MachineBasicBlock::iterator InsertPos = SuccBB->getFirstNonPHI();
    performSink(*MI, *SuccBB, InsertPos, &DbgValsToSink);
    updateLiveIn(MI, SuccBB, UsedOpsInCopy, DefedRegsInCopy);

    Changed = true;
    ++NumPostRACopySink;
  }
  return Changed;
}
  1041. bool PostRAMachineSinking::runOnMachineFunction(MachineFunction &MF) {
  1042. if (skipFunction(MF.getFunction()))
  1043. return false;
  1044. bool Changed = false;
  1045. const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
  1046. const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
  1047. ModifiedRegUnits.init(*TRI);
  1048. UsedRegUnits.init(*TRI);
  1049. for (auto &BB : MF)
  1050. Changed |= tryToSinkCopy(BB, MF, TRI, TII);
  1051. return Changed;
  1052. }