  1. //===- MachineSink.cpp - Sinking for machine instructions -----------------===//
  2. //
  3. // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
  4. // See https://llvm.org/LICENSE.txt for license information.
  5. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
  6. //
  7. //===----------------------------------------------------------------------===//
  8. //
  9. // This pass moves instructions into successor blocks when possible, so that
  10. // they aren't executed on paths where their results aren't needed.
  11. //
  12. // This pass is not intended to be a replacement or a complete alternative
  13. // for an LLVM-IR-level sinking pass. It is only designed to sink simple
  14. // constructs that are not exposed before lowering and instruction selection.
  15. //
  16. //===----------------------------------------------------------------------===//
  17. #include "llvm/ADT/SetVector.h"
  18. #include "llvm/ADT/SmallSet.h"
  19. #include "llvm/ADT/SmallVector.h"
  20. #include "llvm/ADT/SparseBitVector.h"
  21. #include "llvm/ADT/Statistic.h"
  22. #include "llvm/Analysis/AliasAnalysis.h"
  23. #include "llvm/CodeGen/MachineBasicBlock.h"
  24. #include "llvm/CodeGen/MachineBlockFrequencyInfo.h"
  25. #include "llvm/CodeGen/MachineBranchProbabilityInfo.h"
  26. #include "llvm/CodeGen/MachineDominators.h"
  27. #include "llvm/CodeGen/MachineFunction.h"
  28. #include "llvm/CodeGen/MachineFunctionPass.h"
  29. #include "llvm/CodeGen/MachineInstr.h"
  30. #include "llvm/CodeGen/MachineLoopInfo.h"
  31. #include "llvm/CodeGen/MachineOperand.h"
  32. #include "llvm/CodeGen/MachinePostDominators.h"
  33. #include "llvm/CodeGen/MachineRegisterInfo.h"
  34. #include "llvm/CodeGen/TargetInstrInfo.h"
  35. #include "llvm/CodeGen/TargetRegisterInfo.h"
  36. #include "llvm/CodeGen/TargetSubtargetInfo.h"
  37. #include "llvm/IR/BasicBlock.h"
  38. #include "llvm/IR/DebugInfoMetadata.h"
  39. #include "llvm/IR/LLVMContext.h"
  40. #include "llvm/MC/MCRegisterInfo.h"
  41. #include "llvm/Pass.h"
  42. #include "llvm/Support/BranchProbability.h"
  43. #include "llvm/Support/CommandLine.h"
  44. #include "llvm/Support/Debug.h"
  45. #include "llvm/Support/raw_ostream.h"
  46. #include <algorithm>
  47. #include <cassert>
  48. #include <cstdint>
  49. #include <map>
  50. #include <utility>
  51. #include <vector>
  52. using namespace llvm;
  53. #define DEBUG_TYPE "machine-sink"
  54. static cl::opt<bool>
  55. SplitEdges("machine-sink-split",
  56. cl::desc("Split critical edges during machine sinking"),
  57. cl::init(true), cl::Hidden);
  58. static cl::opt<bool>
  59. UseBlockFreqInfo("machine-sink-bfi",
  60. cl::desc("Use block frequency info to find successors to sink"),
  61. cl::init(true), cl::Hidden);
  62. static cl::opt<unsigned> SplitEdgeProbabilityThreshold(
  63. "machine-sink-split-probability-threshold",
  64. cl::desc(
  65. "Percentage threshold for splitting single-instruction critical edge. "
  66. "If the branch threshold is higher than this threshold, we allow "
  67. "speculative execution of up to 1 instruction to avoid branching to "
  68. "splitted critical edge"),
  69. cl::init(40), cl::Hidden);
  70. STATISTIC(NumSunk, "Number of machine instructions sunk");
  71. STATISTIC(NumSplit, "Number of critical edges split");
  72. STATISTIC(NumCoalesces, "Number of copies coalesced");
  73. STATISTIC(NumPostRACopySink, "Number of copies sunk after RA");
  74. namespace {
  75. class MachineSinking : public MachineFunctionPass {
  76. const TargetInstrInfo *TII;
  77. const TargetRegisterInfo *TRI;
  78. MachineRegisterInfo *MRI; // Machine register information
  79. MachineDominatorTree *DT; // Machine dominator tree
  80. MachinePostDominatorTree *PDT; // Machine post dominator tree
  81. MachineLoopInfo *LI;
  82. const MachineBlockFrequencyInfo *MBFI;
  83. const MachineBranchProbabilityInfo *MBPI;
  84. AliasAnalysis *AA;
  85. // Remember which edges have been considered for breaking.
  86. SmallSet<std::pair<MachineBasicBlock*, MachineBasicBlock*>, 8>
  87. CEBCandidates;
  88. // Remember which edges we are about to split.
  89. // This is different from CEBCandidates: the edges recorded here
  90. // actually will be split.
  91. SetVector<std::pair<MachineBasicBlock *, MachineBasicBlock *>> ToSplit;
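// Virtual registers whose kill flags must be conservatively cleared once
// sinking is complete, since instructions may have been moved past their
// former kills.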
  92. SparseBitVector<> RegsToClearKillFlags;
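// Per-block cache of successors, sorted by profitability (block frequency or
// loop depth); filled lazily by GetAllSortedSuccessors().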
  93. using AllSuccsCache =
  94. std::map<MachineBasicBlock *, SmallVector<MachineBasicBlock *, 4>>;
  95. public:
  96. static char ID; // Pass identification
  97. MachineSinking() : MachineFunctionPass(ID) {
  98. initializeMachineSinkingPass(*PassRegistry::getPassRegistry());
  99. }
  100. bool runOnMachineFunction(MachineFunction &MF) override;
  101. void getAnalysisUsage(AnalysisUsage &AU) const override {
  102. AU.setPreservesCFG();
  103. MachineFunctionPass::getAnalysisUsage(AU);
  104. AU.addRequired<AAResultsWrapperPass>();
  105. AU.addRequired<MachineDominatorTree>();
  106. AU.addRequired<MachinePostDominatorTree>();
  107. AU.addRequired<MachineLoopInfo>();
  108. AU.addRequired<MachineBranchProbabilityInfo>();
  109. AU.addPreserved<MachineDominatorTree>();
  110. AU.addPreserved<MachinePostDominatorTree>();
  111. AU.addPreserved<MachineLoopInfo>();
  112. if (UseBlockFreqInfo)
  113. AU.addRequired<MachineBlockFrequencyInfo>();
  114. }
  115. void releaseMemory() override {
  116. CEBCandidates.clear();
  117. }
  118. private:
  119. bool ProcessBlock(MachineBasicBlock &MBB);
  120. bool isWorthBreakingCriticalEdge(MachineInstr &MI,
  121. MachineBasicBlock *From,
  122. MachineBasicBlock *To);
  123. /// Postpone the splitting of the given critical
  124. /// edge (\p From, \p To).
  125. ///
  126. /// We do not split the edges on the fly. Indeed, this invalidates
  127. /// the dominance information and thus triggers a lot of updates
  128. /// of that information underneath.
  129. /// Instead, we postpone all the splits after each iteration of
  130. /// the main loop. That way, the information is at least valid
  131. /// for the lifetime of an iteration.
  132. ///
  133. /// \return True if the edge is marked as toSplit, false otherwise.
  134. /// False can be returned if, for instance, this is not profitable.
  135. bool PostponeSplitCriticalEdge(MachineInstr &MI,
  136. MachineBasicBlock *From,
  137. MachineBasicBlock *To,
  138. bool BreakPHIEdge);
  139. bool SinkInstruction(MachineInstr &MI, bool &SawStore,
  140. AllSuccsCache &AllSuccessors);
  141. bool AllUsesDominatedByBlock(unsigned Reg, MachineBasicBlock *MBB,
  142. MachineBasicBlock *DefMBB,
  143. bool &BreakPHIEdge, bool &LocalUse) const;
  144. MachineBasicBlock *FindSuccToSinkTo(MachineInstr &MI, MachineBasicBlock *MBB,
  145. bool &BreakPHIEdge, AllSuccsCache &AllSuccessors);
  146. bool isProfitableToSinkTo(unsigned Reg, MachineInstr &MI,
  147. MachineBasicBlock *MBB,
  148. MachineBasicBlock *SuccToSinkTo,
  149. AllSuccsCache &AllSuccessors);
  150. bool PerformTrivialForwardCoalescing(MachineInstr &MI,
  151. MachineBasicBlock *MBB);
  152. SmallVector<MachineBasicBlock *, 4> &
  153. GetAllSortedSuccessors(MachineInstr &MI, MachineBasicBlock *MBB,
  154. AllSuccsCache &AllSuccessors) const;
  155. };
  156. } // end anonymous namespace
  157. char MachineSinking::ID = 0;
  158. char &llvm::MachineSinkingID = MachineSinking::ID;
  159. INITIALIZE_PASS_BEGIN(MachineSinking, DEBUG_TYPE,
  160. "Machine code sinking", false, false)
  161. INITIALIZE_PASS_DEPENDENCY(MachineBranchProbabilityInfo)
  162. INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
  163. INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
  164. INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
  165. INITIALIZE_PASS_END(MachineSinking, DEBUG_TYPE,
  166. "Machine code sinking", false, false)
  167. bool MachineSinking::PerformTrivialForwardCoalescing(MachineInstr &MI,
  168. MachineBasicBlock *MBB) {
  169. if (!MI.isCopy())
  170. return false;
  171. Register SrcReg = MI.getOperand(1).getReg();
  172. Register DstReg = MI.getOperand(0).getReg();
  173. if (!Register::isVirtualRegister(SrcReg) ||
  174. !Register::isVirtualRegister(DstReg) || !MRI->hasOneNonDBGUse(SrcReg))
  175. return false;
  176. const TargetRegisterClass *SRC = MRI->getRegClass(SrcReg);
  177. const TargetRegisterClass *DRC = MRI->getRegClass(DstReg);
  178. if (SRC != DRC)
  179. return false;
  180. MachineInstr *DefMI = MRI->getVRegDef(SrcReg);
  181. if (DefMI->isCopyLike())
  182. return false;
  183. LLVM_DEBUG(dbgs() << "Coalescing: " << *DefMI);
  184. LLVM_DEBUG(dbgs() << "*** to: " << MI);
  185. MRI->replaceRegWith(DstReg, SrcReg);
  186. MI.eraseFromParent();
  187. // Conservatively, clear any kill flags, since it's possible that they are no
  188. // longer correct.
  189. MRI->clearKillFlags(SrcReg);
  190. ++NumCoalesces;
  191. return true;
  192. }
  193. /// AllUsesDominatedByBlock - Return true if all uses of the specified register
  194. /// occur in blocks dominated by the specified block. If any use is in the
  195. /// definition block, then return false since it is never legal to move def
  196. /// after uses.
  197. bool
  198. MachineSinking::AllUsesDominatedByBlock(unsigned Reg,
  199. MachineBasicBlock *MBB,
  200. MachineBasicBlock *DefMBB,
  201. bool &BreakPHIEdge,
  202. bool &LocalUse) const {
  203. assert(Register::isVirtualRegister(Reg) && "Only makes sense for vregs");
  204. // Ignore debug uses because debug info doesn't affect the code.
  205. if (MRI->use_nodbg_empty(Reg))
  206. return true;
  207. // BreakPHIEdge is true if all the uses are in the successor MBB being sunk
  208. // into and they are all PHI nodes. In this case, machine-sink must break
  209. // the critical edge first, e.g.:
  210. //
  211. // %bb.1: derived from LLVM BB %bb4.preheader
  212. // Predecessors according to CFG: %bb.0
  213. // ...
  214. // %reg16385 = DEC64_32r %reg16437, implicit-def dead %eflags
  215. // ...
  216. // JE_4 <%bb.37>, implicit %eflags
  217. // Successors according to CFG: %bb.37 %bb.2
  218. //
  219. // %bb.2: derived from LLVM BB %bb.nph
  220. // Predecessors according to CFG: %bb.0 %bb.1
  221. // %reg16386 = PHI %reg16434, %bb.0, %reg16385, %bb.1
  222. BreakPHIEdge = true;
  223. for (MachineOperand &MO : MRI->use_nodbg_operands(Reg)) {
  224. MachineInstr *UseInst = MO.getParent();
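// Recover MO's operand index via pointer arithmetic on the operand array; for
// a PHI, operands come in (value, predecessor-block) pairs after the def, so
// operand OpNo+1 is the basic block this incoming value flows in from.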
  225. unsigned OpNo = &MO - &UseInst->getOperand(0);
  226. MachineBasicBlock *UseBlock = UseInst->getParent();
  227. if (!(UseBlock == MBB && UseInst->isPHI() &&
  228. UseInst->getOperand(OpNo+1).getMBB() == DefMBB)) {
  229. BreakPHIEdge = false;
  230. break;
  231. }
  232. }
  233. if (BreakPHIEdge)
  234. return true;
  235. for (MachineOperand &MO : MRI->use_nodbg_operands(Reg)) {
  236. // Determine the block of the use.
  237. MachineInstr *UseInst = MO.getParent();
  238. unsigned OpNo = &MO - &UseInst->getOperand(0);
  239. MachineBasicBlock *UseBlock = UseInst->getParent();
  240. if (UseInst->isPHI()) {
  241. // PHI nodes use the operand in the predecessor block, not the block with
  242. // the PHI.
  243. UseBlock = UseInst->getOperand(OpNo+1).getMBB();
  244. } else if (UseBlock == DefMBB) {
  245. LocalUse = true;
  246. return false;
  247. }
  248. // Check that it dominates.
  249. if (!DT->dominates(MBB, UseBlock))
  250. return false;
  251. }
  252. return true;
  253. }
  254. bool MachineSinking::runOnMachineFunction(MachineFunction &MF) {
  255. if (skipFunction(MF.getFunction()))
  256. return false;
  257. LLVM_DEBUG(dbgs() << "******** Machine Sinking ********\n");
  258. TII = MF.getSubtarget().getInstrInfo();
  259. TRI = MF.getSubtarget().getRegisterInfo();
  260. MRI = &MF.getRegInfo();
  261. DT = &getAnalysis<MachineDominatorTree>();
  262. PDT = &getAnalysis<MachinePostDominatorTree>();
  263. LI = &getAnalysis<MachineLoopInfo>();
  264. MBFI = UseBlockFreqInfo ? &getAnalysis<MachineBlockFrequencyInfo>() : nullptr;
  265. MBPI = &getAnalysis<MachineBranchProbabilityInfo>();
  266. AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
  267. bool EverMadeChange = false;
  268. while (true) {
  269. bool MadeChange = false;
  270. // Process all basic blocks.
  271. CEBCandidates.clear();
  272. ToSplit.clear();
  273. for (auto &MBB: MF)
  274. MadeChange |= ProcessBlock(MBB);
  275. // If we have anything we marked as toSplit, split it now.
  276. for (auto &Pair : ToSplit) {
  277. auto NewSucc = Pair.first->SplitCriticalEdge(Pair.second, *this);
  278. if (NewSucc != nullptr) {
  279. LLVM_DEBUG(dbgs() << " *** Splitting critical edge: "
  280. << printMBBReference(*Pair.first) << " -- "
  281. << printMBBReference(*NewSucc) << " -- "
  282. << printMBBReference(*Pair.second) << '\n');
  283. MadeChange = true;
  284. ++NumSplit;
  285. } else
  286. LLVM_DEBUG(dbgs() << " *** Not legal to break critical edge\n");
  287. }
  288. // If this iteration over the code changed anything, keep iterating.
  289. if (!MadeChange) break;
  290. EverMadeChange = true;
  291. }
  292. // Now clear any kill flags for recorded registers.
  293. for (auto I : RegsToClearKillFlags)
  294. MRI->clearKillFlags(I);
  295. RegsToClearKillFlags.clear();
  296. return EverMadeChange;
  297. }
  298. bool MachineSinking::ProcessBlock(MachineBasicBlock &MBB) {
  299. // Can't sink anything out of a block that has fewer than two successors.
  300. if (MBB.succ_size() <= 1 || MBB.empty()) return false;
  301. // Don't bother sinking code out of unreachable blocks. In addition to being
  302. // unprofitable, it can also lead to infinite looping, because in an
  303. // unreachable loop there may be nowhere to stop.
  304. if (!DT->isReachableFromEntry(&MBB)) return false;
  305. bool MadeChange = false;
  306. // Cache all successors, sorted by frequency info and loop depth.
  307. AllSuccsCache AllSuccessors;
  308. // Walk the basic block bottom-up. Remember if we saw a store.
  309. MachineBasicBlock::iterator I = MBB.end();
  310. --I;
  311. bool ProcessedBegin, SawStore = false;
  312. do {
  313. MachineInstr &MI = *I; // The instruction to sink.
  314. // Predecrement I (if it's not begin) so that it isn't invalidated by
  315. // sinking.
  316. ProcessedBegin = I == MBB.begin();
  317. if (!ProcessedBegin)
  318. --I;
  319. if (MI.isDebugInstr())
  320. continue;
  321. bool Joined = PerformTrivialForwardCoalescing(MI, &MBB);
  322. if (Joined) {
  323. MadeChange = true;
  324. continue;
  325. }
  326. if (SinkInstruction(MI, SawStore, AllSuccessors)) {
  327. ++NumSunk;
  328. MadeChange = true;
  329. }
  330. // If we just processed the first instruction in the block, we're done.
  331. } while (!ProcessedBegin);
  332. return MadeChange;
  333. }
  334. bool MachineSinking::isWorthBreakingCriticalEdge(MachineInstr &MI,
  335. MachineBasicBlock *From,
  336. MachineBasicBlock *To) {
  337. // FIXME: Need much better heuristics.
  338. // If the pass has already considered breaking this edge (during this pass
  339. // through the function), then let's go ahead and break it. This means
  340. // sinking multiple "cheap" instructions into the same block.
  341. if (!CEBCandidates.insert(std::make_pair(From, To)).second)
  342. return true;
  343. if (!MI.isCopy() && !TII->isAsCheapAsAMove(MI))
  344. return true;
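// If the edge is taken with probability at or below the threshold, it is cold
// enough that splitting it beats speculatively executing MI on the hot path.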
  345. if (From->isSuccessor(To) && MBPI->getEdgeProbability(From, To) <=
  346. BranchProbability(SplitEdgeProbabilityThreshold, 100))
  347. return true;
  348. // MI is cheap, so we probably don't want to break the critical edge for it.
  349. // However, if this would allow some definitions of its source operands
  350. // to be sunk then it's probably worth it.
  351. for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
  352. const MachineOperand &MO = MI.getOperand(i);
  353. if (!MO.isReg() || !MO.isUse())
  354. continue;
  355. Register Reg = MO.getReg();
  356. if (Reg == 0)
  357. continue;
  358. // We don't move live definitions of physical registers,
  359. // so sinking their uses won't enable any opportunities.
  360. if (Register::isPhysicalRegister(Reg))
  361. continue;
  362. // If this instruction is the only user of a virtual register,
  363. // check if breaking the edge will enable sinking
  364. // both this instruction and the defining instruction.
  365. if (MRI->hasOneNonDBGUse(Reg)) {
  366. // If the definition resides in the same MBB,
  367. // claim it's likely we can sink these together.
  368. // If the definition resides elsewhere, we aren't
  369. // blocking it from being sunk, so don't break the edge.
  370. MachineInstr *DefMI = MRI->getVRegDef(Reg);
  371. if (DefMI->getParent() == MI.getParent())
  372. return true;
  373. }
  374. }
  375. return false;
  376. }
  377. bool MachineSinking::PostponeSplitCriticalEdge(MachineInstr &MI,
  378. MachineBasicBlock *FromBB,
  379. MachineBasicBlock *ToBB,
  380. bool BreakPHIEdge) {
  381. if (!isWorthBreakingCriticalEdge(MI, FromBB, ToBB))
  382. return false;
  383. // Avoid breaking a backedge. From == To means a backedge of a single-BB loop.
  384. if (!SplitEdges || FromBB == ToBB)
  385. return false;
  386. // Check for backedges of more "complex" loops.
  387. if (LI->getLoopFor(FromBB) == LI->getLoopFor(ToBB) &&
  388. LI->isLoopHeader(ToBB))
  389. return false;
  390. // It's not always legal to break critical edges and sink the computation
  391. // to the edge.
  392. //
  393. // %bb.1:
  394. // v1024
  395. // Beq %bb.3
  396. // <fallthrough>
  397. // %bb.2:
  398. // ... no uses of v1024
  399. // <fallthrough>
  400. // %bb.3:
  401. // ...
  402. // = v1024
  403. //
  404. // If %bb.1 -> %bb.3 edge is broken and computation of v1024 is inserted:
  405. //
  406. // %bb.1:
  407. // ...
  408. // Bne %bb.2
  409. // %bb.4:
  410. // v1024 =
  411. // B %bb.3
  412. // %bb.2:
  413. // ... no uses of v1024
  414. // <fallthrough>
  415. // %bb.3:
  416. // ...
  417. // = v1024
  418. //
  419. // This is incorrect since v1024 is not computed along the %bb.1->%bb.2->%bb.3
  420. // flow. We need to ensure the new basic block where the computation is
  421. // sunk to dominates all the uses.
  422. // It's only legal to break critical edge and sink the computation to the
  423. // new block if all the predecessors of "To", except for "From", are
  424. // not dominated by "From". Given SSA property, this means these
  425. // predecessors are dominated by "To".
  426. //
  427. // There is no need to do this check if all the uses are PHI nodes. PHI
  428. // sources are only defined on the specific predecessor edges.
  429. if (!BreakPHIEdge) {
  430. for (MachineBasicBlock::pred_iterator PI = ToBB->pred_begin(),
  431. E = ToBB->pred_end(); PI != E; ++PI) {
  432. if (*PI == FromBB)
  433. continue;
  434. if (!DT->dominates(ToBB, *PI))
  435. return false;
  436. }
  437. }
  438. ToSplit.insert(std::make_pair(FromBB, ToBB));
  439. return true;
  440. }
  441. /// isProfitableToSinkTo - Return true if it is profitable to sink MI.
  442. bool MachineSinking::isProfitableToSinkTo(unsigned Reg, MachineInstr &MI,
  443. MachineBasicBlock *MBB,
  444. MachineBasicBlock *SuccToSinkTo,
  445. AllSuccsCache &AllSuccessors) {
  446. assert (SuccToSinkTo && "Invalid SinkTo Candidate BB");
  447. if (MBB == SuccToSinkTo)
  448. return false;
  449. // It is profitable if SuccToSinkTo does not post dominate current block.
  450. if (!PDT->dominates(SuccToSinkTo, MBB))
  451. return true;
  452. // It is profitable to sink an instruction from a deeper loop to a shallower
  453. // loop, even if the latter post-dominates the former (PR21115).
  454. if (LI->getLoopDepth(MBB) > LI->getLoopDepth(SuccToSinkTo))
  455. return true;
  456. // Check whether the only uses in the post-dominated block are PHI instructions.
  457. bool NonPHIUse = false;
  458. for (MachineInstr &UseInst : MRI->use_nodbg_instructions(Reg)) {
  459. MachineBasicBlock *UseBlock = UseInst.getParent();
  460. if (UseBlock == SuccToSinkTo && !UseInst.isPHI())
  461. NonPHIUse = true;
  462. }
  463. if (!NonPHIUse)
  464. return true;
  465. // Even if SuccToSinkTo post-dominates the current block, it may still be
  466. // profitable if MI can be sunk further into another block in a later round.
  467. bool BreakPHIEdge = false;
  468. // FIXME - If finding a successor is compile-time expensive, cache the results.
  469. if (MachineBasicBlock *MBB2 =
  470. FindSuccToSinkTo(MI, SuccToSinkTo, BreakPHIEdge, AllSuccessors))
  471. return isProfitableToSinkTo(Reg, MI, SuccToSinkTo, MBB2, AllSuccessors);
  472. // If SuccToSinkTo is final destination and it is a post dominator of current
  473. // block then it is not profitable to sink MI into SuccToSinkTo block.
  474. return false;
  475. }
  476. /// Get the sorted sequence of successors for this MachineBasicBlock, possibly
  477. /// computing it if it was not already cached.
  478. SmallVector<MachineBasicBlock *, 4> &
  479. MachineSinking::GetAllSortedSuccessors(MachineInstr &MI, MachineBasicBlock *MBB,
  480. AllSuccsCache &AllSuccessors) const {
  481. // Do we have the sorted successors in cache?
  482. auto Succs = AllSuccessors.find(MBB);
  483. if (Succs != AllSuccessors.end())
  484. return Succs->second;
  485. SmallVector<MachineBasicBlock *, 4> AllSuccs(MBB->succ_begin(),
  486. MBB->succ_end());
  487. // Handle cases where sinking can happen but where the sink point isn't a
  488. // successor. For example:
  489. //
  490. // x = computation
  491. // if () {} else {}
  492. // use x
  493. //
  494. const std::vector<MachineDomTreeNode *> &Children =
  495. DT->getNode(MBB)->getChildren();
  496. for (const auto &DTChild : Children)
  497. // DomTree children of MBB that have MBB as immediate dominator are added.
  498. if (DTChild->getIDom()->getBlock() == MI.getParent() &&
  499. // Skip MBBs already added to the AllSuccs vector above.
  500. !MBB->isSuccessor(DTChild->getBlock()))
  501. AllSuccs.push_back(DTChild->getBlock());
  502. // Sort Successors according to their loop depth or block frequency info.
  503. llvm::stable_sort(
  504. AllSuccs, [this](const MachineBasicBlock *L, const MachineBasicBlock *R) {
  505. uint64_t LHSFreq = MBFI ? MBFI->getBlockFreq(L).getFrequency() : 0;
  506. uint64_t RHSFreq = MBFI ? MBFI->getBlockFreq(R).getFrequency() : 0;
  507. bool HasBlockFreq = LHSFreq != 0 && RHSFreq != 0;
  508. return HasBlockFreq ? LHSFreq < RHSFreq
  509. : LI->getLoopDepth(L) < LI->getLoopDepth(R);
  510. });
  511. auto it = AllSuccessors.insert(std::make_pair(MBB, AllSuccs));
  512. return it.first->second;
  513. }
  514. /// FindSuccToSinkTo - Find a successor to sink this instruction to.
  515. MachineBasicBlock *
  516. MachineSinking::FindSuccToSinkTo(MachineInstr &MI, MachineBasicBlock *MBB,
  517. bool &BreakPHIEdge,
  518. AllSuccsCache &AllSuccessors) {
  519. assert (MBB && "Invalid MachineBasicBlock!");
  520. // Loop over all the operands of the specified instruction. If there is
  521. // anything we can't handle, bail out.
  522. // SuccToSinkTo - This is the successor to sink this instruction to, once we
  523. // decide.
  524. MachineBasicBlock *SuccToSinkTo = nullptr;
  525. for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
  526. const MachineOperand &MO = MI.getOperand(i);
  527. if (!MO.isReg()) continue; // Ignore non-register operands.
  528. Register Reg = MO.getReg();
  529. if (Reg == 0) continue;
  530. if (Register::isPhysicalRegister(Reg)) {
  531. if (MO.isUse()) {
  532. // If the physreg has no defs anywhere, it's just an ambient register
  533. // and we can freely move its uses. Alternatively, if it's allocatable,
  534. // it could get allocated to something with a def during allocation.
  535. if (!MRI->isConstantPhysReg(Reg))
  536. return nullptr;
  537. } else if (!MO.isDead()) {
  538. // A def that isn't dead. We can't move it.
  539. return nullptr;
  540. }
  541. } else {
  542. // Virtual register uses are always safe to sink.
  543. if (MO.isUse()) continue;
  544. // If it's not safe to move defs of the register class, then abort.
  545. if (!TII->isSafeToMoveRegClassDefs(MRI->getRegClass(Reg)))
  546. return nullptr;
  547. // Virtual register defs can only be sunk if all their uses are in blocks
  548. // dominated by one of the successors.
  549. if (SuccToSinkTo) {
  550. // If a previous operand picked a block to sink to, then this operand
  551. // must be sinkable to the same block.
  552. bool LocalUse = false;
  553. if (!AllUsesDominatedByBlock(Reg, SuccToSinkTo, MBB,
  554. BreakPHIEdge, LocalUse))
  555. return nullptr;
  556. continue;
  557. }
  558. // Otherwise, we should look at all the successors and decide which one
  559. // we should sink to. If we have reliable block frequency information
  560. // (frequency != 0) available, give successors with smaller frequencies
  561. // higher priority, otherwise prioritize smaller loop depths.
  562. for (MachineBasicBlock *SuccBlock :
  563. GetAllSortedSuccessors(MI, MBB, AllSuccessors)) {
  564. bool LocalUse = false;
  565. if (AllUsesDominatedByBlock(Reg, SuccBlock, MBB,
  566. BreakPHIEdge, LocalUse)) {
  567. SuccToSinkTo = SuccBlock;
  568. break;
  569. }
  570. if (LocalUse)
  571. // Def is used locally, it's never safe to move this def.
  572. return nullptr;
  573. }
  574. // If we couldn't find a block to sink to, ignore this instruction.
  575. if (!SuccToSinkTo)
  576. return nullptr;
  577. if (!isProfitableToSinkTo(Reg, MI, MBB, SuccToSinkTo, AllSuccessors))
  578. return nullptr;
  579. }
  580. }
  581. // It is not possible to sink an instruction into its own block. This can
  582. // happen with loops.
  583. if (MBB == SuccToSinkTo)
  584. return nullptr;
  585. // It's not safe to sink instructions to EH landing pad. Control flow into
  586. // landing pad is implicitly defined.
  587. if (SuccToSinkTo && SuccToSinkTo->isEHPad())
  588. return nullptr;
  589. return SuccToSinkTo;
  590. }
  591. /// Return true if MI is likely to be usable as a memory operation by the
  592. /// implicit null check optimization.
  593. ///
  594. /// This is a "best effort" heuristic, and should not be relied upon for
  595. /// correctness. This returning true does not guarantee that the implicit null
  596. /// check optimization is legal over MI, and this returning false does not
  597. /// guarantee MI cannot possibly be used to do a null check.
  598. static bool SinkingPreventsImplicitNullCheck(MachineInstr &MI,
  599. const TargetInstrInfo *TII,
  600. const TargetRegisterInfo *TRI) {
  601. using MachineBranchPredicate = TargetInstrInfo::MachineBranchPredicate;
  602. auto *MBB = MI.getParent();
  603. if (MBB->pred_size() != 1)
  604. return false;
  605. auto *PredMBB = *MBB->pred_begin();
  606. auto *PredBB = PredMBB->getBasicBlock();
  607. // Frontends that don't use implicit null checks have no reason to emit
  608. // branches with make.implicit metadata, and this function should always
  609. // return false for them.
  610. if (!PredBB ||
  611. !PredBB->getTerminator()->getMetadata(LLVMContext::MD_make_implicit))
  612. return false;
  613. const MachineOperand *BaseOp;
  614. int64_t Offset;
  615. if (!TII->getMemOperandWithOffset(MI, BaseOp, Offset, TRI))
  616. return false;
  617. if (!BaseOp->isReg())
  618. return false;
  619. if (!(MI.mayLoad() && !MI.isPredicable()))
  620. return false;
  621. MachineBranchPredicate MBP;
  622. if (TII->analyzeBranchPredicate(*PredMBB, MBP, false))
  623. return false;
  624. return MBP.LHS.isReg() && MBP.RHS.isImm() && MBP.RHS.getImm() == 0 &&
  625. (MBP.Predicate == MachineBranchPredicate::PRED_NE ||
  626. MBP.Predicate == MachineBranchPredicate::PRED_EQ) &&
  627. MBP.LHS.getReg() == BaseOp->getReg();
  628. }
  629. /// Sink an instruction and its associated debug instructions. If the debug
  630. /// instructions to be sunk are already known, they can be provided in DbgVals.
  631. static void performSink(MachineInstr &MI, MachineBasicBlock &SuccToSinkTo,
  632. MachineBasicBlock::iterator InsertPos,
  633. SmallVectorImpl<MachineInstr *> *DbgVals = nullptr) {
  634. // If debug values are provided use those, otherwise call collectDebugValues.
  635. SmallVector<MachineInstr *, 2> DbgValuesToSink;
  636. if (DbgVals)
  637. DbgValuesToSink.insert(DbgValuesToSink.begin(),
  638. DbgVals->begin(), DbgVals->end());
  639. else
  640. MI.collectDebugValues(DbgValuesToSink);
  641. // If we cannot find a location to use (merge with), then we erase the debug
  642. // location to prevent debug-info driven tools from potentially reporting
  643. // wrong location information.
  644. if (!SuccToSinkTo.empty() && InsertPos != SuccToSinkTo.end())
  645. MI.setDebugLoc(DILocation::getMergedLocation(MI.getDebugLoc(),
  646. InsertPos->getDebugLoc()));
  647. else
  648. MI.setDebugLoc(DebugLoc());
  649. // Move the instruction.
  650. MachineBasicBlock *ParentBlock = MI.getParent();
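// splice takes a half-open range, so [MI, std::next(MI)) moves exactly MI.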
  651. SuccToSinkTo.splice(InsertPos, ParentBlock, MI,
  652. ++MachineBasicBlock::iterator(MI));
  653. // Move previously adjacent debug value instructions to the insert position.
  654. for (SmallVectorImpl<MachineInstr *>::iterator DBI = DbgValuesToSink.begin(),
  655. DBE = DbgValuesToSink.end();
  656. DBI != DBE; ++DBI) {
  657. MachineInstr *DbgMI = *DBI;
  658. SuccToSinkTo.splice(InsertPos, ParentBlock, DbgMI,
  659. ++MachineBasicBlock::iterator(DbgMI));
  660. }
  661. }
  662. /// SinkInstruction - Determine whether it is safe to sink the specified machine
  663. /// instruction out of its current block into a successor.
  664. bool MachineSinking::SinkInstruction(MachineInstr &MI, bool &SawStore,
  665. AllSuccsCache &AllSuccessors) {
  666. // Don't sink instructions that the target prefers not to sink.
  667. if (!TII->shouldSink(MI))
  668. return false;
  669. // Check if it's safe to move the instruction.
  670. if (!MI.isSafeToMove(AA, SawStore))
  671. return false;
  672. // Convergent operations may not be made control-dependent on additional
  673. // values.
  674. if (MI.isConvergent())
  675. return false;
  676. // Don't break implicit null checks. This is a performance heuristic, and not
  677. // required for correctness.
  678. if (SinkingPreventsImplicitNullCheck(MI, TII, TRI))
  679. return false;
  680. // FIXME: This should include support for sinking instructions within the
  681. // block they are currently in to shorten the live ranges. We often get
  682. // instructions sunk into the top of a large block, but it would be better to
  683. // also sink them down before their first use in the block. This xform has to
  684. // be careful not to *increase* register pressure though, e.g. sinking
  685. // "x = y + z" down if it kills y and z would increase the live ranges of y
  686. // and z and only shrink the live range of x.
  687. bool BreakPHIEdge = false;
  688. MachineBasicBlock *ParentBlock = MI.getParent();
  689. MachineBasicBlock *SuccToSinkTo =
  690. FindSuccToSinkTo(MI, ParentBlock, BreakPHIEdge, AllSuccessors);
  691. // If there are no outputs, it must have side-effects.
  692. if (!SuccToSinkTo)
  693. return false;
  694. // If the instruction to move defines a dead physical register which is live
  695. // when leaving the basic block, don't move it because it could turn into a
  696. // "zombie" define of that preg. E.g., EFLAGS. (<rdar://problem/8030636>)
  697. for (unsigned I = 0, E = MI.getNumOperands(); I != E; ++I) {
  698. const MachineOperand &MO = MI.getOperand(I);
  699. if (!MO.isReg()) continue;
  700. Register Reg = MO.getReg();
  701. if (Reg == 0 || !Register::isPhysicalRegister(Reg))
  702. continue;
  703. if (SuccToSinkTo->isLiveIn(Reg))
  704. return false;
  705. }
  706. LLVM_DEBUG(dbgs() << "Sink instr " << MI << "\tinto block " << *SuccToSinkTo);
  707. // If the block has multiple predecessors, this is a critical edge.
  708. // Decide if we can sink along it or need to break the edge.
  709. if (SuccToSinkTo->pred_size() > 1) {
  710. // We cannot sink a load across a critical edge - there may be stores in
  711. // other code paths.
  712. bool TryBreak = false;
  713. bool store = true;
  714. if (!MI.isSafeToMove(AA, store)) {
  715. LLVM_DEBUG(dbgs() << " *** NOTE: Won't sink load along critical edge.\n");
  716. TryBreak = true;
  717. }
  718. // We don't want to sink across a critical edge if we don't dominate the
  719. // successor. We could be introducing calculations to new code paths.
  720. if (!TryBreak && !DT->dominates(ParentBlock, SuccToSinkTo)) {
  721. LLVM_DEBUG(dbgs() << " *** NOTE: Critical edge found\n");
  722. TryBreak = true;
  723. }
  724. // Don't sink instructions into a loop.
  725. if (!TryBreak && LI->isLoopHeader(SuccToSinkTo)) {
  726. LLVM_DEBUG(dbgs() << " *** NOTE: Loop header found\n");
  727. TryBreak = true;
  728. }
  729. // Otherwise we are OK with sinking along a critical edge.
  730. if (!TryBreak)
  731. LLVM_DEBUG(dbgs() << "Sinking along critical edge.\n");
  732. else {
  733. // Mark this edge as to be split.
  734. // If the edge can actually be split, the next iteration of the main loop
  735. // will sink MI in the newly created block.
  736. bool Status =
  737. PostponeSplitCriticalEdge(MI, ParentBlock, SuccToSinkTo, BreakPHIEdge);
  738. if (!Status)
  739. LLVM_DEBUG(dbgs() << " *** PUNTING: Not legal or profitable to "
  740. "break critical edge\n");
  741. // The instruction will not be sunk this time.
  742. return false;
  743. }
  744. }
  745. if (BreakPHIEdge) {
  746. // BreakPHIEdge is true if all the uses are in the successor MBB being
  747. // sunk into and they are all PHI nodes. In this case, machine-sink must
  748. // break the critical edge first.
  749. bool Status = PostponeSplitCriticalEdge(MI, ParentBlock,
  750. SuccToSinkTo, BreakPHIEdge);
  751. if (!Status)
  752. LLVM_DEBUG(dbgs() << " *** PUNTING: Not legal or profitable to "
  753. "break critical edge\n");
  754. // The instruction will not be sunk this time.
  755. return false;
  756. }
  757. // Determine where to insert into. Skip phi nodes.
  758. MachineBasicBlock::iterator InsertPos = SuccToSinkTo->begin();
  759. while (InsertPos != SuccToSinkTo->end() && InsertPos->isPHI())
  760. ++InsertPos;
  761. performSink(MI, *SuccToSinkTo, InsertPos);
  762. // Conservatively, clear any kill flags, since it's possible that they are no
  763. // longer correct.
  764. // Note that we have to clear the kill flags for any register this instruction
  765. // uses as we may sink over another instruction which currently kills the
  766. // used registers.
  767. for (MachineOperand &MO : MI.operands()) {
  768. if (MO.isReg() && MO.isUse())
  769. RegsToClearKillFlags.set(MO.getReg()); // Remember to clear kill flags.
  770. }
  771. return true;
  772. }
  773. //===----------------------------------------------------------------------===//
  774. // This pass is not intended to be a replacement or a complete alternative
  775. // for the pre-ra machine sink pass. It is only designed to sink COPY
  776. // instructions which should be handled after RA.
  777. //
  778. // This pass sinks COPY instructions into a successor block, if the COPY is not
  779. // used in the current block and the COPY is live-in to a single successor
  780. // (i.e., doesn't require the COPY to be duplicated). This avoids executing the
  781. // copy on paths where its result isn't needed. This also exposes
  782. // additional opportunities for dead copy elimination and shrink wrapping.
  783. //
  784. // These copies were either not handled by or are inserted after the MachineSink
  785. // pass. As an example of the former case, the MachineSink pass cannot sink
  786. // COPY instructions with allocatable source registers; for AArch64 these types
  787. // of copy instructions are frequently used to move function parameters (PhyReg)
  788. // into virtual registers in the entry block.
  789. //
  790. // For the machine IR below, this pass will sink the copy defining %w19 out of
  791. // the entry block into its successor (%bb.1), because %w19 is live-in only in %bb.1.
  792. // %bb.0:
  793. // %wzr = SUBSWri %w1, 1
  794. // %w19 = COPY %w0
  795. // Bcc 11, %bb.2
  796. // %bb.1:
  797. // Live Ins: %w19
  798. // BL @fun
  799. // %w0 = ADDWrr %w0, %w19
  800. // RET %w0
  801. // %bb.2:
  802. // %w0 = COPY %wzr
  803. // RET %w0
  804. // As we sink %w19 (CSR in AArch64) into %bb.1, the shrink-wrapping pass will be
  805. // able to see %bb.0 as a candidate.
  806. //===----------------------------------------------------------------------===//
  807. namespace {
  808. class PostRAMachineSinking : public MachineFunctionPass {
  809. public:
  810. bool runOnMachineFunction(MachineFunction &MF) override;
  811. static char ID;
  812. PostRAMachineSinking() : MachineFunctionPass(ID) {}
  813. StringRef getPassName() const override { return "PostRA Machine Sink"; }
  814. void getAnalysisUsage(AnalysisUsage &AU) const override {
  815. AU.setPreservesCFG();
  816. MachineFunctionPass::getAnalysisUsage(AU);
  817. }
  818. MachineFunctionProperties getRequiredProperties() const override {
  819. return MachineFunctionProperties().set(
  820. MachineFunctionProperties::Property::NoVRegs);
  821. }
  822. private:
  823. /// Track which register units have been modified and used.
  824. LiveRegUnits ModifiedRegUnits, UsedRegUnits;
  825. /// Track DBG_VALUEs of (unmodified) register units. Each DBG_VALUE has an
  826. /// entry in this map for each unit it touches.
  827. DenseMap<unsigned, TinyPtrVector<MachineInstr *>> SeenDbgInstrs;
  828. /// Sink COPY instructions that are unused in their defining block down to
  829. /// their uses in successor blocks.
  830. bool tryToSinkCopy(MachineBasicBlock &BB, MachineFunction &MF,
  831. const TargetRegisterInfo *TRI, const TargetInstrInfo *TII);
  832. };
  833. } // namespace
  834. char PostRAMachineSinking::ID = 0;
  835. char &llvm::PostRAMachineSinkingID = PostRAMachineSinking::ID;
  836. INITIALIZE_PASS(PostRAMachineSinking, "postra-machine-sink",
  837. "PostRA Machine Sink", false, false)
  838. static bool aliasWithRegsInLiveIn(MachineBasicBlock &MBB, unsigned Reg,
  839. const TargetRegisterInfo *TRI) {
  840. LiveRegUnits LiveInRegUnits(*TRI);
  841. LiveInRegUnits.addLiveIns(MBB);
  842. return !LiveInRegUnits.available(Reg);
  843. }
  844. static MachineBasicBlock *
  845. getSingleLiveInSuccBB(MachineBasicBlock &CurBB,
  846. const SmallPtrSetImpl<MachineBasicBlock *> &SinkableBBs,
  847. unsigned Reg, const TargetRegisterInfo *TRI) {
  848. // Try to find a single sinkable successor in which Reg is live-in.
  849. MachineBasicBlock *BB = nullptr;
  850. for (auto *SI : SinkableBBs) {
  851. if (aliasWithRegsInLiveIn(*SI, Reg, TRI)) {
  852. // If BB is set here, Reg is live-in to at least two sinkable successors,
  853. // so quit.
  854. if (BB)
  855. return nullptr;
  856. BB = SI;
  857. }
  858. }
  859. // Reg is not live-in to any sinkable successors.
  860. if (!BB)
  861. return nullptr;
  862. // Check if any register aliased with Reg is live-in in other successors.
  863. for (auto *SI : CurBB.successors()) {
  864. if (!SinkableBBs.count(SI) && aliasWithRegsInLiveIn(*SI, Reg, TRI))
  865. return nullptr;
  866. }
  867. return BB;
  868. }
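/// Return the single sinkable successor in which every register in
/// \p DefedRegsInCopy is live-in, or nullptr if no such unique successor exists.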
  869. static MachineBasicBlock *
  870. getSingleLiveInSuccBB(MachineBasicBlock &CurBB,
  871. const SmallPtrSetImpl<MachineBasicBlock *> &SinkableBBs,
  872. ArrayRef<unsigned> DefedRegsInCopy,
  873. const TargetRegisterInfo *TRI) {
  874. MachineBasicBlock *SingleBB = nullptr;
  875. for (auto DefReg : DefedRegsInCopy) {
  876. MachineBasicBlock *BB =
  877. getSingleLiveInSuccBB(CurBB, SinkableBBs, DefReg, TRI);
  878. if (!BB || (SingleBB && SingleBB != BB))
  879. return nullptr;
  880. SingleBB = BB;
  881. }
  882. return SingleBB;
  883. }
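/// The COPY \p MI is about to be sunk out of \p CurBB. If one of its source
/// registers is killed by a later instruction in CurBB, clear that kill flag
/// and mark the COPY's use as the kill instead: once sunk, the COPY becomes
/// the last user of the register on this path.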
  884. static void clearKillFlags(MachineInstr *MI, MachineBasicBlock &CurBB,
  885. SmallVectorImpl<unsigned> &UsedOpsInCopy,
  886. LiveRegUnits &UsedRegUnits,
  887. const TargetRegisterInfo *TRI) {
  888. for (auto U : UsedOpsInCopy) {
  889. MachineOperand &MO = MI->getOperand(U);
  890. Register SrcReg = MO.getReg();
  891. if (!UsedRegUnits.available(SrcReg)) {
  892. MachineBasicBlock::iterator NI = std::next(MI->getIterator());
  893. for (MachineInstr &UI : make_range(NI, CurBB.end())) {
  894. if (UI.killsRegister(SrcReg, TRI)) {
  895. UI.clearRegisterKills(SrcReg, TRI);
  896. MO.setIsKill(true);
  897. break;
  898. }
  899. }
  900. }
  901. }
  902. }
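/// Update \p SuccBB's live-in list after sinking the COPY: registers defined by
/// the COPY (and their sub-registers) are no longer live-in, while the COPY's
/// source registers become live-in to SuccBB.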
  903. static void updateLiveIn(MachineInstr *MI, MachineBasicBlock *SuccBB,
  904. SmallVectorImpl<unsigned> &UsedOpsInCopy,
  905. SmallVectorImpl<unsigned> &DefedRegsInCopy) {
  906. MachineFunction &MF = *SuccBB->getParent();
  907. const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
  908. for (unsigned DefReg : DefedRegsInCopy)
  909. for (MCSubRegIterator S(DefReg, TRI, true); S.isValid(); ++S)
  910. SuccBB->removeLiveIn(*S);
  911. for (auto U : UsedOpsInCopy) {
  912. Register Reg = MI->getOperand(U).getReg();
  913. if (!SuccBB->isLiveIn(Reg))
  914. SuccBB->addLiveIn(Reg);
  915. }
  916. }
  917. static bool hasRegisterDependency(MachineInstr *MI,
  918. SmallVectorImpl<unsigned> &UsedOpsInCopy,
  919. SmallVectorImpl<unsigned> &DefedRegsInCopy,
  920. LiveRegUnits &ModifiedRegUnits,
  921. LiveRegUnits &UsedRegUnits) {
  922. bool HasRegDependency = false;
  923. for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
  924. MachineOperand &MO = MI->getOperand(i);
  925. if (!MO.isReg())
  926. continue;
  927. Register Reg = MO.getReg();
  928. if (!Reg)
  929. continue;
  930. if (MO.isDef()) {
  931. if (!ModifiedRegUnits.available(Reg) || !UsedRegUnits.available(Reg)) {
  932. HasRegDependency = true;
  933. break;
  934. }
  935. DefedRegsInCopy.push_back(Reg);
  936. // FIXME: instead of isUse(), readsReg() would be a better fix here.
  937. // For example, we can ignore modifications in reg with undef. However,
  938. // it's not perfectly clear if skipping the internal read is safe in all
  939. // other targets.
  940. } else if (MO.isUse()) {
  941. if (!ModifiedRegUnits.available(Reg)) {
  942. HasRegDependency = true;
  943. break;
  944. }
  945. UsedOpsInCopy.push_back(i);
  946. }
  947. }
  948. return HasRegDependency;
  949. }
  950. static SmallSet<unsigned, 4> getRegUnits(unsigned Reg,
  951. const TargetRegisterInfo *TRI) {
  952. SmallSet<unsigned, 4> RegUnits;
  953. for (auto RI = MCRegUnitIterator(Reg, TRI); RI.isValid(); ++RI)
  954. RegUnits.insert(*RI);
  955. return RegUnits;
  956. }
  957. bool PostRAMachineSinking::tryToSinkCopy(MachineBasicBlock &CurBB,
  958. MachineFunction &MF,
  959. const TargetRegisterInfo *TRI,
  960. const TargetInstrInfo *TII) {
  961. SmallPtrSet<MachineBasicBlock *, 2> SinkableBBs;
  962. // FIXME: For now, we sink only to a successor which has a single predecessor
  963. // so that we can directly sink COPY instructions to the successor without
  964. // adding any new block or branch instruction.
  965. for (MachineBasicBlock *SI : CurBB.successors())
  966. if (!SI->livein_empty() && SI->pred_size() == 1)
  967. SinkableBBs.insert(SI);
  968. if (SinkableBBs.empty())
  969. return false;
  970. bool Changed = false;
  971. // Track which registers have been modified and used between the end of the
  972. // block and the current instruction.
  973. ModifiedRegUnits.clear();
  974. UsedRegUnits.clear();
  975. SeenDbgInstrs.clear();
  976. for (auto I = CurBB.rbegin(), E = CurBB.rend(); I != E;) {
  977. MachineInstr *MI = &*I;
  978. ++I;
  979. // Track the operand indexes of the COPY's register uses.
  980. SmallVector<unsigned, 2> UsedOpsInCopy;
  981. // Track the register numbers defined by the COPY.
  982. SmallVector<unsigned, 2> DefedRegsInCopy;
  983. // We must sink this DBG_VALUE if its operand is sunk. To avoid searching
  984. // for DBG_VALUEs later, record them when they're encountered.
  985. if (MI->isDebugValue()) {
  986. auto &MO = MI->getOperand(0);
  987. if (MO.isReg() && Register::isPhysicalRegister(MO.getReg())) {
  988. // Bail if we can already tell the sink would be rejected, rather
  989. // than needlessly accumulating lots of DBG_VALUEs.
  990. if (hasRegisterDependency(MI, UsedOpsInCopy, DefedRegsInCopy,
  991. ModifiedRegUnits, UsedRegUnits))
  992. continue;
  993. // Record debug use of each reg unit.
  994. SmallSet<unsigned, 4> Units = getRegUnits(MO.getReg(), TRI);
  995. for (unsigned Reg : Units)
  996. SeenDbgInstrs[Reg].push_back(MI);
  997. }
  998. continue;
  999. }
  1000. if (MI->isDebugInstr())
  1001. continue;
  1002. // Do not move any instruction across a function call.
  1003. if (MI->isCall())
  1004. return false;
  1005. if (!MI->isCopy() || !MI->getOperand(0).isRenamable()) {
  1006. LiveRegUnits::accumulateUsedDefed(*MI, ModifiedRegUnits, UsedRegUnits,
  1007. TRI);
  1008. continue;
  1009. }
  1010. // Don't sink the COPY if it would violate a register dependency.
  1011. if (hasRegisterDependency(MI, UsedOpsInCopy, DefedRegsInCopy,
  1012. ModifiedRegUnits, UsedRegUnits)) {
  1013. LiveRegUnits::accumulateUsedDefed(*MI, ModifiedRegUnits, UsedRegUnits,
  1014. TRI);
  1015. continue;
  1016. }
  1017. assert((!UsedOpsInCopy.empty() && !DefedRegsInCopy.empty()) &&
  1018. "Unexpect SrcReg or DefReg");
  1019. MachineBasicBlock *SuccBB =
  1020. getSingleLiveInSuccBB(CurBB, SinkableBBs, DefedRegsInCopy, TRI);
  1021. // Don't sink if we cannot find a single sinkable successor in which Reg
  1022. // is live-in.
  1023. if (!SuccBB) {
  1024. LiveRegUnits::accumulateUsedDefed(*MI, ModifiedRegUnits, UsedRegUnits,
  1025. TRI);
  1026. continue;
  1027. }
  1028. assert((SuccBB->pred_size() == 1 && *SuccBB->pred_begin() == &CurBB) &&
  1029. "Unexpected predecessor");
  1030. // Collect DBG_VALUEs that must sink with this copy. We've previously
  1031. // recorded which reg units DBG_VALUEs read; if this instruction writes any
  1032. // of those units, then the corresponding DBG_VALUEs must sink.
  1033. SetVector<MachineInstr *> DbgValsToSinkSet;
  1034. SmallVector<MachineInstr *, 4> DbgValsToSink;
  1035. for (auto &MO : MI->operands()) {
  1036. if (!MO.isReg() || !MO.isDef())
  1037. continue;
  1038. SmallSet<unsigned, 4> Units = getRegUnits(MO.getReg(), TRI);
  1039. for (unsigned Reg : Units)
  1040. for (auto *MI : SeenDbgInstrs.lookup(Reg))
  1041. DbgValsToSinkSet.insert(MI);
  1042. }
  1043. DbgValsToSink.insert(DbgValsToSink.begin(), DbgValsToSinkSet.begin(),
  1044. DbgValsToSinkSet.end());
  1045. // Clear the kill flag if SrcReg is killed between MI and the end of the
  1046. // block.
  1047. clearKillFlags(MI, CurBB, UsedOpsInCopy, UsedRegUnits, TRI);
  1048. MachineBasicBlock::iterator InsertPos = SuccBB->getFirstNonPHI();
  1049. performSink(*MI, *SuccBB, InsertPos, &DbgValsToSink);
  1050. updateLiveIn(MI, SuccBB, UsedOpsInCopy, DefedRegsInCopy);
  1051. Changed = true;
  1052. ++NumPostRACopySink;
  1053. }
  1054. return Changed;
  1055. }
  1056. bool PostRAMachineSinking::runOnMachineFunction(MachineFunction &MF) {
  1057. if (skipFunction(MF.getFunction()))
  1058. return false;
  1059. bool Changed = false;
  1060. const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
  1061. const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
  1062. ModifiedRegUnits.init(*TRI);
  1063. UsedRegUnits.init(*TRI);
  1064. for (auto &BB : MF)
  1065. Changed |= tryToSinkCopy(BB, MF, TRI, TII);
  1066. return Changed;
  1067. }