MachineLICM.cpp 53 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
7127812791280128112821283128412851286128712881289129012911292129312941295129612971298129913001301130213031304130513061307130813091310131113121313131413151316131713181319132013211322132313241325132613271328132913301331133213331334133513361337133813391340134113421343134413451346134713481349135013511352135313541355135613571358135913601361136213631364136513661367136813691370137113721373137413751376137713781379138013811382138313841385138613871388138913901391139213931394139513961397139813991400140114021403140414051406140714081409141014111412141314141415141614171418141914201421142214231424142514261427142814291430143114321433143414351436143714381439144014411442144314441445144614471448144914501451145214531454145514561457145814591460146114621463146414651466146714681469147014711472147314741475147614771478147914801481148214831484148514861487148814891490149114921493149414951496149714981499150015011502150315041505150615071508150915101511151215131514151515161517151815191520152115221523152415251526152715281529
  1. //===- MachineLICM.cpp - Machine Loop Invariant Code Motion Pass ----------===//
  2. //
  3. // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
  4. // See https://llvm.org/LICENSE.txt for license information.
  5. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
  6. //
  7. //===----------------------------------------------------------------------===//
  8. //
  9. // This pass performs loop invariant code motion on machine instructions. We
  10. // attempt to remove as much code from the body of a loop as possible.
  11. //
  12. // This pass is not intended to be a replacement or a complete alternative
  13. // for the LLVM-IR-level LICM pass. It is only designed to hoist simple
  14. // constructs that are not exposed before lowering and instruction selection.
  15. //
  16. //===----------------------------------------------------------------------===//
  17. #include "llvm/ADT/BitVector.h"
  18. #include "llvm/ADT/DenseMap.h"
  19. #include "llvm/ADT/STLExtras.h"
  20. #include "llvm/ADT/SmallSet.h"
  21. #include "llvm/ADT/SmallVector.h"
  22. #include "llvm/ADT/Statistic.h"
  23. #include "llvm/Analysis/AliasAnalysis.h"
  24. #include "llvm/CodeGen/MachineBasicBlock.h"
  25. #include "llvm/CodeGen/MachineDominators.h"
  26. #include "llvm/CodeGen/MachineFrameInfo.h"
  27. #include "llvm/CodeGen/MachineFunction.h"
  28. #include "llvm/CodeGen/MachineFunctionPass.h"
  29. #include "llvm/CodeGen/MachineInstr.h"
  30. #include "llvm/CodeGen/MachineLoopInfo.h"
  31. #include "llvm/CodeGen/MachineMemOperand.h"
  32. #include "llvm/CodeGen/MachineOperand.h"
  33. #include "llvm/CodeGen/MachineRegisterInfo.h"
  34. #include "llvm/CodeGen/PseudoSourceValue.h"
  35. #include "llvm/CodeGen/TargetInstrInfo.h"
  36. #include "llvm/CodeGen/TargetLowering.h"
  37. #include "llvm/CodeGen/TargetRegisterInfo.h"
  38. #include "llvm/CodeGen/TargetSchedule.h"
  39. #include "llvm/CodeGen/TargetSubtargetInfo.h"
  40. #include "llvm/IR/DebugLoc.h"
  41. #include "llvm/MC/MCInstrDesc.h"
  42. #include "llvm/MC/MCRegisterInfo.h"
  43. #include "llvm/Pass.h"
  44. #include "llvm/Support/Casting.h"
  45. #include "llvm/Support/CommandLine.h"
  46. #include "llvm/Support/Debug.h"
  47. #include "llvm/Support/raw_ostream.h"
  48. #include <algorithm>
  49. #include <cassert>
  50. #include <limits>
  51. #include <vector>
  52. using namespace llvm;
#define DEBUG_TYPE "machinelicm"

// Command-line knobs tuning MachineLICM's behavior. All are hidden
// developer options.
static cl::opt<bool>
AvoidSpeculation("avoid-speculation",
                 cl::desc("MachineLICM should avoid speculation"),
                 cl::init(true), cl::Hidden);

static cl::opt<bool>
HoistCheapInsts("hoist-cheap-insts",
                cl::desc("MachineLICM should hoist even cheap instructions"),
                cl::init(false), cl::Hidden);

static cl::opt<bool>
SinkInstsToAvoidSpills("sink-insts-to-avoid-spills",
                       cl::desc("MachineLICM should sink instructions into "
                                "loops to avoid register spills"),
                       cl::init(false), cl::Hidden);

static cl::opt<bool>
HoistConstStores("hoist-const-stores",
                 cl::desc("Hoist invariant stores"),
                 cl::init(true), cl::Hidden);

// Pass statistics, reported with -stats.
STATISTIC(NumHoisted,
          "Number of machine instructions hoisted out of loops");
STATISTIC(NumLowRP,
          "Number of instructions hoisted in low reg pressure situation");
STATISTIC(NumHighLatency,
          "Number of high latency instructions hoisted");
STATISTIC(NumCSEed,
          "Number of hoisted machine instructions CSEed");
STATISTIC(NumPostRAHoisted,
          "Number of machine instructions hoisted out of loops post regalloc");
STATISTIC(NumStoreConst,
          "Number of stores of const phys reg hoisted out of loops");
  83. namespace {
/// Common implementation shared by the pre- and post-regalloc MachineLICM
/// passes. Subclasses only choose the pass ID and the initial PreRegAlloc
/// mode; the actual mode is re-derived from SSA form in
/// runOnMachineFunction.
class MachineLICMBase : public MachineFunctionPass {
  const TargetInstrInfo *TII;
  const TargetLoweringBase *TLI;
  const TargetRegisterInfo *TRI;
  const MachineFrameInfo *MFI;
  MachineRegisterInfo *MRI;
  TargetSchedModel SchedModel;
  bool PreRegAlloc;

  // Various analyses that we use...
  AliasAnalysis *AA;        // Alias analysis info.
  MachineLoopInfo *MLI;     // Current MachineLoopInfo
  MachineDominatorTree *DT; // Machine dominator tree for the cur loop

  // State that is updated as we process loops
  bool Changed;             // True if a loop is changed.
  bool FirstInLoop;         // True if it's the first LICM in the loop.
  MachineLoop *CurLoop;     // The current loop we are working on.
  MachineBasicBlock *CurPreheader; // The preheader for CurLoop.

  // Exit blocks for CurLoop.
  SmallVector<MachineBasicBlock *, 8> ExitBlocks;

  /// Return true if MBB is one of CurLoop's recorded exit blocks.
  bool isExitBlock(const MachineBasicBlock *MBB) const {
    return is_contained(ExitBlocks, MBB);
  }

  // Track 'estimated' register pressure.
  SmallSet<unsigned, 32> RegSeen;
  SmallVector<unsigned, 8> RegPressure;

  // Register pressure "limit" per register pressure set. If the pressure
  // is higher than the limit, then it's considered high.
  SmallVector<unsigned, 8> RegLimit;

  // Register pressure on path leading from loop preheader to current BB.
  SmallVector<SmallVector<unsigned, 8>, 16> BackTrace;

  // For each opcode, keep a list of potential CSE instructions.
  DenseMap<unsigned, std::vector<const MachineInstr *>> CSEMap;

  // Values of the SpeculationState tri-state below. "False" means no
  // speculation would be involved (the block is guaranteed to execute).
  enum {
    SpeculateFalse = 0,
    SpeculateTrue = 1,
    SpeculateUnknown = 2
  };

  // If a MBB does not dominate loop exiting blocks then it may not be safe
  // to hoist loads from this block.
  // Tri-state: 0 - false, 1 - true, 2 - unknown
  unsigned SpeculationState;

public:
  MachineLICMBase(char &PassID, bool PreRegAlloc)
      : MachineFunctionPass(PassID), PreRegAlloc(PreRegAlloc) {}

  bool runOnMachineFunction(MachineFunction &MF) override;

  /// This pass requires loop info, dominators and alias analysis, and keeps
  /// loop info and dominators up to date.
  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<MachineLoopInfo>();
    AU.addRequired<MachineDominatorTree>();
    AU.addRequired<AAResultsWrapperPass>();
    AU.addPreserved<MachineLoopInfo>();
    AU.addPreserved<MachineDominatorTree>();
    MachineFunctionPass::getAnalysisUsage(AU);
  }

  /// Drop all per-function scratch state.
  void releaseMemory() override {
    RegSeen.clear();
    RegPressure.clear();
    RegLimit.clear();
    BackTrace.clear();
    CSEMap.clear();
  }

private:
  /// Keep track of information about hoisting candidates.
  struct CandidateInfo {
    MachineInstr *MI;
    unsigned Def;  // The single physical register this candidate defines.
    int FI;        // Stack slot loaded from, or INT_MIN if not a reload.

    CandidateInfo(MachineInstr *mi, unsigned def, int fi)
        : MI(mi), Def(def), FI(fi) {}
  };

  // Post-regalloc hoisting machinery (definitions below).
  void HoistRegionPostRA();
  void HoistPostRA(MachineInstr *MI, unsigned Def);
  void ProcessMI(MachineInstr *MI, BitVector &PhysRegDefs,
                 BitVector &PhysRegClobbers, SmallSet<int, 32> &StoredFIs,
                 SmallVectorImpl<CandidateInfo> &Candidates);
  void AddToLiveIns(unsigned Reg);

  // Legality / profitability queries (defined elsewhere in this file).
  bool IsLICMCandidate(MachineInstr &I);
  bool IsLoopInvariantInst(MachineInstr &I);
  bool HasLoopPHIUse(const MachineInstr *MI) const;
  bool HasHighOperandLatency(MachineInstr &MI, unsigned DefIdx,
                             unsigned Reg) const;
  bool IsCheapInstruction(MachineInstr &MI) const;
  bool CanCauseHighRegPressure(const DenseMap<unsigned, int> &Cost,
                               bool Cheap);
  void UpdateBackTraceRegPressure(const MachineInstr *MI);
  bool IsProfitableToHoist(MachineInstr &MI);
  bool IsGuaranteedToExecute(MachineBasicBlock *BB);

  // Dominator-tree scope management for the register pressure back-trace.
  void EnterScope(MachineBasicBlock *MBB);
  void ExitScope(MachineBasicBlock *MBB);
  void ExitScopeIfDone(
      MachineDomTreeNode *Node,
      DenseMap<MachineDomTreeNode *, unsigned> &OpenChildren,
      DenseMap<MachineDomTreeNode *, MachineDomTreeNode *> &ParentMap);

  // Pre-regalloc hoisting/sinking drivers and register pressure tracking.
  void HoistOutOfLoop(MachineDomTreeNode *HeaderN);
  void HoistRegion(MachineDomTreeNode *N, bool IsHeader);
  void SinkIntoLoop();
  void InitRegPressure(MachineBasicBlock *BB);
  DenseMap<unsigned, int> calcRegisterCost(const MachineInstr *MI,
                                           bool ConsiderSeen,
                                           bool ConsiderUnseenAsDef);
  void UpdateRegPressure(const MachineInstr *MI,
                         bool ConsiderUnseenAsDef = false);

  // CSE support for hoisted instructions.
  MachineInstr *ExtractHoistableLoad(MachineInstr *MI);
  const MachineInstr *
  LookForDuplicate(const MachineInstr *MI,
                   std::vector<const MachineInstr *> &PrevMIs);
  bool EliminateCSE(
      MachineInstr *MI,
      DenseMap<unsigned, std::vector<const MachineInstr *>>::iterator &CI);
  bool MayCSE(MachineInstr *MI);
  bool Hoist(MachineInstr *MI, MachineBasicBlock *Preheader);
  void InitCSEMap(MachineBasicBlock *BB);
  MachineBasicBlock *getCurPreheader();
};
/// Standard MachineLICM pass wrapper; constructed with PreRegAlloc = false
/// (the effective mode is re-derived from SSA form in runOnMachineFunction).
class MachineLICM : public MachineLICMBase {
public:
  static char ID;

  MachineLICM() : MachineLICMBase(ID, false) {
    initializeMachineLICMPass(*PassRegistry::getPassRegistry());
  }
};
/// Early MachineLICM pass wrapper; constructed with PreRegAlloc = true
/// (the effective mode is re-derived from SSA form in runOnMachineFunction).
class EarlyMachineLICM : public MachineLICMBase {
public:
  static char ID;

  EarlyMachineLICM() : MachineLICMBase(ID, true) {
    initializeEarlyMachineLICMPass(*PassRegistry::getPassRegistry());
  }
};
  211. } // end anonymous namespace
char MachineLICM::ID;
char EarlyMachineLICM::ID;

// Opaque pass identifiers exported for lookup by the pass manager.
char &llvm::MachineLICMID = MachineLICM::ID;
char &llvm::EarlyMachineLICMID = EarlyMachineLICM::ID;

// Register both passes and declare their analysis dependencies.
INITIALIZE_PASS_BEGIN(MachineLICM, DEBUG_TYPE,
                      "Machine Loop Invariant Code Motion", false, false)
INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_END(MachineLICM, DEBUG_TYPE,
                    "Machine Loop Invariant Code Motion", false, false)

INITIALIZE_PASS_BEGIN(EarlyMachineLICM, "early-machinelicm",
                      "Early Machine Loop Invariant Code Motion", false, false)
INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_END(EarlyMachineLICM, "early-machinelicm",
                    "Early Machine Loop Invariant Code Motion", false, false)
  230. /// Test if the given loop is the outer-most loop that has a unique predecessor.
  231. static bool LoopIsOuterMostWithPredecessor(MachineLoop *CurLoop) {
  232. // Check whether this loop even has a unique predecessor.
  233. if (!CurLoop->getLoopPredecessor())
  234. return false;
  235. // Ok, now check to see if any of its outer loops do.
  236. for (MachineLoop *L = CurLoop->getParentLoop(); L; L = L->getParentLoop())
  237. if (L->getLoopPredecessor())
  238. return false;
  239. // None of them did, so this is the outermost with a unique predecessor.
  240. return true;
  241. }
/// Entry point: run LICM over every loop in MF. Derives pre-/post-regalloc
/// mode from whether the function is still in SSA form, then visits loops
/// from a worklist, hoisting (and optionally sinking) invariant code.
bool MachineLICMBase::runOnMachineFunction(MachineFunction &MF) {
  if (skipFunction(MF.getFunction()))
    return false;

  Changed = FirstInLoop = false;
  const TargetSubtargetInfo &ST = MF.getSubtarget();
  TII = ST.getInstrInfo();
  TLI = ST.getTargetLowering();
  TRI = ST.getRegisterInfo();
  MFI = &MF.getFrameInfo();
  MRI = &MF.getRegInfo();
  SchedModel.init(&ST);

  // The constructor's PreRegAlloc flag is overridden here: SSA form is the
  // authoritative signal for whether register allocation has happened.
  PreRegAlloc = MRI->isSSA();

  if (PreRegAlloc)
    LLVM_DEBUG(dbgs() << "******** Pre-regalloc Machine LICM: ");
  else
    LLVM_DEBUG(dbgs() << "******** Post-regalloc Machine LICM: ");
  LLVM_DEBUG(dbgs() << MF.getName() << " ********\n");

  if (PreRegAlloc) {
    // Estimate register pressure during pre-regalloc pass.
    unsigned NumRPS = TRI->getNumRegPressureSets();
    RegPressure.resize(NumRPS);
    std::fill(RegPressure.begin(), RegPressure.end(), 0);
    RegLimit.resize(NumRPS);
    for (unsigned i = 0, e = NumRPS; i != e; ++i)
      RegLimit[i] = TRI->getRegPressureSetLimit(MF, i);
  }

  // Get our Loop information...
  MLI = &getAnalysis<MachineLoopInfo>();
  DT = &getAnalysis<MachineDominatorTree>();
  AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();

  // Seed the worklist with the top-level loops; inner loops may be appended
  // below when an outer loop is skipped.
  SmallVector<MachineLoop *, 8> Worklist(MLI->begin(), MLI->end());
  while (!Worklist.empty()) {
    CurLoop = Worklist.pop_back_val();
    CurPreheader = nullptr;
    ExitBlocks.clear();

    // If this is done before regalloc, only visit outer-most preheader-sporting
    // loops.
    if (PreRegAlloc && !LoopIsOuterMostWithPredecessor(CurLoop)) {
      Worklist.append(CurLoop->begin(), CurLoop->end());
      continue;
    }

    CurLoop->getExitBlocks(ExitBlocks);

    if (!PreRegAlloc)
      HoistRegionPostRA();
    else {
      // CSEMap is initialized for loop header when the first instruction is
      // being hoisted.
      MachineDomTreeNode *N = DT->getNode(CurLoop->getHeader());
      FirstInLoop = true;
      HoistOutOfLoop(N);
      CSEMap.clear();

      if (SinkInstsToAvoidSpills)
        SinkIntoLoop();
    }
  }

  return Changed;
}
  299. /// Return true if instruction stores to the specified frame.
  300. static bool InstructionStoresToFI(const MachineInstr *MI, int FI) {
  301. // Check mayStore before memory operands so that e.g. DBG_VALUEs will return
  302. // true since they have no memory operands.
  303. if (!MI->mayStore())
  304. return false;
  305. // If we lost memory operands, conservatively assume that the instruction
  306. // writes to all slots.
  307. if (MI->memoperands_empty())
  308. return true;
  309. for (const MachineMemOperand *MemOp : MI->memoperands()) {
  310. if (!MemOp->isStore() || !MemOp->getPseudoValue())
  311. continue;
  312. if (const FixedStackPseudoSourceValue *Value =
  313. dyn_cast<FixedStackPseudoSourceValue>(MemOp->getPseudoValue())) {
  314. if (Value->getFrameIndex() == FI)
  315. return true;
  316. }
  317. }
  318. return false;
  319. }
/// Examine the instruction for potential LICM candidate. Also
/// gather register def and frame object update information.
void MachineLICMBase::ProcessMI(MachineInstr *MI,
                                BitVector &PhysRegDefs,
                                BitVector &PhysRegClobbers,
                                SmallSet<int, 32> &StoredFIs,
                                SmallVectorImpl<CandidateInfo> &Candidates) {
  bool RuledOut = false;
  bool HasNonInvariantUse = false;
  unsigned Def = 0;
  for (const MachineOperand &MO : MI->operands()) {
    if (MO.isFI()) {
      // Remember if the instruction stores to the frame index.
      int FI = MO.getIndex();
      if (!StoredFIs.count(FI) &&
          MFI->isSpillSlotObjectIndex(FI) &&
          InstructionStoresToFI(MI, FI))
        StoredFIs.insert(FI);
      HasNonInvariantUse = true;
      continue;
    }

    // We can't hoist an instruction defining a physreg that is clobbered in
    // the loop.
    if (MO.isRegMask()) {
      PhysRegClobbers.setBitsNotInMask(MO.getRegMask());
      continue;
    }

    if (!MO.isReg())
      continue;
    Register Reg = MO.getReg();
    if (!Reg)
      continue;
    // Post-regalloc only: everything must be a physical register here.
    assert(Register::isPhysicalRegister(Reg) &&
           "Not expecting virtual register!");

    if (!MO.isDef()) {
      if (Reg && (PhysRegDefs.test(Reg) || PhysRegClobbers.test(Reg)))
        // If it's using a non-loop-invariant register, then it's obviously not
        // safe to hoist.
        HasNonInvariantUse = true;
      continue;
    }

    if (MO.isImplicit()) {
      // Conservatively mark the implicit def and all aliases as clobbered.
      for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI)
        PhysRegClobbers.set(*AI);
      if (!MO.isDead())
        // Non-dead implicit def? This cannot be hoisted.
        RuledOut = true;
      // No need to check if a dead implicit def is also defined by
      // another instruction.
      continue;
    }

    // FIXME: For now, avoid instructions with multiple defs, unless
    // it's a dead implicit def.
    if (Def)
      RuledOut = true;
    else
      Def = Reg;

    // If we have already seen another instruction that defines the same
    // register, then this is not safe. Two defs is indicated by setting a
    // PhysRegClobbers bit.
    for (MCRegAliasIterator AS(Reg, TRI, true); AS.isValid(); ++AS) {
      if (PhysRegDefs.test(*AS))
        PhysRegClobbers.set(*AS);
    }
    // Need a second loop because MCRegAliasIterator can visit the same
    // register twice.
    for (MCRegAliasIterator AS(Reg, TRI, true); AS.isValid(); ++AS)
      PhysRegDefs.set(*AS);

    if (PhysRegClobbers.test(Reg))
      // MI defined register is seen defined by another instruction in
      // the loop, it cannot be a LICM candidate.
      RuledOut = true;
  }

  // Only consider reloads for now and remats which do not have register
  // operands. FIXME: Consider unfold load folding instructions.
  if (Def && !RuledOut) {
    // FI stays INT_MIN unless isLoadFromStackSlot fills it in below; that
    // sentinel marks "not a reload" for the StoredFIs check in the caller.
    int FI = std::numeric_limits<int>::min();
    if ((!HasNonInvariantUse && IsLICMCandidate(*MI)) ||
        (TII->isLoadFromStackSlot(*MI, FI) && MFI->isSpillSlotObjectIndex(FI)))
      Candidates.push_back(CandidateInfo(MI, Def, FI));
  }
}
/// Walk the specified region of the CFG and hoist loop invariants out to the
/// preheader.
void MachineLICMBase::HoistRegionPostRA() {
  MachineBasicBlock *Preheader = getCurPreheader();
  if (!Preheader)
    return;

  unsigned NumRegs = TRI->getNumRegs();
  BitVector PhysRegDefs(NumRegs); // Regs defined once in the loop.
  BitVector PhysRegClobbers(NumRegs); // Regs defined more than once.

  SmallVector<CandidateInfo, 32> Candidates;
  SmallSet<int, 32> StoredFIs;

  // Walk the entire region, count number of defs for each register, and
  // collect potential LICM candidates.
  for (MachineBasicBlock *BB : CurLoop->getBlocks()) {
    // If the header of the loop containing this basic block is a landing pad,
    // then don't try to hoist instructions out of this loop.
    const MachineLoop *ML = MLI->getLoopFor(BB);
    if (ML && ML->getHeader()->isEHPad()) continue;

    // Conservatively treat live-in's as an external def.
    // FIXME: That means a reload that're reused in successor block(s) will not
    // be LICM'ed.
    for (const auto &LI : BB->liveins()) {
      for (MCRegAliasIterator AI(LI.PhysReg, TRI, true); AI.isValid(); ++AI)
        PhysRegDefs.set(*AI);
    }

    // Reset the per-block speculation cache before scanning this block.
    SpeculationState = SpeculateUnknown;
    for (MachineInstr &MI : *BB)
      ProcessMI(&MI, PhysRegDefs, PhysRegClobbers, StoredFIs, Candidates);
  }

  // Gather the registers read / clobbered by the terminator.
  BitVector TermRegs(NumRegs);
  MachineBasicBlock::iterator TI = Preheader->getFirstTerminator();
  if (TI != Preheader->end()) {
    for (const MachineOperand &MO : TI->operands()) {
      if (!MO.isReg())
        continue;
      Register Reg = MO.getReg();
      if (!Reg)
        continue;
      for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI)
        TermRegs.set(*AI);
    }
  }

  // Now evaluate whether the potential candidates qualify.
  // 1. Check if the candidate defined register is defined by another
  //    instruction in the loop.
  // 2. If the candidate is a load from stack slot (always true for now),
  //    check if the slot is stored anywhere in the loop.
  // 3. Make sure candidate def should not clobber
  //    registers read by the terminator. Similarly its def should not be
  //    clobbered by the terminator.
  for (CandidateInfo &Candidate : Candidates) {
    // Skip reloads whose slot is stored somewhere in the loop (FI is the
    // INT_MIN sentinel when the candidate is not a reload).
    if (Candidate.FI != std::numeric_limits<int>::min() &&
        StoredFIs.count(Candidate.FI))
      continue;

    unsigned Def = Candidate.Def;
    if (!PhysRegClobbers.test(Def) && !TermRegs.test(Def)) {
      bool Safe = true;
      MachineInstr *MI = Candidate.MI;
      // All register uses must be loop-invariant as well.
      for (const MachineOperand &MO : MI->operands()) {
        if (!MO.isReg() || MO.isDef() || !MO.getReg())
          continue;
        Register Reg = MO.getReg();
        if (PhysRegDefs.test(Reg) ||
            PhysRegClobbers.test(Reg)) {
          // If it's using a non-loop-invariant register, then it's obviously
          // not safe to hoist.
          Safe = false;
          break;
        }
      }
      if (Safe)
        HoistPostRA(MI, Candidate.Def);
    }
  }
}
  478. /// Add register 'Reg' to the livein sets of BBs in the current loop, and make
  479. /// sure it is not killed by any instructions in the loop.
  480. void MachineLICMBase::AddToLiveIns(unsigned Reg) {
  481. for (MachineBasicBlock *BB : CurLoop->getBlocks()) {
  482. if (!BB->isLiveIn(Reg))
  483. BB->addLiveIn(Reg);
  484. for (MachineInstr &MI : *BB) {
  485. for (MachineOperand &MO : MI.operands()) {
  486. if (!MO.isReg() || !MO.getReg() || MO.isDef()) continue;
  487. if (MO.getReg() == Reg || TRI->isSuperRegister(Reg, MO.getReg()))
  488. MO.setIsKill(false);
  489. }
  490. }
  491. }
  492. }
  493. /// When an instruction is found to only use loop invariant operands that is
  494. /// safe to hoist, this instruction is called to do the dirty work.
  495. void MachineLICMBase::HoistPostRA(MachineInstr *MI, unsigned Def) {
  496. MachineBasicBlock *Preheader = getCurPreheader();
  497. // Now move the instructions to the predecessor, inserting it before any
  498. // terminator instructions.
  499. LLVM_DEBUG(dbgs() << "Hoisting to " << printMBBReference(*Preheader)
  500. << " from " << printMBBReference(*MI->getParent()) << ": "
  501. << *MI);
  502. // Splice the instruction to the preheader.
  503. MachineBasicBlock *MBB = MI->getParent();
  504. Preheader->splice(Preheader->getFirstTerminator(), MBB, MI);
  505. // Add register to livein list to all the BBs in the current loop since a
  506. // loop invariant must be kept live throughout the whole loop. This is
  507. // important to ensure later passes do not scavenge the def register.
  508. AddToLiveIns(Def);
  509. ++NumPostRAHoisted;
  510. Changed = true;
  511. }
/// Check if this mbb is guaranteed to execute. If not then a load from this mbb
/// may not be safe to hoist.
bool MachineLICMBase::IsGuaranteedToExecute(MachineBasicBlock *BB) {
  // Return the cached answer if one exists. Note the tri-state naming:
  // SpeculateFalse means hoisting would involve no speculation, i.e. the
  // block IS guaranteed to execute.
  if (SpeculationState != SpeculateUnknown)
    return SpeculationState == SpeculateFalse;

  // The loop header always executes once the loop is entered; any other
  // block must dominate every exiting block to be guaranteed.
  if (BB != CurLoop->getHeader()) {
    // Check loop exiting blocks.
    SmallVector<MachineBasicBlock*, 8> CurrentLoopExitingBlocks;
    CurLoop->getExitingBlocks(CurrentLoopExitingBlocks);
    for (MachineBasicBlock *CurrentLoopExitingBlock : CurrentLoopExitingBlocks)
      if (!DT->dominates(BB, CurrentLoopExitingBlock)) {
        // Some exiting path bypasses BB, so hoisting from it would speculate.
        SpeculationState = SpeculateTrue;
        return false;
      }
  }

  SpeculationState = SpeculateFalse;
  return true;
}
  530. void MachineLICMBase::EnterScope(MachineBasicBlock *MBB) {
  531. LLVM_DEBUG(dbgs() << "Entering " << printMBBReference(*MBB) << '\n');
  532. // Remember livein register pressure.
  533. BackTrace.push_back(RegPressure);
  534. }
/// Close the scope opened by EnterScope for MBB, dropping the register
/// pressure snapshot recorded on entry.
void MachineLICMBase::ExitScope(MachineBasicBlock *MBB) {
  LLVM_DEBUG(dbgs() << "Exiting " << printMBBReference(*MBB) << '\n');
  BackTrace.pop_back();
}
/// Destroy scope for the MBB that corresponds to the given dominator tree node
/// if it's a leaf or all of its children are done. Walk up the dominator tree to
/// destroy ancestors which are now done.
void MachineLICMBase::ExitScopeIfDone(MachineDomTreeNode *Node,
    DenseMap<MachineDomTreeNode*, unsigned> &OpenChildren,
    DenseMap<MachineDomTreeNode*, MachineDomTreeNode*> &ParentMap) {
  // Still-open children mean this node's scope must stay alive.
  if (OpenChildren[Node])
    return;

  // Pop scope.
  ExitScope(Node->getBlock());

  // Now traverse upwards to pop ancestors whose offsprings are all done.
  // ParentMap[HeaderN] is null (default-constructed), which terminates the
  // walk at the root of the region.
  while (MachineDomTreeNode *Parent = ParentMap[Node]) {
    unsigned Left = --OpenChildren[Parent];
    if (Left != 0)
      break;
    ExitScope(Parent->getBlock());
    Node = Parent;
  }
}
/// Walk the specified loop in the CFG (defined by all blocks dominated by the
/// specified header block, and that are in the current loop) in depth first
/// order w.r.t the DominatorTree. This allows us to visit definitions before
/// uses, allowing us to hoist a loop body in one pass without iteration.
void MachineLICMBase::HoistOutOfLoop(MachineDomTreeNode *HeaderN) {
  MachineBasicBlock *Preheader = getCurPreheader();
  if (!Preheader)
    return;

  SmallVector<MachineDomTreeNode*, 32> Scopes;
  SmallVector<MachineDomTreeNode*, 8> WorkList;
  DenseMap<MachineDomTreeNode*, MachineDomTreeNode*> ParentMap;
  DenseMap<MachineDomTreeNode*, unsigned> OpenChildren;

  // Perform a DFS walk to determine the order of visit.
  WorkList.push_back(HeaderN);
  while (!WorkList.empty()) {
    MachineDomTreeNode *Node = WorkList.pop_back_val();
    assert(Node && "Null dominator tree node?");
    MachineBasicBlock *BB = Node->getBlock();

    // If the header of the loop containing this basic block is a landing pad,
    // then don't try to hoist instructions out of this loop.
    const MachineLoop *ML = MLI->getLoopFor(BB);
    if (ML && ML->getHeader()->isEHPad())
      continue;

    // If this subregion is not in the top level loop at all, exit.
    if (!CurLoop->contains(BB))
      continue;

    Scopes.push_back(Node);
    const std::vector<MachineDomTreeNode*> &Children = Node->getChildren();
    unsigned NumChildren = Children.size();

    // Don't hoist things out of a large switch statement.  This often causes
    // code to be hoisted that wasn't going to be executed, and increases
    // register pressure in a situation where it's likely to matter.
    if (BB->succ_size() >= 25)
      NumChildren = 0;

    OpenChildren[Node] = NumChildren;
    // Add children in reverse order as then the next popped worklist node is
    // the first child of this node.  This means we ultimately traverse the
    // DOM tree in exactly the same order as if we'd recursed.
    for (int i = (int)NumChildren-1; i >= 0; --i) {
      MachineDomTreeNode *Child = Children[i];
      ParentMap[Child] = Node;
      WorkList.push_back(Child);
    }
  }

  if (Scopes.size() == 0)
    return;

  // Compute registers which are livein into the loop headers.
  RegSeen.clear();
  BackTrace.clear();
  InitRegPressure(Preheader);

  // Now perform LICM, visiting blocks in the DFS order computed above so
  // that defs are seen before their uses.
  for (MachineDomTreeNode *Node : Scopes) {
    MachineBasicBlock *MBB = Node->getBlock();

    EnterScope(MBB);

    // Process the block.  NextMII is captured before Hoist runs because
    // hoisting may move or replace the current instruction.
    SpeculationState = SpeculateUnknown;
    for (MachineBasicBlock::iterator
         MII = MBB->begin(), E = MBB->end(); MII != E; ) {
      MachineBasicBlock::iterator NextMII = MII; ++NextMII;
      MachineInstr *MI = &*MII;
      if (!Hoist(MI, Preheader))
        UpdateRegPressure(MI);
      // If we have hoisted an instruction that may store, it can only be a
      // constant store.
      MII = NextMII;
    }

    // If it's a leaf node, it's done. Traverse upwards to pop ancestors.
    ExitScopeIfDone(Node, OpenChildren, ParentMap);
  }
}
/// Sink instructions into loops if profitable. This especially tries to prevent
/// register spills caused by register pressure if there is little to no
/// overhead moving instructions into loops.
void MachineLICMBase::SinkIntoLoop() {
  MachineBasicBlock *Preheader = getCurPreheader();
  if (!Preheader)
    return;

  // Gather candidates first; the splice below mutates the preheader's
  // instruction list, so we must not sink while iterating over it.
  SmallVector<MachineInstr *, 8> Candidates;
  for (MachineBasicBlock::instr_iterator I = Preheader->instr_begin();
       I != Preheader->instr_end(); ++I) {
    // We need to ensure that we can safely move this instruction into the loop.
    // As such, it must not have side-effects, e.g. such as a call has.
    if (IsLoopInvariantInst(*I) && !HasLoopPHIUse(&*I))
      Candidates.push_back(&*I);
  }

  for (MachineInstr *I : Candidates) {
    // Only consider instructions whose first operand is a non-null virtual
    // register def with a single definition; otherwise skip the candidate.
    const MachineOperand &MO = I->getOperand(0);
    if (!MO.isDef() || !MO.isReg() || !MO.getReg())
      continue;
    if (!MRI->hasOneDef(MO.getReg()))
      continue;
    bool CanSink = true;
    // B accumulates the nearest common dominator of all uses; the candidate
    // is sunk to B so every use remains dominated by the def.
    MachineBasicBlock *B = nullptr;
    for (MachineInstr &MI : MRI->use_instructions(MO.getReg())) {
      // FIXME: Come up with a proper cost model that estimates whether sinking
      // the instruction (and thus possibly executing it on every loop
      // iteration) is more expensive than a register.
      // For now assumes that copies are cheap and thus almost always worth it.
      if (!MI.isCopy()) {
        CanSink = false;
        break;
      }
      if (!B) {
        B = MI.getParent();
        continue;
      }
      B = DT->findNearestCommonDominator(B, MI.getParent());
      if (!B) {
        CanSink = false;
        break;
      }
    }
    // Nothing to do if sinking is unsafe, there were no uses, or the common
    // dominator is the preheader itself (no movement).
    if (!CanSink || !B || B == Preheader)
      continue;
    B->splice(B->getFirstNonPHI(), Preheader, I);
  }
}
  675. static bool isOperandKill(const MachineOperand &MO, MachineRegisterInfo *MRI) {
  676. return MO.isKill() || MRI->hasOneNonDBGUse(MO.getReg());
  677. }
  678. /// Find all virtual register references that are liveout of the preheader to
  679. /// initialize the starting "register pressure". Note this does not count live
  680. /// through (livein but not used) registers.
  681. void MachineLICMBase::InitRegPressure(MachineBasicBlock *BB) {
  682. std::fill(RegPressure.begin(), RegPressure.end(), 0);
  683. // If the preheader has only a single predecessor and it ends with a
  684. // fallthrough or an unconditional branch, then scan its predecessor for live
  685. // defs as well. This happens whenever the preheader is created by splitting
  686. // the critical edge from the loop predecessor to the loop header.
  687. if (BB->pred_size() == 1) {
  688. MachineBasicBlock *TBB = nullptr, *FBB = nullptr;
  689. SmallVector<MachineOperand, 4> Cond;
  690. if (!TII->analyzeBranch(*BB, TBB, FBB, Cond, false) && Cond.empty())
  691. InitRegPressure(*BB->pred_begin());
  692. }
  693. for (const MachineInstr &MI : *BB)
  694. UpdateRegPressure(&MI, /*ConsiderUnseenAsDef=*/true);
  695. }
  696. /// Update estimate of register pressure after the specified instruction.
  697. void MachineLICMBase::UpdateRegPressure(const MachineInstr *MI,
  698. bool ConsiderUnseenAsDef) {
  699. auto Cost = calcRegisterCost(MI, /*ConsiderSeen=*/true, ConsiderUnseenAsDef);
  700. for (const auto &RPIdAndCost : Cost) {
  701. unsigned Class = RPIdAndCost.first;
  702. if (static_cast<int>(RegPressure[Class]) < -RPIdAndCost.second)
  703. RegPressure[Class] = 0;
  704. else
  705. RegPressure[Class] += RPIdAndCost.second;
  706. }
  707. }
  708. /// Calculate the additional register pressure that the registers used in MI
  709. /// cause.
  710. ///
  711. /// If 'ConsiderSeen' is true, updates 'RegSeen' and uses the information to
  712. /// figure out which usages are live-ins.
  713. /// FIXME: Figure out a way to consider 'RegSeen' from all code paths.
  714. DenseMap<unsigned, int>
  715. MachineLICMBase::calcRegisterCost(const MachineInstr *MI, bool ConsiderSeen,
  716. bool ConsiderUnseenAsDef) {
  717. DenseMap<unsigned, int> Cost;
  718. if (MI->isImplicitDef())
  719. return Cost;
  720. for (unsigned i = 0, e = MI->getDesc().getNumOperands(); i != e; ++i) {
  721. const MachineOperand &MO = MI->getOperand(i);
  722. if (!MO.isReg() || MO.isImplicit())
  723. continue;
  724. Register Reg = MO.getReg();
  725. if (!Register::isVirtualRegister(Reg))
  726. continue;
  727. // FIXME: It seems bad to use RegSeen only for some of these calculations.
  728. bool isNew = ConsiderSeen ? RegSeen.insert(Reg).second : false;
  729. const TargetRegisterClass *RC = MRI->getRegClass(Reg);
  730. RegClassWeight W = TRI->getRegClassWeight(RC);
  731. int RCCost = 0;
  732. if (MO.isDef())
  733. RCCost = W.RegWeight;
  734. else {
  735. bool isKill = isOperandKill(MO, MRI);
  736. if (isNew && !isKill && ConsiderUnseenAsDef)
  737. // Haven't seen this, it must be a livein.
  738. RCCost = W.RegWeight;
  739. else if (!isNew && isKill)
  740. RCCost = -W.RegWeight;
  741. }
  742. if (RCCost == 0)
  743. continue;
  744. const int *PS = TRI->getRegClassPressureSets(RC);
  745. for (; *PS != -1; ++PS) {
  746. if (Cost.find(*PS) == Cost.end())
  747. Cost[*PS] = RCCost;
  748. else
  749. Cost[*PS] += RCCost;
  750. }
  751. }
  752. return Cost;
  753. }
  754. /// Return true if this machine instruction loads from global offset table or
  755. /// constant pool.
  756. static bool mayLoadFromGOTOrConstantPool(MachineInstr &MI) {
  757. assert(MI.mayLoad() && "Expected MI that loads!");
  758. // If we lost memory operands, conservatively assume that the instruction
  759. // reads from everything..
  760. if (MI.memoperands_empty())
  761. return true;
  762. for (MachineMemOperand *MemOp : MI.memoperands())
  763. if (const PseudoSourceValue *PSV = MemOp->getPseudoValue())
  764. if (PSV->isGOT() || PSV->isConstantPool())
  765. return true;
  766. return false;
  767. }
  768. // This function iterates through all the operands of the input store MI and
  769. // checks that each register operand statisfies isCallerPreservedPhysReg.
  770. // This means, the value being stored and the address where it is being stored
  771. // is constant throughout the body of the function (not including prologue and
  772. // epilogue). When called with an MI that isn't a store, it returns false.
  773. // A future improvement can be to check if the store registers are constant
  774. // throughout the loop rather than throughout the funtion.
  775. static bool isInvariantStore(const MachineInstr &MI,
  776. const TargetRegisterInfo *TRI,
  777. const MachineRegisterInfo *MRI) {
  778. bool FoundCallerPresReg = false;
  779. if (!MI.mayStore() || MI.hasUnmodeledSideEffects() ||
  780. (MI.getNumOperands() == 0))
  781. return false;
  782. // Check that all register operands are caller-preserved physical registers.
  783. for (const MachineOperand &MO : MI.operands()) {
  784. if (MO.isReg()) {
  785. Register Reg = MO.getReg();
  786. // If operand is a virtual register, check if it comes from a copy of a
  787. // physical register.
  788. if (Register::isVirtualRegister(Reg))
  789. Reg = TRI->lookThruCopyLike(MO.getReg(), MRI);
  790. if (Register::isVirtualRegister(Reg))
  791. return false;
  792. if (!TRI->isCallerPreservedPhysReg(Reg, *MI.getMF()))
  793. return false;
  794. else
  795. FoundCallerPresReg = true;
  796. } else if (!MO.isImm()) {
  797. return false;
  798. }
  799. }
  800. return FoundCallerPresReg;
  801. }
  802. // Return true if the input MI is a copy instruction that feeds an invariant
  803. // store instruction. This means that the src of the copy has to satisfy
  804. // isCallerPreservedPhysReg and atleast one of it's users should satisfy
  805. // isInvariantStore.
  806. static bool isCopyFeedingInvariantStore(const MachineInstr &MI,
  807. const MachineRegisterInfo *MRI,
  808. const TargetRegisterInfo *TRI) {
  809. // FIXME: If targets would like to look through instructions that aren't
  810. // pure copies, this can be updated to a query.
  811. if (!MI.isCopy())
  812. return false;
  813. const MachineFunction *MF = MI.getMF();
  814. // Check that we are copying a constant physical register.
  815. Register CopySrcReg = MI.getOperand(1).getReg();
  816. if (Register::isVirtualRegister(CopySrcReg))
  817. return false;
  818. if (!TRI->isCallerPreservedPhysReg(CopySrcReg, *MF))
  819. return false;
  820. Register CopyDstReg = MI.getOperand(0).getReg();
  821. // Check if any of the uses of the copy are invariant stores.
  822. assert(Register::isVirtualRegister(CopyDstReg) &&
  823. "copy dst is not a virtual reg");
  824. for (MachineInstr &UseMI : MRI->use_instructions(CopyDstReg)) {
  825. if (UseMI.mayStore() && isInvariantStore(UseMI, TRI, MRI))
  826. return true;
  827. }
  828. return false;
  829. }
  830. /// Returns true if the instruction may be a suitable candidate for LICM.
  831. /// e.g. If the instruction is a call, then it's obviously not safe to hoist it.
  832. bool MachineLICMBase::IsLICMCandidate(MachineInstr &I) {
  833. // Check if it's safe to move the instruction.
  834. bool DontMoveAcrossStore = true;
  835. if ((!I.isSafeToMove(AA, DontMoveAcrossStore)) &&
  836. !(HoistConstStores && isInvariantStore(I, TRI, MRI))) {
  837. return false;
  838. }
  839. // If it is load then check if it is guaranteed to execute by making sure that
  840. // it dominates all exiting blocks. If it doesn't, then there is a path out of
  841. // the loop which does not execute this load, so we can't hoist it. Loads
  842. // from constant memory are not safe to speculate all the time, for example
  843. // indexed load from a jump table.
  844. // Stores and side effects are already checked by isSafeToMove.
  845. if (I.mayLoad() && !mayLoadFromGOTOrConstantPool(I) &&
  846. !IsGuaranteedToExecute(I.getParent()))
  847. return false;
  848. return true;
  849. }
/// Returns true if the instruction is loop invariant.
/// I.e., all virtual register operands are defined outside of the loop,
/// physical registers aren't accessed explicitly, and there are no side
/// effects that aren't captured by the operands or other flags.
bool MachineLICMBase::IsLoopInvariantInst(MachineInstr &I) {
  if (!IsLICMCandidate(I))
    return false;

  // The instruction is loop invariant if all of its operands are.
  for (const MachineOperand &MO : I.operands()) {
    if (!MO.isReg())
      continue;

    Register Reg = MO.getReg();
    if (Reg == 0) continue;

    // Don't hoist an instruction that uses or defines a physical register.
    if (Register::isPhysicalRegister(Reg)) {
      if (MO.isUse()) {
        // If the physreg has no defs anywhere, it's just an ambient register
        // and we can freely move its uses. Alternatively, if it's allocatable,
        // it could get allocated to something with a def during allocation.
        // However, if the physreg is known to always be caller saved/restored
        // then this use is safe to hoist.
        if (!MRI->isConstantPhysReg(Reg) &&
            !(TRI->isCallerPreservedPhysReg(Reg, *I.getMF())))
          return false;
        // Otherwise it's safe to move.
        continue;
      } else if (!MO.isDead()) {
        // A def that isn't dead. We can't move it.
        return false;
      } else if (CurLoop->getHeader()->isLiveIn(Reg)) {
        // A dead def is still unsafe if the reg is live into the loop: the
        // hoisted def would clobber the live-in value.
        return false;
      }
    }

    // Defs were fully handled above; only virtual-register uses remain to
    // be checked against the loop.
    if (!MO.isUse())
      continue;

    assert(MRI->getVRegDef(Reg) &&
           "Machine instr not mapped for this vreg?!");

    // If the loop contains the definition of an operand, then the instruction
    // isn't loop invariant.
    if (CurLoop->contains(MRI->getVRegDef(Reg)))
      return false;
  }

  // If we got this far, the instruction is loop invariant!
  return true;
}
/// Return true if the specified instruction is used by a phi node and hoisting
/// it could cause a copy to be inserted.
bool MachineLICMBase::HasLoopPHIUse(const MachineInstr *MI) const {
  // Worklist of instructions whose defs we transitively follow: starting at
  // MI, we also chase through in-loop COPYs, since a PHI reached through a
  // chain of copies still forces a copy when MI is hoisted.
  SmallVector<const MachineInstr*, 8> Work(1, MI);
  do {
    MI = Work.pop_back_val();
    for (const MachineOperand &MO : MI->operands()) {
      if (!MO.isReg() || !MO.isDef())
        continue;
      Register Reg = MO.getReg();
      if (!Register::isVirtualRegister(Reg))
        continue;
      for (MachineInstr &UseMI : MRI->use_instructions(Reg)) {
        // A PHI may cause a copy to be inserted.
        if (UseMI.isPHI()) {
          // A PHI inside the loop causes a copy because the live range of Reg is
          // extended across the PHI.
          if (CurLoop->contains(&UseMI))
            return true;
          // A PHI in an exit block can cause a copy to be inserted if the PHI
          // has multiple predecessors in the loop with different values.
          // For now, approximate by rejecting all exit blocks.
          if (isExitBlock(UseMI.getParent()))
            return true;
          continue;
        }
        // Look past copies as well.
        if (UseMI.isCopy() && CurLoop->contains(&UseMI))
          Work.push_back(&UseMI);
      }
    }
  } while (!Work.empty());

  // No PHI (in the loop or in an exit block) was reachable from MI's defs.
  return false;
}
  931. /// Compute operand latency between a def of 'Reg' and an use in the current
  932. /// loop, return true if the target considered it high.
  933. bool MachineLICMBase::HasHighOperandLatency(MachineInstr &MI,
  934. unsigned DefIdx,
  935. unsigned Reg) const {
  936. if (MRI->use_nodbg_empty(Reg))
  937. return false;
  938. for (MachineInstr &UseMI : MRI->use_nodbg_instructions(Reg)) {
  939. if (UseMI.isCopyLike())
  940. continue;
  941. if (!CurLoop->contains(UseMI.getParent()))
  942. continue;
  943. for (unsigned i = 0, e = UseMI.getNumOperands(); i != e; ++i) {
  944. const MachineOperand &MO = UseMI.getOperand(i);
  945. if (!MO.isReg() || !MO.isUse())
  946. continue;
  947. Register MOReg = MO.getReg();
  948. if (MOReg != Reg)
  949. continue;
  950. if (TII->hasHighOperandLatency(SchedModel, MRI, MI, DefIdx, UseMI, i))
  951. return true;
  952. }
  953. // Only look at the first in loop use.
  954. break;
  955. }
  956. return false;
  957. }
  958. /// Return true if the instruction is marked "cheap" or the operand latency
  959. /// between its def and a use is one or less.
  960. bool MachineLICMBase::IsCheapInstruction(MachineInstr &MI) const {
  961. if (TII->isAsCheapAsAMove(MI) || MI.isCopyLike())
  962. return true;
  963. bool isCheap = false;
  964. unsigned NumDefs = MI.getDesc().getNumDefs();
  965. for (unsigned i = 0, e = MI.getNumOperands(); NumDefs && i != e; ++i) {
  966. MachineOperand &DefMO = MI.getOperand(i);
  967. if (!DefMO.isReg() || !DefMO.isDef())
  968. continue;
  969. --NumDefs;
  970. Register Reg = DefMO.getReg();
  971. if (Register::isPhysicalRegister(Reg))
  972. continue;
  973. if (!TII->hasLowDefLatency(SchedModel, MI, i))
  974. return false;
  975. isCheap = true;
  976. }
  977. return isCheap;
  978. }
  979. /// Visit BBs from header to current BB, check if hoisting an instruction of the
  980. /// given cost matrix can cause high register pressure.
  981. bool
  982. MachineLICMBase::CanCauseHighRegPressure(const DenseMap<unsigned, int>& Cost,
  983. bool CheapInstr) {
  984. for (const auto &RPIdAndCost : Cost) {
  985. if (RPIdAndCost.second <= 0)
  986. continue;
  987. unsigned Class = RPIdAndCost.first;
  988. int Limit = RegLimit[Class];
  989. // Don't hoist cheap instructions if they would increase register pressure,
  990. // even if we're under the limit.
  991. if (CheapInstr && !HoistCheapInsts)
  992. return true;
  993. for (const auto &RP : BackTrace)
  994. if (static_cast<int>(RP[Class]) + RPIdAndCost.second >= Limit)
  995. return true;
  996. }
  997. return false;
  998. }
  999. /// Traverse the back trace from header to the current block and update their
  1000. /// register pressures to reflect the effect of hoisting MI from the current
  1001. /// block to the preheader.
  1002. void MachineLICMBase::UpdateBackTraceRegPressure(const MachineInstr *MI) {
  1003. // First compute the 'cost' of the instruction, i.e. its contribution
  1004. // to register pressure.
  1005. auto Cost = calcRegisterCost(MI, /*ConsiderSeen=*/false,
  1006. /*ConsiderUnseenAsDef=*/false);
  1007. // Update register pressure of blocks from loop header to current block.
  1008. for (auto &RP : BackTrace)
  1009. for (const auto &RPIdAndCost : Cost)
  1010. RP[RPIdAndCost.first] += RPIdAndCost.second;
  1011. }
/// Return true if it is potentially profitable to hoist the given loop
/// invariant.
bool MachineLICMBase::IsProfitableToHoist(MachineInstr &MI) {
  // IMPLICIT_DEF costs nothing to hoist.
  if (MI.isImplicitDef())
    return true;

  // Besides removing computation from the loop, hoisting an instruction has
  // these effects:
  //
  // - The value defined by the instruction becomes live across the entire
  //   loop. This increases register pressure in the loop.
  //
  // - If the value is used by a PHI in the loop, a copy will be required for
  //   lowering the PHI after extending the live range.
  //
  // - When hoisting the last use of a value in the loop, that value no longer
  //   needs to be live in the loop. This lowers register pressure in the loop.

  // Copies feeding invariant stores are always profitable when constant-store
  // hoisting is enabled.
  if (HoistConstStores && isCopyFeedingInvariantStore(MI, MRI, TRI))
    return true;

  bool CheapInstr = IsCheapInstruction(MI);
  bool CreatesCopy = HasLoopPHIUse(&MI);

  // Don't hoist a cheap instruction if it would create a copy in the loop.
  if (CheapInstr && CreatesCopy) {
    LLVM_DEBUG(dbgs() << "Won't hoist cheap instr with loop PHI use: " << MI);
    return false;
  }

  // Rematerializable instructions should always be hoisted since the register
  // allocator can just pull them down again when needed.
  if (TII->isTriviallyReMaterializable(MI, AA))
    return true;

  // A high-latency def used inside the loop is worth hoisting regardless of
  // register pressure. Only explicit virtual-register defs are considered.
  // FIXME: If there are long latency loop-invariant instructions inside the
  // loop at this point, why didn't the optimizer's LICM hoist them?
  for (unsigned i = 0, e = MI.getDesc().getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI.getOperand(i);
    if (!MO.isReg() || MO.isImplicit())
      continue;
    Register Reg = MO.getReg();
    if (!Register::isVirtualRegister(Reg))
      continue;
    if (MO.isDef() && HasHighOperandLatency(MI, i, Reg)) {
      LLVM_DEBUG(dbgs() << "Hoist High Latency: " << MI);
      ++NumHighLatency;
      return true;
    }
  }

  // Estimate register pressure to determine whether to LICM the instruction.
  // In low register pressure situation, we can be more aggressive about
  // hoisting. Also, favors hoisting long latency instructions even in
  // moderately high pressure situation.
  // Cheap instructions will only be hoisted if they don't increase register
  // pressure at all.
  auto Cost = calcRegisterCost(&MI, /*ConsiderSeen=*/false,
                               /*ConsiderUnseenAsDef=*/false);

  // Visit BBs from header to current BB, if hoisting this doesn't cause
  // high register pressure, then it's safe to proceed.
  if (!CanCauseHighRegPressure(Cost, CheapInstr)) {
    LLVM_DEBUG(dbgs() << "Hoist non-reg-pressure: " << MI);
    ++NumLowRP;
    return true;
  }

  // Don't risk increasing register pressure if it would create copies.
  if (CreatesCopy) {
    LLVM_DEBUG(dbgs() << "Won't hoist instr with loop PHI use: " << MI);
    return false;
  }

  // Do not "speculate" in high register pressure situation. If an
  // instruction is not guaranteed to be executed in the loop, it's best to be
  // conservative.
  if (AvoidSpeculation &&
      (!IsGuaranteedToExecute(MI.getParent()) && !MayCSE(&MI))) {
    LLVM_DEBUG(dbgs() << "Won't speculate: " << MI);
    return false;
  }

  // High register pressure situation, only hoist if the instruction is going
  // to be remat'ed.
  if (!TII->isTriviallyReMaterializable(MI, AA) &&
      !MI.isDereferenceableInvariantLoad(AA)) {
    LLVM_DEBUG(dbgs() << "Can't remat / high reg-pressure: " << MI);
    return false;
  }

  return true;
}
/// Unfold a load from the given machineinstr if the load itself could be
/// hoisted. Return the unfolded and hoistable load, or null if the load
/// couldn't be unfolded or if it wouldn't be hoistable.
MachineInstr *MachineLICMBase::ExtractHoistableLoad(MachineInstr *MI) {
  // Don't unfold simple loads.
  if (MI->canFoldAsLoad())
    return nullptr;

  // If not, we may be able to unfold a load and hoist that.
  // First test whether the instruction is loading from an amenable
  // memory location.
  if (!MI->isDereferenceableInvariantLoad(AA))
    return nullptr;

  // Next determine the register class for a temporary register.
  unsigned LoadRegIndex;
  unsigned NewOpc =
    TII->getOpcodeAfterMemoryUnfold(MI->getOpcode(),
                                    /*UnfoldLoad=*/true,
                                    /*UnfoldStore=*/false,
                                    &LoadRegIndex);
  // NewOpc == 0 means the target cannot unfold this instruction.
  if (NewOpc == 0) return nullptr;
  const MCInstrDesc &MID = TII->get(NewOpc);
  MachineFunction &MF = *MI->getMF();
  const TargetRegisterClass *RC = TII->getRegClass(MID, LoadRegIndex, TRI, MF);
  // Ok, we're unfolding. Create a temporary register and do the unfold.
  Register Reg = MRI->createVirtualRegister(RC);

  SmallVector<MachineInstr *, 2> NewMIs;
  bool Success = TII->unfoldMemoryOperand(MF, *MI, Reg,
                                          /*UnfoldLoad=*/true,
                                          /*UnfoldStore=*/false, NewMIs);
  (void)Success;
  assert(Success &&
         "unfoldMemoryOperand failed when getOpcodeAfterMemoryUnfold "
         "succeeded!");
  assert(NewMIs.size() == 2 &&
         "Unfolded a load into multiple instructions!");
  // NewMIs[0] is the load, NewMIs[1] is the remaining computation; insert
  // both where the original instruction was.
  MachineBasicBlock *MBB = MI->getParent();
  MachineBasicBlock::iterator Pos = MI;
  MBB->insert(Pos, NewMIs[0]);
  MBB->insert(Pos, NewMIs[1]);
  // If unfolding produced a load that wasn't loop-invariant or profitable to
  // hoist, discard the new instructions and bail.
  if (!IsLoopInvariantInst(*NewMIs[0]) || !IsProfitableToHoist(*NewMIs[0])) {
    NewMIs[0]->eraseFromParent();
    NewMIs[1]->eraseFromParent();
    return nullptr;
  }

  // Update register pressure for the unfolded instruction.
  UpdateRegPressure(NewMIs[1]);

  // Otherwise we successfully unfolded a load that we can hoist.
  MI->eraseFromParent();
  return NewMIs[0];
}
  1145. /// Initialize the CSE map with instructions that are in the current loop
  1146. /// preheader that may become duplicates of instructions that are hoisted
  1147. /// out of the loop.
  1148. void MachineLICMBase::InitCSEMap(MachineBasicBlock *BB) {
  1149. for (MachineInstr &MI : *BB)
  1150. CSEMap[MI.getOpcode()].push_back(&MI);
  1151. }
  1152. /// Find an instruction amount PrevMIs that is a duplicate of MI.
  1153. /// Return this instruction if it's found.
  1154. const MachineInstr*
  1155. MachineLICMBase::LookForDuplicate(const MachineInstr *MI,
  1156. std::vector<const MachineInstr*> &PrevMIs) {
  1157. for (const MachineInstr *PrevMI : PrevMIs)
  1158. if (TII->produceSameValue(*MI, *PrevMI, (PreRegAlloc ? MRI : nullptr)))
  1159. return PrevMI;
  1160. return nullptr;
  1161. }
/// Given a LICM'ed instruction, look for an instruction on the preheader that
/// computes the same value. If it's found, do a RAU on with the definition of
/// the existing instruction rather than hoisting the instruction to the
/// preheader.
bool MachineLICMBase::EliminateCSE(MachineInstr *MI,
    DenseMap<unsigned, std::vector<const MachineInstr *>>::iterator &CI) {
  // Do not CSE implicit_def so ProcessImplicitDefs can properly propagate
  // the undef property onto uses.
  if (CI == CSEMap.end() || MI->isImplicitDef())
    return false;

  if (const MachineInstr *Dup = LookForDuplicate(MI, CI->second)) {
    LLVM_DEBUG(dbgs() << "CSEing " << *MI << " with " << *Dup);

    // Replace virtual registers defined by MI by their counterparts defined
    // by Dup.
    SmallVector<unsigned, 2> Defs;
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      const MachineOperand &MO = MI->getOperand(i);

      // Physical registers may not differ here.
      assert((!MO.isReg() || MO.getReg() == 0 ||
              !Register::isPhysicalRegister(MO.getReg()) ||
              MO.getReg() == Dup->getOperand(i).getReg()) &&
             "Instructions with different phys regs are not identical!");

      if (MO.isReg() && MO.isDef() &&
          !Register::isPhysicalRegister(MO.getReg()))
        Defs.push_back(i);
    }

    // Constrain each of Dup's def registers to MI's register class. If any
    // constraint fails, roll back the classes already changed and abandon
    // the CSE entirely.
    SmallVector<const TargetRegisterClass*, 2> OrigRCs;
    for (unsigned i = 0, e = Defs.size(); i != e; ++i) {
      unsigned Idx = Defs[i];
      Register Reg = MI->getOperand(Idx).getReg();
      Register DupReg = Dup->getOperand(Idx).getReg();
      OrigRCs.push_back(MRI->getRegClass(DupReg));

      if (!MRI->constrainRegClass(DupReg, MRI->getRegClass(Reg))) {
        // Restore old RCs if more than one defs.
        for (unsigned j = 0; j != i; ++j)
          MRI->setRegClass(Dup->getOperand(Defs[j]).getReg(), OrigRCs[j]);
        return false;
      }
    }

    // All constraints succeeded: redirect every use of MI's defs to Dup's
    // defs. Kill flags on DupReg may no longer be valid once its live range
    // absorbs MI's uses, so clear them.
    for (unsigned Idx : Defs) {
      Register Reg = MI->getOperand(Idx).getReg();
      Register DupReg = Dup->getOperand(Idx).getReg();
      MRI->replaceRegWith(Reg, DupReg);
      MRI->clearKillFlags(DupReg);
    }

    MI->eraseFromParent();
    ++NumCSEed;
    return true;
  }
  return false;
}
  1213. /// Return true if the given instruction will be CSE'd if it's hoisted out of
  1214. /// the loop.
  1215. bool MachineLICMBase::MayCSE(MachineInstr *MI) {
  1216. unsigned Opcode = MI->getOpcode();
  1217. DenseMap<unsigned, std::vector<const MachineInstr *>>::iterator
  1218. CI = CSEMap.find(Opcode);
  1219. // Do not CSE implicit_def so ProcessImplicitDefs can properly propagate
  1220. // the undef property onto uses.
  1221. if (CI == CSEMap.end() || MI->isImplicitDef())
  1222. return false;
  1223. return LookForDuplicate(MI, CI->second) != nullptr;
  1224. }
/// When an instruction is found to use only loop invariant operands
/// that are safe to hoist, this instruction is called to do the dirty work.
/// It returns true if the instruction is hoisted.
bool MachineLICMBase::Hoist(MachineInstr *MI, MachineBasicBlock *Preheader) {
  // First check whether we should hoist this instruction.
  if (!IsLoopInvariantInst(*MI) || !IsProfitableToHoist(*MI)) {
    // If not, try unfolding a hoistable load.
    MI = ExtractHoistableLoad(MI);
    if (!MI) return false;
  }

  // If we have hoisted an instruction that may store, it can only be a constant
  // store.
  if (MI->mayStore())
    NumStoreConst++;

  // Now move the instructions to the predecessor, inserting it before any
  // terminator instructions.
  LLVM_DEBUG({
      dbgs() << "Hoisting " << *MI;
      if (MI->getParent()->getBasicBlock())
        dbgs() << " from " << printMBBReference(*MI->getParent());
      if (Preheader->getBasicBlock())
        dbgs() << " to " << printMBBReference(*Preheader);
      dbgs() << "\n";
    });

  // If this is the first instruction being hoisted to the preheader,
  // initialize the CSE map with potential common expressions.
  if (FirstInLoop) {
    InitCSEMap(Preheader);
    FirstInLoop = false;
  }

  // Look for opportunity to CSE the hoisted instruction. If a duplicate
  // already exists in the preheader, EliminateCSE erases MI instead of
  // moving it.
  unsigned Opcode = MI->getOpcode();
  DenseMap<unsigned, std::vector<const MachineInstr *>>::iterator
    CI = CSEMap.find(Opcode);
  if (!EliminateCSE(MI, CI)) {
    // Otherwise, splice the instruction to the preheader.
    Preheader->splice(Preheader->getFirstTerminator(),MI->getParent(),MI);

    // Since we are moving the instruction out of its basic block, we do not
    // retain its debug location. Doing so would degrade the debugging
    // experience and adversely affect the accuracy of profiling information.
    MI->setDebugLoc(DebugLoc());

    // Update register pressure for BBs from header to this block.
    UpdateBackTraceRegPressure(MI);

    // Clear the kill flags of any register this instruction defines,
    // since they may need to be live throughout the entire loop
    // rather than just live for part of it.
    for (MachineOperand &MO : MI->operands())
      if (MO.isReg() && MO.isDef() && !MO.isDead())
        MRI->clearKillFlags(MO.getReg());

    // Add to the CSE map so later hoisted instructions can CSE with this one.
    if (CI != CSEMap.end())
      CI->second.push_back(MI);
    else
      CSEMap[Opcode].push_back(MI);
  }

  ++NumHoisted;
  Changed = true;

  return true;
}
/// Get the preheader for the current loop, splitting a critical edge if needed.
MachineBasicBlock *MachineLICMBase::getCurPreheader() {
  // Determine the block to which to hoist instructions. If we can't find a
  // suitable loop predecessor, we can't do any hoisting.

  // If we've tried to get a preheader and failed, don't try again. The value
  // (MachineBasicBlock*)-1 is a sentinel meaning "already tried and failed",
  // distinguishing it from nullptr, which means "not tried yet".
  if (CurPreheader == reinterpret_cast<MachineBasicBlock *>(-1))
    return nullptr;

  if (!CurPreheader) {
    CurPreheader = CurLoop->getLoopPreheader();
    if (!CurPreheader) {
      // No preheader; fall back to the unique loop predecessor and split
      // the critical edge from it to the header to manufacture one.
      MachineBasicBlock *Pred = CurLoop->getLoopPredecessor();
      if (!Pred) {
        CurPreheader = reinterpret_cast<MachineBasicBlock *>(-1);
        return nullptr;
      }

      CurPreheader = Pred->SplitCriticalEdge(CurLoop->getHeader(), *this);
      if (!CurPreheader) {
        CurPreheader = reinterpret_cast<MachineBasicBlock *>(-1);
        return nullptr;
      }
    }
  }
  return CurPreheader;
}