  1. //===- UnrollLoopPeel.cpp - Loop peeling utilities ------------------------===//
  2. //
  3. // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
  4. // See https://llvm.org/LICENSE.txt for license information.
  5. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
  6. //
  7. //===----------------------------------------------------------------------===//
  8. //
  9. // This file implements some loop unrolling utilities for peeling loops
  10. // with dynamically inferred (from PGO) trip counts. See LoopUnroll.cpp for
  11. // unrolling loops with compile-time constant trip counts.
  12. //
  13. //===----------------------------------------------------------------------===//
  14. #include "llvm/ADT/DenseMap.h"
  15. #include "llvm/ADT/Optional.h"
  16. #include "llvm/ADT/SmallVector.h"
  17. #include "llvm/ADT/Statistic.h"
  18. #include "llvm/Analysis/LoopInfo.h"
  19. #include "llvm/Analysis/LoopIterator.h"
  20. #include "llvm/Analysis/ScalarEvolution.h"
  21. #include "llvm/Analysis/ScalarEvolutionExpressions.h"
  22. #include "llvm/Analysis/TargetTransformInfo.h"
  23. #include "llvm/IR/BasicBlock.h"
  24. #include "llvm/IR/Dominators.h"
  25. #include "llvm/IR/Function.h"
  26. #include "llvm/IR/InstrTypes.h"
  27. #include "llvm/IR/Instruction.h"
  28. #include "llvm/IR/Instructions.h"
  29. #include "llvm/IR/LLVMContext.h"
  30. #include "llvm/IR/MDBuilder.h"
  31. #include "llvm/IR/Metadata.h"
  32. #include "llvm/IR/PatternMatch.h"
  33. #include "llvm/Support/Casting.h"
  34. #include "llvm/Support/CommandLine.h"
  35. #include "llvm/Support/Debug.h"
  36. #include "llvm/Support/raw_ostream.h"
  37. #include "llvm/Transforms/Utils/BasicBlockUtils.h"
  38. #include "llvm/Transforms/Utils/Cloning.h"
  39. #include "llvm/Transforms/Utils/LoopSimplify.h"
  40. #include "llvm/Transforms/Utils/LoopUtils.h"
  41. #include "llvm/Transforms/Utils/UnrollLoop.h"
  42. #include "llvm/Transforms/Utils/ValueMapper.h"
  43. #include <algorithm>
  44. #include <cassert>
  45. #include <cstdint>
  46. #include <limits>
using namespace llvm;
using namespace llvm::PatternMatch;

#define DEBUG_TYPE "loop-unroll"

STATISTIC(NumPeeled, "Number of loops peeled");

// Upper bound on the profile-estimated average trip count for which peeling
// is still considered profitable (also bounds the heuristic peel counts).
static cl::opt<unsigned> UnrollPeelMaxCount(
    "unroll-peel-max-count", cl::init(7), cl::Hidden,
    cl::desc("Max average trip count which will cause loop peeling."));

// When set (getNumOccurrences() > 0), overrides all peeling heuristics.
static cl::opt<unsigned> UnrollForcePeelCount(
    "unroll-force-peel-count", cl::init(0), cl::Hidden,
    cl::desc("Force a peel count regardless of profiling information."));

// Designates that a Phi is estimated to become invariant after an "infinite"
// number of loop iterations (i.e. only may become an invariant if the loop is
// fully unrolled).
static const unsigned InfiniteIterationsToInvariance =
    std::numeric_limits<unsigned>::max();
  62. // Check whether we are capable of peeling this loop.
  63. bool llvm::canPeel(Loop *L) {
  64. // Make sure the loop is in simplified form
  65. if (!L->isLoopSimplifyForm())
  66. return false;
  67. // Only peel loops that contain a single exit
  68. if (!L->getExitingBlock() || !L->getUniqueExitBlock())
  69. return false;
  70. // Don't try to peel loops where the latch is not the exiting block.
  71. // This can be an indication of two different things:
  72. // 1) The loop is not rotated.
  73. // 2) The loop contains irreducible control flow that involves the latch.
  74. if (L->getLoopLatch() != L->getExitingBlock())
  75. return false;
  76. return true;
  77. }
  78. // This function calculates the number of iterations after which the given Phi
  79. // becomes an invariant. The pre-calculated values are memorized in the map. The
  80. // function (shortcut is I) is calculated according to the following definition:
  81. // Given %x = phi <Inputs from above the loop>, ..., [%y, %back.edge].
  82. // If %y is a loop invariant, then I(%x) = 1.
  83. // If %y is a Phi from the loop header, I(%x) = I(%y) + 1.
  84. // Otherwise, I(%x) is infinite.
  85. // TODO: Actually if %y is an expression that depends only on Phi %z and some
  86. // loop invariants, we can estimate I(%x) = I(%z) + 1. The example
  87. // looks like:
  88. // %x = phi(0, %a), <-- becomes invariant starting from 3rd iteration.
  89. // %y = phi(0, 5),
  90. // %a = %y + 1.
  91. static unsigned calculateIterationsToInvariance(
  92. PHINode *Phi, Loop *L, BasicBlock *BackEdge,
  93. SmallDenseMap<PHINode *, unsigned> &IterationsToInvariance) {
  94. assert(Phi->getParent() == L->getHeader() &&
  95. "Non-loop Phi should not be checked for turning into invariant.");
  96. assert(BackEdge == L->getLoopLatch() && "Wrong latch?");
  97. // If we already know the answer, take it from the map.
  98. auto I = IterationsToInvariance.find(Phi);
  99. if (I != IterationsToInvariance.end())
  100. return I->second;
  101. // Otherwise we need to analyze the input from the back edge.
  102. Value *Input = Phi->getIncomingValueForBlock(BackEdge);
  103. // Place infinity to map to avoid infinite recursion for cycled Phis. Such
  104. // cycles can never stop on an invariant.
  105. IterationsToInvariance[Phi] = InfiniteIterationsToInvariance;
  106. unsigned ToInvariance = InfiniteIterationsToInvariance;
  107. if (L->isLoopInvariant(Input))
  108. ToInvariance = 1u;
  109. else if (PHINode *IncPhi = dyn_cast<PHINode>(Input)) {
  110. // Only consider Phis in header block.
  111. if (IncPhi->getParent() != L->getHeader())
  112. return InfiniteIterationsToInvariance;
  113. // If the input becomes an invariant after X iterations, then our Phi
  114. // becomes an invariant after X + 1 iterations.
  115. unsigned InputToInvariance = calculateIterationsToInvariance(
  116. IncPhi, L, BackEdge, IterationsToInvariance);
  117. if (InputToInvariance != InfiniteIterationsToInvariance)
  118. ToInvariance = InputToInvariance + 1u;
  119. }
  120. // If we found that this Phi lies in an invariant chain, update the map.
  121. if (ToInvariance != InfiniteIterationsToInvariance)
  122. IterationsToInvariance[Phi] = ToInvariance;
  123. return ToInvariance;
  124. }
  125. // Return the number of iterations to peel off that make conditions in the
  126. // body true/false. For example, if we peel 2 iterations off the loop below,
  127. // the condition i < 2 can be evaluated at compile time.
  128. // for (i = 0; i < n; i++)
  129. // if (i < 2)
  130. // ..
  131. // else
  132. // ..
  133. // }
// Return the number of iterations to peel off that make conditions in the
// body true/false. For example, if we peel 2 iterations off the loop below,
// the condition i < 2 can be evaluated at compile time.
//  for (i = 0; i < n; i++)
//    if (i < 2)
//      ..
//    else
//      ..
//   }
//
// Only compares of an affine AddRec of this loop against a loop-invariant
// bound are considered; the returned count is capped at \p MaxPeelCount.
static unsigned countToEliminateCompares(Loop &L, unsigned MaxPeelCount,
                                         ScalarEvolution &SE) {
  assert(L.isLoopSimplifyForm() && "Loop needs to be in loop simplify form");
  unsigned DesiredPeelCount = 0;

  for (auto *BB : L.blocks()) {
    // Only conditional branches are candidates for elimination.
    auto *BI = dyn_cast<BranchInst>(BB->getTerminator());
    if (!BI || BI->isUnconditional())
      continue;

    // Ignore loop exit condition.
    if (L.getLoopLatch() == BB)
      continue;

    Value *Condition = BI->getCondition();
    Value *LeftVal, *RightVal;
    CmpInst::Predicate Pred;
    if (!match(Condition, m_ICmp(Pred, m_Value(LeftVal), m_Value(RightVal))))
      continue;

    const SCEV *LeftSCEV = SE.getSCEV(LeftVal);
    const SCEV *RightSCEV = SE.getSCEV(RightVal);

    // Do not consider predicates that are known to be true or false
    // independently of the loop iteration.
    if (SE.isKnownPredicate(Pred, LeftSCEV, RightSCEV) ||
        SE.isKnownPredicate(ICmpInst::getInversePredicate(Pred), LeftSCEV,
                            RightSCEV))
      continue;

    // Check if we have a condition with one AddRec and one non AddRec
    // expression. Normalize LeftSCEV to be the AddRec.
    if (!isa<SCEVAddRecExpr>(LeftSCEV)) {
      if (isa<SCEVAddRecExpr>(RightSCEV)) {
        std::swap(LeftSCEV, RightSCEV);
        Pred = ICmpInst::getSwappedPredicate(Pred);
      } else
        continue;
    }

    const SCEVAddRecExpr *LeftAR = cast<SCEVAddRecExpr>(LeftSCEV);

    // Avoid huge SCEV computations in the loop below, make sure we only
    // consider AddRecs of the loop we are trying to peel and avoid
    // non-monotonic predicates, as we will not be able to simplify the loop
    // body.
    // FIXME: For the non-monotonic predicates ICMP_EQ and ICMP_NE we can
    //        simplify the loop, if we peel 1 additional iteration, if there
    //        is no wrapping.
    bool Increasing;
    if (!LeftAR->isAffine() || LeftAR->getLoop() != &L ||
        !SE.isMonotonicPredicate(LeftAR, Pred, Increasing))
      continue;
    (void)Increasing;

    // Check if extending the current DesiredPeelCount lets us evaluate Pred
    // or !Pred in the loop body statically.
    unsigned NewPeelCount = DesiredPeelCount;

    // Value of the AddRec on the first not-yet-peeled iteration.
    const SCEV *IterVal = LeftAR->evaluateAtIteration(
        SE.getConstant(LeftSCEV->getType(), NewPeelCount), SE);

    // If the original condition is not known, get the negated predicate
    // (which holds on the else branch) and check if it is known. This allows
    // us to peel of iterations that make the original condition false.
    if (!SE.isKnownPredicate(Pred, IterVal, RightSCEV))
      Pred = ICmpInst::getInversePredicate(Pred);

    // Advance one iteration at a time while the (possibly negated) predicate
    // is still statically known, up to the peel-count budget.
    const SCEV *Step = LeftAR->getStepRecurrence(SE);
    while (NewPeelCount < MaxPeelCount &&
           SE.isKnownPredicate(Pred, IterVal, RightSCEV)) {
      IterVal = SE.getAddExpr(IterVal, Step);
      NewPeelCount++;
    }

    // Only peel the loop if the monotonic predicate !Pred becomes known in the
    // first iteration of the loop body after peeling.
    if (NewPeelCount > DesiredPeelCount &&
        SE.isKnownPredicate(ICmpInst::getInversePredicate(Pred), IterVal,
                            RightSCEV))
      DesiredPeelCount = NewPeelCount;
  }

  return DesiredPeelCount;
}
  205. // Return the number of iterations we want to peel off.
/// Compute the number of iterations we want to peel off, storing the result
/// in \p UP.PeelCount (0 means "do not peel"). Heuristics are tried in
/// priority order: user-forced count, Phi-invariance / compare elimination,
/// then profile-based estimated trip count.
void llvm::computePeelCount(Loop *L, unsigned LoopSize,
                            TargetTransformInfo::UnrollingPreferences &UP,
                            unsigned &TripCount, ScalarEvolution &SE) {
  assert(LoopSize > 0 && "Zero loop size is not allowed!");

  // Save the UP.PeelCount value set by the target in
  // TTI.getUnrollingPreferences or by the flag -unroll-peel-count.
  unsigned TargetPeelCount = UP.PeelCount;
  UP.PeelCount = 0;
  if (!canPeel(L))
    return;

  // Only try to peel innermost loops.
  if (!L->empty())
    return;

  // If the user provided a peel count, use that.
  bool UserPeelCount = UnrollForcePeelCount.getNumOccurrences() > 0;
  if (UserPeelCount) {
    LLVM_DEBUG(dbgs() << "Force-peeling first " << UnrollForcePeelCount
                      << " iterations.\n");
    UP.PeelCount = UnrollForcePeelCount;
    return;
  }

  // Skip peeling if it's disabled.
  if (!UP.AllowPeeling)
    return;

  // Here we try to get rid of Phis which become invariants after 1, 2, ..., N
  // iterations of the loop. For this we compute the number for iterations after
  // which every Phi is guaranteed to become an invariant, and try to peel the
  // maximum number of iterations among these values, thus turning all those
  // Phis into invariants.
  // First, check that we can peel at least one iteration.
  if (2 * LoopSize <= UP.Threshold && UnrollPeelMaxCount > 0) {
    // Store the pre-calculated values here.
    SmallDenseMap<PHINode *, unsigned> IterationsToInvariance;
    // Now go through all Phis to calculate their the number of iterations they
    // need to become invariants.
    // Start the max computation with the UP.PeelCount value set by the target
    // in TTI.getUnrollingPreferences or by the flag -unroll-peel-count.
    unsigned DesiredPeelCount = TargetPeelCount;
    BasicBlock *BackEdge = L->getLoopLatch();
    assert(BackEdge && "Loop is not in simplified form?");
    for (auto BI = L->getHeader()->begin(); isa<PHINode>(&*BI); ++BI) {
      PHINode *Phi = cast<PHINode>(&*BI);
      unsigned ToInvariance = calculateIterationsToInvariance(
          Phi, L, BackEdge, IterationsToInvariance);
      if (ToInvariance != InfiniteIterationsToInvariance)
        DesiredPeelCount = std::max(DesiredPeelCount, ToInvariance);
    }

    // Pay respect to limitations implied by loop size and the max peel count.
    // Note: the guard above ensures UP.Threshold / LoopSize >= 2, so
    // MaxPeelCount is at least 1 here.
    unsigned MaxPeelCount = UnrollPeelMaxCount;
    MaxPeelCount = std::min(MaxPeelCount, UP.Threshold / LoopSize - 1);

    DesiredPeelCount = std::max(DesiredPeelCount,
                                countToEliminateCompares(*L, MaxPeelCount, SE));

    if (DesiredPeelCount > 0) {
      DesiredPeelCount = std::min(DesiredPeelCount, MaxPeelCount);
      // Consider max peel count limitation.
      assert(DesiredPeelCount > 0 && "Wrong loop size estimation?");
      LLVM_DEBUG(dbgs() << "Peel " << DesiredPeelCount
                        << " iteration(s) to turn"
                        << " some Phis into invariants.\n");
      UP.PeelCount = DesiredPeelCount;
      return;
    }
  }

  // Bail if we know the statically calculated trip count.
  // In this case we rather prefer partial unrolling.
  if (TripCount)
    return;

  // If we don't know the trip count, but have reason to believe the average
  // trip count is low, peeling should be beneficial, since we will usually
  // hit the peeled section.
  // We only do this in the presence of profile information, since otherwise
  // our estimates of the trip count are not reliable enough.
  if (L->getHeader()->getParent()->hasProfileData()) {
    Optional<unsigned> PeelCount = getLoopEstimatedTripCount(L);
    if (!PeelCount)
      return;

    LLVM_DEBUG(dbgs() << "Profile-based estimated trip count is " << *PeelCount
                      << "\n");

    if (*PeelCount) {
      // Peel only if the fully-peeled body still fits under the size budget
      // and the estimated trip count is below the max-peel flag.
      if ((*PeelCount <= UnrollPeelMaxCount) &&
          (LoopSize * (*PeelCount + 1) <= UP.Threshold)) {
        LLVM_DEBUG(dbgs() << "Peeling first " << *PeelCount
                          << " iterations.\n");
        UP.PeelCount = *PeelCount;
        return;
      }
      LLVM_DEBUG(dbgs() << "Requested peel count: " << *PeelCount << "\n");
      LLVM_DEBUG(dbgs() << "Max peel count: " << UnrollPeelMaxCount << "\n");
      LLVM_DEBUG(dbgs() << "Peel cost: " << LoopSize * (*PeelCount + 1)
                        << "\n");
      LLVM_DEBUG(dbgs() << "Max peel cost: " << UP.Threshold << "\n");
    }
  }
}
  300. /// Update the branch weights of the latch of a peeled-off loop
  301. /// iteration.
  302. /// This sets the branch weights for the latch of the recently peeled off loop
  303. /// iteration correctly.
  304. /// Our goal is to make sure that:
  305. /// a) The total weight of all the copies of the loop body is preserved.
  306. /// b) The total weight of the loop exit is preserved.
  307. /// c) The body weight is reasonably distributed between the peeled iterations.
  308. ///
  309. /// \param Header The copy of the header block that belongs to next iteration.
  310. /// \param LatchBR The copy of the latch branch that belongs to this iteration.
  311. /// \param IterNumber The serial number of the iteration that was just
  312. /// peeled off.
  313. /// \param AvgIters The average number of iterations we expect the loop to have.
  314. /// \param[in,out] PeeledHeaderWeight The total number of dynamic loop
  315. /// iterations that are unaccounted for. As an input, it represents the number
  316. /// of times we expect to enter the header of the iteration currently being
  317. /// peeled off. The output is the number of times we expect to enter the
  318. /// header of the next iteration.
  319. static void updateBranchWeights(BasicBlock *Header, BranchInst *LatchBR,
  320. unsigned IterNumber, unsigned AvgIters,
  321. uint64_t &PeeledHeaderWeight) {
  322. // FIXME: Pick a more realistic distribution.
  323. // Currently the proportion of weight we assign to the fall-through
  324. // side of the branch drops linearly with the iteration number, and we use
  325. // a 0.9 fudge factor to make the drop-off less sharp...
  326. if (PeeledHeaderWeight) {
  327. uint64_t FallThruWeight =
  328. PeeledHeaderWeight * ((float)(AvgIters - IterNumber) / AvgIters * 0.9);
  329. uint64_t ExitWeight = PeeledHeaderWeight - FallThruWeight;
  330. PeeledHeaderWeight -= ExitWeight;
  331. unsigned HeaderIdx = (LatchBR->getSuccessor(0) == Header ? 0 : 1);
  332. MDBuilder MDB(LatchBR->getContext());
  333. MDNode *WeightNode =
  334. HeaderIdx ? MDB.createBranchWeights(ExitWeight, FallThruWeight)
  335. : MDB.createBranchWeights(FallThruWeight, ExitWeight);
  336. LatchBR->setMetadata(LLVMContext::MD_prof, WeightNode);
  337. }
  338. }
  339. /// Clones the body of the loop L, putting it between \p InsertTop and \p
  340. /// InsertBot.
  341. /// \param IterNumber The serial number of the iteration currently being
  342. /// peeled off.
  343. /// \param Exit The exit block of the original loop.
  344. /// \param[out] NewBlocks A list of the blocks in the newly created clone
  345. /// \param[out] VMap The value map between the loop and the new clone.
  346. /// \param LoopBlocks A helper for DFS-traversal of the loop.
  347. /// \param LVMap A value-map that maps instructions from the original loop to
  348. /// instructions in the last peeled-off iteration.
/// Clones the body of the loop L, putting it between \p InsertTop and \p
/// InsertBot.
/// \param IterNumber The serial number of the iteration currently being
/// peeled off.
/// \param Exit The exit block of the original loop.
/// \param[out] NewBlocks A list of the blocks in the newly created clone
/// \param[out] VMap The value map between the loop and the new clone.
/// \param LoopBlocks A helper for DFS-traversal of the loop.
/// \param LVMap A value-map that maps instructions from the original loop to
/// instructions in the last peeled-off iteration.
static void cloneLoopBlocks(Loop *L, unsigned IterNumber, BasicBlock *InsertTop,
                            BasicBlock *InsertBot, BasicBlock *Exit,
                            SmallVectorImpl<BasicBlock *> &NewBlocks,
                            LoopBlocksDFS &LoopBlocks, ValueToValueMapTy &VMap,
                            ValueToValueMapTy &LVMap, DominatorTree *DT,
                            LoopInfo *LI) {
  BasicBlock *Header = L->getHeader();
  BasicBlock *Latch = L->getLoopLatch();
  BasicBlock *PreHeader = L->getLoopPreheader();

  Function *F = Header->getParent();
  LoopBlocksDFS::RPOIterator BlockBegin = LoopBlocks.beginRPO();
  LoopBlocksDFS::RPOIterator BlockEnd = LoopBlocks.endRPO();
  Loop *ParentLoop = L->getParentLoop();

  // For each block in the original loop, create a new copy,
  // and update the value map with the newly created values.
  for (LoopBlocksDFS::RPOIterator BB = BlockBegin; BB != BlockEnd; ++BB) {
    BasicBlock *NewBB = CloneBasicBlock(*BB, VMap, ".peel", F);
    NewBlocks.push_back(NewBB);

    // The clones do not form a loop themselves; they belong to the parent
    // loop (if any).
    if (ParentLoop)
      ParentLoop->addBasicBlockToLoop(NewBB, *LI);

    VMap[*BB] = NewBB;

    // If dominator tree is available, insert nodes to represent cloned blocks.
    if (DT) {
      if (Header == *BB)
        DT->addNewBlock(NewBB, InsertTop);
      else {
        DomTreeNode *IDom = DT->getNode(*BB)->getIDom();
        // VMap must contain entry for IDom, as the iteration order is RPO.
        DT->addNewBlock(NewBB, cast<BasicBlock>(VMap[IDom->getBlock()]));
      }
    }
  }

  // Hook-up the control flow for the newly inserted blocks.
  // The new header is hooked up directly to the "top", which is either
  // the original loop preheader (for the first iteration) or the previous
  // iteration's exiting block (for every other iteration)
  InsertTop->getTerminator()->setSuccessor(0, cast<BasicBlock>(VMap[Header]));

  // Similarly, for the latch:
  // The original exiting edge is still hooked up to the loop exit.
  // The backedge now goes to the "bottom", which is either the loop's real
  // header (for the last peeled iteration) or the copied header of the next
  // iteration (for every other iteration)
  BasicBlock *NewLatch = cast<BasicBlock>(VMap[Latch]);
  BranchInst *LatchBR = cast<BranchInst>(NewLatch->getTerminator());
  unsigned HeaderIdx = (LatchBR->getSuccessor(0) == Header ? 0 : 1);
  LatchBR->setSuccessor(HeaderIdx, InsertBot);
  LatchBR->setSuccessor(1 - HeaderIdx, Exit);
  if (DT)
    DT->changeImmediateDominator(InsertBot, NewLatch);

  // The new copy of the loop body starts with a bunch of PHI nodes
  // that pick an incoming value from either the preheader, or the previous
  // loop iteration. Since this copy is no longer part of the loop, we
  // resolve this statically:
  // For the first iteration, we use the value from the preheader directly.
  // For any other iteration, we replace the phi with the value generated by
  // the immediately preceding clone of the loop body (which represents
  // the previous iteration).
  for (BasicBlock::iterator I = Header->begin(); isa<PHINode>(I); ++I) {
    PHINode *NewPHI = cast<PHINode>(VMap[&*I]);
    if (IterNumber == 0) {
      VMap[&*I] = NewPHI->getIncomingValueForBlock(PreHeader);
    } else {
      Value *LatchVal = NewPHI->getIncomingValueForBlock(Latch);
      Instruction *LatchInst = dyn_cast<Instruction>(LatchVal);
      // Values defined inside the loop must be taken from the previous
      // peeled iteration (via LVMap); loop-invariant values pass through.
      if (LatchInst && L->contains(LatchInst))
        VMap[&*I] = LVMap[LatchInst];
      else
        VMap[&*I] = LatchVal;
    }
    // The cloned PHI is fully resolved now; remove it from the clone.
    cast<BasicBlock>(VMap[Header])->getInstList().erase(NewPHI);
  }

  // Fix up the outgoing values - we need to add a value for the iteration
  // we've just created. Note that this must happen *after* the incoming
  // values are adjusted, since the value going out of the latch may also be
  // a value coming into the header.
  for (BasicBlock::iterator I = Exit->begin(); isa<PHINode>(I); ++I) {
    PHINode *PHI = cast<PHINode>(I);
    Value *LatchVal = PHI->getIncomingValueForBlock(Latch);
    Instruction *LatchInst = dyn_cast<Instruction>(LatchVal);
    if (LatchInst && L->contains(LatchInst))
      LatchVal = VMap[LatchVal];
    PHI->addIncoming(LatchVal, cast<BasicBlock>(VMap[Latch]));
  }

  // LastValueMap is updated with the values for the current loop
  // which are used the next time this function is called.
  for (const auto &KV : VMap)
    LVMap[KV.first] = KV.second;
}
  437. /// Peel off the first \p PeelCount iterations of loop \p L.
  438. ///
  439. /// Note that this does not peel them off as a single straight-line block.
  440. /// Rather, each iteration is peeled off separately, and needs to check the
  441. /// exit condition.
  442. /// For loops that dynamically execute \p PeelCount iterations or less
  443. /// this provides a benefit, since the peeled off iterations, which account
  444. /// for the bulk of dynamic execution, can be further simplified by scalar
  445. /// optimizations.
/// Peel off the first \p PeelCount iterations of loop \p L.
///
/// Note that this does not peel them off as a single straight-line block.
/// Rather, each iteration is peeled off separately, and needs to check the
/// exit condition.
/// For loops that dynamically execute \p PeelCount iterations or less
/// this provides a benefit, since the peeled off iterations, which account
/// for the bulk of dynamic execution, can be further simplified by scalar
/// optimizations.
bool llvm::peelLoop(Loop *L, unsigned PeelCount, LoopInfo *LI,
                    ScalarEvolution *SE, DominatorTree *DT,
                    AssumptionCache *AC, bool PreserveLCSSA) {
  assert(PeelCount > 0 && "Attempt to peel out zero iterations?");
  assert(canPeel(L) && "Attempt to peel a loop which is not peelable?");

  LoopBlocksDFS LoopBlocks(L);
  LoopBlocks.perform(LI);

  BasicBlock *Header = L->getHeader();
  BasicBlock *PreHeader = L->getLoopPreheader();
  BasicBlock *Latch = L->getLoopLatch();
  BasicBlock *Exit = L->getUniqueExitBlock();

  Function *F = Header->getParent();

  // Set up all the necessary basic blocks. It is convenient to split the
  // preheader into 3 parts - two blocks to anchor the peeled copy of the loop
  // body, and a new preheader for the "real" loop.

  // Peeling the first iteration transforms.
  //
  // PreHeader:
  // ...
  // Header:
  //   LoopBody
  //   If (cond) goto Header
  // Exit:
  //
  // into
  //
  // InsertTop:
  //   LoopBody
  //   If (!cond) goto Exit
  // InsertBot:
  // NewPreHeader:
  // ...
  // Header:
  //  LoopBody
  //  If (cond) goto Header
  // Exit:
  //
  // Each following iteration will split the current bottom anchor in two,
  // and put the new copy of the loop body between these two blocks. That is,
  // after peeling another iteration from the example above, we'll split
  // InsertBot, and get:
  //
  // InsertTop:
  //   LoopBody
  //   If (!cond) goto Exit
  // InsertBot:
  //   LoopBody
  //   If (!cond) goto Exit
  // InsertBot.next:
  // NewPreHeader:
  // ...
  // Header:
  //  LoopBody
  //  If (cond) goto Header
  // Exit:

  BasicBlock *InsertTop = SplitEdge(PreHeader, Header, DT, LI);
  BasicBlock *InsertBot =
      SplitBlock(InsertTop, InsertTop->getTerminator(), DT, LI);
  BasicBlock *NewPreHeader =
      SplitBlock(InsertBot, InsertBot->getTerminator(), DT, LI);

  InsertTop->setName(Header->getName() + ".peel.begin");
  InsertBot->setName(Header->getName() + ".peel.next");
  NewPreHeader->setName(PreHeader->getName() + ".peel.newph");

  ValueToValueMapTy LVMap;

  // If we have branch weight information, we'll want to update it for the
  // newly created branches.
  BranchInst *LatchBR =
      cast<BranchInst>(cast<BasicBlock>(Latch)->getTerminator());
  unsigned HeaderIdx = (LatchBR->getSuccessor(0) == Header ? 0 : 1);

  uint64_t TrueWeight, FalseWeight;
  uint64_t ExitWeight = 0, CurHeaderWeight = 0;
  if (LatchBR->extractProfMetadata(TrueWeight, FalseWeight)) {
    ExitWeight = HeaderIdx ? TrueWeight : FalseWeight;
    // The # of times the loop body executes is the sum of the exit block
    // weight and the # of times the backedges are taken.
    CurHeaderWeight = TrueWeight + FalseWeight;
  }

  // For each peeled-off iteration, make a copy of the loop.
  for (unsigned Iter = 0; Iter < PeelCount; ++Iter) {
    SmallVector<BasicBlock *, 8> NewBlocks;
    ValueToValueMapTy VMap;

    // Subtract the exit weight from the current header weight -- the exit
    // weight is exactly the weight of the previous iteration's header.
    // FIXME: due to the way the distribution is constructed, we need a
    // guard here to make sure we don't end up with non-positive weights.
    if (ExitWeight < CurHeaderWeight)
      CurHeaderWeight -= ExitWeight;
    else
      CurHeaderWeight = 1;

    cloneLoopBlocks(L, Iter, InsertTop, InsertBot, Exit,
                    NewBlocks, LoopBlocks, VMap, LVMap, DT, LI);

    // Remap to use values from the current iteration instead of the
    // previous one.
    remapInstructionsInBlocks(NewBlocks, VMap);

    if (DT) {
      // Latches of the cloned loops dominate over the loop exit, so idom of the
      // latter is the first cloned loop body, as original PreHeader dominates
      // the original loop body.
      if (Iter == 0)
        DT->changeImmediateDominator(Exit, cast<BasicBlock>(LVMap[Latch]));
#ifdef EXPENSIVE_CHECKS
      assert(DT->verify(DominatorTree::VerificationLevel::Fast));
#endif
    }

    auto *LatchBRCopy = cast<BranchInst>(VMap[LatchBR]);
    updateBranchWeights(InsertBot, LatchBRCopy, Iter,
                        PeelCount, ExitWeight);
    // Remove Loop metadata from the latch branch instruction
    // because it is not the Loop's latch branch anymore.
    LatchBRCopy->setMetadata(LLVMContext::MD_loop, nullptr);

    // The bottom anchor of this iteration becomes the top anchor of the next;
    // split a fresh bottom anchor for it.
    InsertTop = InsertBot;
    InsertBot = SplitBlock(InsertBot, InsertBot->getTerminator(), DT, LI);
    InsertBot->setName(Header->getName() + ".peel.next");

    // Move the cloned blocks into place between the anchors.
    F->getBasicBlockList().splice(InsertTop->getIterator(),
                                  F->getBasicBlockList(),
                                  NewBlocks[0]->getIterator(), F->end());
  }

  // Now adjust the phi nodes in the loop header to get their initial values
  // from the last peeled-off iteration instead of the preheader.
  for (BasicBlock::iterator I = Header->begin(); isa<PHINode>(I); ++I) {
    PHINode *PHI = cast<PHINode>(I);
    Value *NewVal = PHI->getIncomingValueForBlock(Latch);
    Instruction *LatchInst = dyn_cast<Instruction>(NewVal);
    if (LatchInst && L->contains(LatchInst))
      NewVal = LVMap[LatchInst];

    PHI->setIncomingValue(PHI->getBasicBlockIndex(NewPreHeader), NewVal);
  }

  // Adjust the branch weights on the loop exit.
  if (ExitWeight) {
    // The backedge count is the difference of current header weight and
    // current loop exit weight. If the current header weight is smaller than
    // the current loop exit weight, we mark the loop backedge weight as 1.
    uint64_t BackEdgeWeight = 0;
    if (ExitWeight < CurHeaderWeight)
      BackEdgeWeight = CurHeaderWeight - ExitWeight;
    else
      BackEdgeWeight = 1;
    MDBuilder MDB(LatchBR->getContext());
    MDNode *WeightNode =
        HeaderIdx ? MDB.createBranchWeights(ExitWeight, BackEdgeWeight)
                  : MDB.createBranchWeights(BackEdgeWeight, ExitWeight);
    LatchBR->setMetadata(LLVMContext::MD_prof, WeightNode);
  }

  // Update SE on the outermost loop we changed (peeling an inner loop also
  // perturbs its parent).
  if (Loop *ParentLoop = L->getParentLoop())
    L = ParentLoop;

  // We modified the loop, update SE.
  SE->forgetTopmostLoop(L);

  // FIXME: Incrementally update loop-simplify
  simplifyLoop(L, DT, LI, SE, AC, nullptr, PreserveLCSSA);

  NumPeeled++;

  return true;
}