LoopUnrollPass.cpp
//===-- LoopUnroll.cpp - Loop unroller pass -------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass implements a simple loop unroller. It works best when loops have
// been canonicalized by the -indvars pass, allowing it to determine the trip
// counts of loops easily.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Metadata.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/UnrollLoop.h"
#include "llvm/IR/InstVisitor.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include <climits>

using namespace llvm;

#define DEBUG_TYPE "loop-unroll"
static cl::opt<unsigned>
    UnrollThreshold("unroll-threshold", cl::init(150), cl::Hidden,
                    cl::desc("The cut-off point for automatic loop unrolling"));

static cl::opt<unsigned> UnrollMaxIterationsCountToAnalyze(
    "unroll-max-iteration-count-to-analyze", cl::init(0), cl::Hidden,
    cl::desc("Don't allow loop unrolling to simulate more than this number of "
             "iterations when checking full unroll profitability"));

static cl::opt<unsigned> UnrollMinPercentOfOptimized(
    "unroll-percent-of-optimized-for-complete-unroll", cl::init(20), cl::Hidden,
    cl::desc("If complete unrolling could trigger further optimizations, and, "
             "by that, remove the given percent of instructions, perform the "
             "complete unroll even if it's beyond the threshold"));

static cl::opt<unsigned> UnrollAbsoluteThreshold(
    "unroll-absolute-threshold", cl::init(2000), cl::Hidden,
    cl::desc("Don't unroll if the unrolled size is bigger than this threshold,"
             " even if we can remove a big portion of instructions later."));

static cl::opt<unsigned>
    UnrollCount("unroll-count", cl::init(0), cl::Hidden,
                cl::desc("Use this unroll count for all loops including those "
                         "with unroll_count pragma values, for testing purposes"));

static cl::opt<bool>
    UnrollAllowPartial("unroll-allow-partial", cl::init(false), cl::Hidden,
                       cl::desc("Allows loops to be partially unrolled until "
                                "-unroll-threshold loop size is reached."));

static cl::opt<bool>
    UnrollRuntime("unroll-runtime", cl::ZeroOrMore, cl::init(false), cl::Hidden,
                  cl::desc("Unroll loops with run-time trip counts"));

static cl::opt<unsigned>
    PragmaUnrollThreshold("pragma-unroll-threshold", cl::init(16 * 1024),
                          cl::Hidden,
                          cl::desc("Unrolled size limit for loops with an "
                                   "unroll(full) or unroll_count pragma."));
namespace {
class LoopUnroll : public LoopPass {
public:
  static char ID; // Pass ID, replacement for typeid
  LoopUnroll(int T = -1, int C = -1, int P = -1, int R = -1) : LoopPass(ID) {
    CurrentThreshold = (T == -1) ? UnrollThreshold : unsigned(T);
    CurrentAbsoluteThreshold = UnrollAbsoluteThreshold;
    CurrentMinPercentOfOptimized = UnrollMinPercentOfOptimized;
    CurrentCount = (C == -1) ? UnrollCount : unsigned(C);
    CurrentAllowPartial = (P == -1) ? UnrollAllowPartial : (bool)P;
    CurrentRuntime = (R == -1) ? UnrollRuntime : (bool)R;
    UserThreshold = (T != -1) || (UnrollThreshold.getNumOccurrences() > 0);
    UserAbsoluteThreshold = (UnrollAbsoluteThreshold.getNumOccurrences() > 0);
    UserPercentOfOptimized =
        (UnrollMinPercentOfOptimized.getNumOccurrences() > 0);
    UserAllowPartial = (P != -1) ||
                       (UnrollAllowPartial.getNumOccurrences() > 0);
    UserRuntime = (R != -1) || (UnrollRuntime.getNumOccurrences() > 0);
    UserCount = (C != -1) || (UnrollCount.getNumOccurrences() > 0);

    initializeLoopUnrollPass(*PassRegistry::getPassRegistry());
  }

  /// A magic value for use with the Threshold parameter to indicate
  /// that the loop unroll should be performed regardless of how much
  /// code expansion would result.
  static const unsigned NoThreshold = UINT_MAX;

  // Threshold to use when optsize is specified (and there is no
  // explicit -unroll-threshold).
  static const unsigned OptSizeUnrollThreshold = 50;

  // Default unroll count for loops with run-time trip count if
  // -unroll-count is not set.
  static const unsigned UnrollRuntimeCount = 8;

  unsigned CurrentCount;
  unsigned CurrentThreshold;
  unsigned CurrentAbsoluteThreshold;
  unsigned CurrentMinPercentOfOptimized;
  bool CurrentAllowPartial;
  bool CurrentRuntime;
  bool UserCount;              // CurrentCount is user-specified.
  bool UserThreshold;          // CurrentThreshold is user-specified.
  bool UserAbsoluteThreshold;  // CurrentAbsoluteThreshold is user-specified.
  bool UserPercentOfOptimized; // CurrentMinPercentOfOptimized is
                               // user-specified.
  bool UserAllowPartial;       // CurrentAllowPartial is user-specified.
  bool UserRuntime;            // CurrentRuntime is user-specified.

  bool runOnLoop(Loop *L, LPPassManager &LPM) override;

  /// This transformation requires natural loop information & requires that
  /// loop preheaders be inserted into the CFG...
  ///
  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<AssumptionCacheTracker>();
    AU.addRequired<LoopInfoWrapperPass>();
    AU.addPreserved<LoopInfoWrapperPass>();
    AU.addRequiredID(LoopSimplifyID);
    AU.addPreservedID(LoopSimplifyID);
    AU.addRequiredID(LCSSAID);
    AU.addPreservedID(LCSSAID);
    AU.addRequired<ScalarEvolution>();
    AU.addPreserved<ScalarEvolution>();
    AU.addRequired<TargetTransformInfoWrapperPass>();
    // FIXME: Loop unroll requires LCSSA. And LCSSA requires dom info.
    // If loop unroll does not preserve dom info then LCSSA pass on next
    // loop will receive invalid dom info.
    // For now, recreate dom info, if loop is unrolled.
    AU.addPreserved<DominatorTreeWrapperPass>();
  }
  // Fill in the UnrollingPreferences parameter with values from the
  // TargetTransformationInfo.
  void getUnrollingPreferences(Loop *L, const TargetTransformInfo &TTI,
                               TargetTransformInfo::UnrollingPreferences &UP) {
    UP.Threshold = CurrentThreshold;
    UP.AbsoluteThreshold = CurrentAbsoluteThreshold;
    UP.MinPercentOfOptimized = CurrentMinPercentOfOptimized;
    UP.OptSizeThreshold = OptSizeUnrollThreshold;
    UP.PartialThreshold = CurrentThreshold;
    UP.PartialOptSizeThreshold = OptSizeUnrollThreshold;
    UP.Count = CurrentCount;
    UP.MaxCount = UINT_MAX;
    UP.Partial = CurrentAllowPartial;
    UP.Runtime = CurrentRuntime;
    TTI.getUnrollingPreferences(L, UP);
  }
  // Select and return an unroll count based on parameters from
  // user, unroll preferences, unroll pragmas, or a heuristic.
  // SetExplicitly is set to true if the unroll count is set by
  // the user or a pragma rather than selected heuristically.
  unsigned
  selectUnrollCount(const Loop *L, unsigned TripCount, bool PragmaFullUnroll,
                    unsigned PragmaCount,
                    const TargetTransformInfo::UnrollingPreferences &UP,
                    bool &SetExplicitly);

  // Select threshold values used to limit unrolling based on a
  // total unrolled size. Parameters Threshold and PartialThreshold
  // are set to the maximum unrolled size for fully and partially
  // unrolled loops respectively.
  void selectThresholds(const Loop *L, bool HasPragma,
                        const TargetTransformInfo::UnrollingPreferences &UP,
                        unsigned &Threshold, unsigned &PartialThreshold,
                        unsigned NumberOfOptimizedInstructions) {
    // Determine the current unrolling threshold. While this is
    // normally set from UnrollThreshold, it is overridden to a
    // smaller value if the current function is marked as
    // optimize-for-size, and the unroll threshold was not user
    // specified.
    Threshold = UserThreshold ? CurrentThreshold : UP.Threshold;

    // If we are allowed to completely unroll if we can remove M% of
    // instructions, and we know that with complete unrolling we'll be able
    // to kill N instructions, then we can afford to completely unroll loops
    // with unrolled size up to N*100/M.
    // Adjust the threshold according to that:
    unsigned PercentOfOptimizedForCompleteUnroll =
        UserPercentOfOptimized ? CurrentMinPercentOfOptimized
                               : UP.MinPercentOfOptimized;
    unsigned AbsoluteThreshold = UserAbsoluteThreshold
                                     ? CurrentAbsoluteThreshold
                                     : UP.AbsoluteThreshold;
    if (PercentOfOptimizedForCompleteUnroll)
      Threshold = std::max<unsigned>(Threshold,
                                     NumberOfOptimizedInstructions * 100 /
                                         PercentOfOptimizedForCompleteUnroll);
    // But don't allow unrolling loops bigger than the absolute threshold.
    Threshold = std::min<unsigned>(Threshold, AbsoluteThreshold);
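    // Illustrative example (hypothetical numbers): with the default
    // -unroll-percent-of-optimized-for-complete-unroll of 20 and an estimate
    // of 30 removable instructions, the threshold is raised to at least
    // 30 * 100 / 20 = 150, and is then clamped to the absolute threshold
    // (2000 by default).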
    PartialThreshold = UserThreshold ? CurrentThreshold : UP.PartialThreshold;
    if (!UserThreshold &&
        L->getHeader()->getParent()->hasFnAttribute(
            Attribute::OptimizeForSize)) {
      Threshold = UP.OptSizeThreshold;
      PartialThreshold = UP.PartialOptSizeThreshold;
    }
    if (HasPragma) {
      // If the loop has an unrolling pragma, we want to be more
      // aggressive with unrolling limits. Set thresholds to at
      // least the PragmaUnrollThreshold value, which is larger than the
      // default limits.
      if (Threshold != NoThreshold)
        Threshold = std::max<unsigned>(Threshold, PragmaUnrollThreshold);
      if (PartialThreshold != NoThreshold)
        PartialThreshold =
            std::max<unsigned>(PartialThreshold, PragmaUnrollThreshold);
    }
  }
};
}

char LoopUnroll::ID = 0;
INITIALIZE_PASS_BEGIN(LoopUnroll, "loop-unroll", "Unroll loops", false, false)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopSimplify)
INITIALIZE_PASS_DEPENDENCY(LCSSA)
INITIALIZE_PASS_DEPENDENCY(ScalarEvolution)
INITIALIZE_PASS_END(LoopUnroll, "loop-unroll", "Unroll loops", false, false)

Pass *llvm::createLoopUnrollPass(int Threshold, int Count, int AllowPartial,
                                 int Runtime) {
  return new LoopUnroll(Threshold, Count, AllowPartial, Runtime);
}

Pass *llvm::createSimpleLoopUnrollPass() {
  return llvm::createLoopUnrollPass(-1, -1, 0, 0);
}
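// Illustrative usage (a sketch, not part of this file): clients typically
// schedule the pass through the legacy pass manager, e.g.
//   legacy::PassManager PM;
//   PM.add(createLoopUnrollPass(/*Threshold=*/150));
// where -1 for any parameter means "fall back to the command-line defaults
// handled in the LoopUnroll constructor above".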
static bool isLoadFromConstantInitializer(Value *V) {
  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(V))
    if (GV->isConstant() && GV->hasDefinitiveInitializer())
      return GV->getInitializer();
  return false;
}

struct FindConstantPointers {
  bool LoadCanBeConstantFolded;
  bool IndexIsConstant;
  APInt Step;
  APInt StartValue;
  Value *BaseAddress;
  const Loop *L;
  ScalarEvolution &SE;
  FindConstantPointers(const Loop *loop, ScalarEvolution &SE)
      : LoadCanBeConstantFolded(true), IndexIsConstant(true), L(loop), SE(SE) {}

  bool follow(const SCEV *S) {
    if (const SCEVUnknown *SC = dyn_cast<SCEVUnknown>(S)) {
      // We've reached the leaf node of SCEV, it's most probably just a
      // variable. Now it's time to see if it corresponds to a constant
      // global (in which case we can eliminate the load), or not.
      BaseAddress = SC->getValue();
      LoadCanBeConstantFolded =
          IndexIsConstant && isLoadFromConstantInitializer(BaseAddress);
      return false;
    }
    if (isa<SCEVConstant>(S))
      return true;
    if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) {
      // If the current SCEV expression is AddRec, and its loop isn't the loop
      // we are about to unroll, then we won't get a constant address after
      // unrolling, and thus, won't be able to eliminate the load.
      if (AR->getLoop() != L)
        return IndexIsConstant = false;
      // If the step isn't constant, we won't get constant addresses in the
      // unrolled version. Bail out.
      if (const SCEVConstant *StepSE =
              dyn_cast<SCEVConstant>(AR->getStepRecurrence(SE)))
        Step = StepSE->getValue()->getValue();
      else
        return IndexIsConstant = false;
      return IndexIsConstant;
    }
    // If IndexIsConstant is still true, continue traversal.
    // Otherwise, we have found something that prevents us from (possible) load
    // elimination.
    return IndexIsConstant;
  }
  bool isDone() const { return !IndexIsConstant; }
};
// This class is used to get an estimate of the optimization effects that we
// could get from complete loop unrolling. It comes from the fact that some
// loads might be replaced with concrete constant values and that could trigger
// a chain of instruction simplifications.
//
// E.g. we might have:
//   int a[] = {0, 1, 0};
//   v = 0;
//   for (i = 0; i < 3; i++)
//     v += b[i]*a[i];
// If we completely unroll the loop, we would get:
//   v = b[0]*a[0] + b[1]*a[1] + b[2]*a[2]
// Which then will be simplified to:
//   v = b[0]* 0 + b[1]* 1 + b[2]* 0
// And finally:
//   v = b[1]
class UnrollAnalyzer : public InstVisitor<UnrollAnalyzer, bool> {
  typedef InstVisitor<UnrollAnalyzer, bool> Base;
  friend class InstVisitor<UnrollAnalyzer, bool>;

  const Loop *L;
  unsigned TripCount;
  ScalarEvolution &SE;
  const TargetTransformInfo &TTI;

  DenseMap<Value *, Constant *> SimplifiedValues;
  DenseMap<LoadInst *, Value *> LoadBaseAddresses;
  SmallPtrSet<Instruction *, 32> CountedInstructions;

  /// \brief Count the number of optimized instructions.
  unsigned NumberOfOptimizedInstructions;

  // Provide base case for our instruction visit.
  bool visitInstruction(Instruction &I) { return false; }

  // TODO: We should also visit ICmp, FCmp, GetElementPtr, Trunc, ZExt, SExt,
  // FPTrunc, FPExt, FPToUI, FPToSI, UIToFP, SIToFP, BitCast, Select,
  // ExtractElement, InsertElement, ShuffleVector, ExtractValue, InsertValue.
  //
  // Probably it's worth hoisting the code for estimating the simplification
  // effects to a separate class, since we have very similar code in
  // InlineCost already.
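  // Illustrative example: if SimplifiedValues already maps %a to the constant
  // 0, then visiting "%m = mul i32 %a, %b" below simplifies it to 0, adds the
  // instruction's cost to NumberOfOptimizedInstructions, and records 0 for %m
  // so that %m's users can be simplified in turn.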
  bool visitBinaryOperator(BinaryOperator &I) {
    Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
    if (!isa<Constant>(LHS))
      if (Constant *SimpleLHS = SimplifiedValues.lookup(LHS))
        LHS = SimpleLHS;
    if (!isa<Constant>(RHS))
      if (Constant *SimpleRHS = SimplifiedValues.lookup(RHS))
        RHS = SimpleRHS;
    Value *SimpleV = nullptr;
    if (auto FI = dyn_cast<FPMathOperator>(&I))
      SimpleV =
          SimplifyFPBinOp(I.getOpcode(), LHS, RHS, FI->getFastMathFlags());
    else
      SimpleV = SimplifyBinOp(I.getOpcode(), LHS, RHS);

    if (SimpleV && CountedInstructions.insert(&I).second)
      NumberOfOptimizedInstructions += TTI.getUserCost(&I);

    if (Constant *C = dyn_cast_or_null<Constant>(SimpleV)) {
      SimplifiedValues[&I] = C;
      return true;
    }
    return false;
  }
  Constant *computeLoadValue(LoadInst *LI, unsigned Iteration) {
    if (!LI)
      return nullptr;
    Value *BaseAddr = LoadBaseAddresses[LI];
    if (!BaseAddr)
      return nullptr;

    auto GV = dyn_cast<GlobalVariable>(BaseAddr);
    if (!GV)
      return nullptr;

    ConstantDataSequential *CDS =
        dyn_cast<ConstantDataSequential>(GV->getInitializer());
    if (!CDS)
      return nullptr;

    const SCEV *BaseAddrSE = SE.getSCEV(BaseAddr);
    const SCEV *S = SE.getSCEV(LI->getPointerOperand());
    const SCEV *OffSE = SE.getMinusSCEV(S, BaseAddrSE);

    APInt StepC, StartC;
    const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(OffSE);
    if (!AR)
      return nullptr;

    if (const SCEVConstant *StepSE =
            dyn_cast<SCEVConstant>(AR->getStepRecurrence(SE)))
      StepC = StepSE->getValue()->getValue();
    else
      return nullptr;

    if (const SCEVConstant *StartSE = dyn_cast<SCEVConstant>(AR->getStart()))
      StartC = StartSE->getValue()->getValue();
    else
      return nullptr;

    unsigned ElemSize = CDS->getElementType()->getPrimitiveSizeInBits() / 8U;
    unsigned Start = StartC.getLimitedValue();
    unsigned Step = StepC.getLimitedValue();
    unsigned Index = (Start + Step * Iteration) / ElemSize;
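    // Illustrative example: for i32 elements (ElemSize == 4), an offset SCEV
    // of {0,+,4} gives Index = (0 + 4 * 2) / 4 = 2 on iteration 2, i.e. the
    // third element of the initializer.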
    if (Index >= CDS->getNumElements())
      return nullptr;

    Constant *CV = CDS->getElementAsConstant(Index);
    return CV;
  }

public:
  UnrollAnalyzer(const Loop *L, unsigned TripCount, ScalarEvolution &SE,
                 const TargetTransformInfo &TTI)
      : L(L), TripCount(TripCount), SE(SE), TTI(TTI),
        NumberOfOptimizedInstructions(0) {}

  // Visit all loads in the loop L, and for those that, after complete loop
  // unrolling, would have a constant address pointing to a known constant
  // initializer, record the base address for future use. It is used
  // when we estimate the number of potentially simplified instructions.
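  // Illustrative example: a load of a[i], where 'a' is a constant global
  // array and 'i' is an induction variable of L, has an address SCEV of the
  // form {a,+,step}, so the load is recorded with base address 'a'.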
  void findConstFoldableLoads() {
    for (auto BB : L->getBlocks()) {
      for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I) {
        if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
          if (!LI->isSimple())
            continue;
          Value *AddrOp = LI->getPointerOperand();
          const SCEV *S = SE.getSCEV(AddrOp);
          FindConstantPointers Visitor(L, SE);
          SCEVTraversal<FindConstantPointers> T(Visitor);
          T.visitAll(S);
          if (Visitor.IndexIsConstant && Visitor.LoadCanBeConstantFolded) {
            LoadBaseAddresses[LI] = Visitor.BaseAddress;
          }
        }
      }
    }
  }
  // Given a list of loads that could be constant-folded (LoadBaseAddresses),
  // estimate the number of optimized instructions after substituting the
  // concrete values for the given Iteration. Also track how many instructions
  // become dead through this process.
  unsigned estimateNumberOfOptimizedInstructions(unsigned Iteration) {
    // We keep a set vector for the worklist so that we don't waste space in
    // the worklist queuing up the same instruction repeatedly. This can happen
    // due to multiple operands being the same instruction or due to the same
    // instruction being an operand of lots of things that end up dead or
    // simplified.
    SmallSetVector<Instruction *, 8> Worklist;

    // Clear the simplified values and counts for this iteration.
    SimplifiedValues.clear();
    CountedInstructions.clear();
    NumberOfOptimizedInstructions = 0;

    // We start by adding all loads to the worklist.
    for (auto &LoadDescr : LoadBaseAddresses) {
      LoadInst *LI = LoadDescr.first;
      SimplifiedValues[LI] = computeLoadValue(LI, Iteration);
      if (CountedInstructions.insert(LI).second)
        NumberOfOptimizedInstructions += TTI.getUserCost(LI);

      for (User *U : LI->users())
        Worklist.insert(cast<Instruction>(U));
    }

    // And then we try to simplify every user of every instruction from the
    // worklist. If we do simplify a user, add it to the worklist to process
    // its users as well.
    while (!Worklist.empty()) {
      Instruction *I = Worklist.pop_back_val();
      if (!L->contains(I))
        continue;
      if (!visit(I))
        continue;
      for (User *U : I->users())
        Worklist.insert(cast<Instruction>(U));
    }

    // Now that we know the potentially simplified instructions, estimate the
    // number of instructions that would become dead if we do perform the
    // simplification.

    // The dead instructions are held in a separate set. This is used to
    // prevent us from re-examining instructions and make sure we only count
    // the benefit once. The worklist's internal set handles insertion
    // deduplication.
    SmallPtrSet<Instruction *, 16> DeadInstructions;

    // Lambda to enqueue operands onto the worklist.
    auto EnqueueOperands = [&](Instruction &I) {
      for (auto *Op : I.operand_values())
        if (auto *OpI = dyn_cast<Instruction>(Op))
          if (!OpI->use_empty())
            Worklist.insert(OpI);
    };

    // Start by initializing the worklist with simplified instructions.
    for (auto &FoldedKeyValue : SimplifiedValues)
      if (auto *FoldedInst = dyn_cast<Instruction>(FoldedKeyValue.first)) {
        DeadInstructions.insert(FoldedInst);

        // Add each instruction operand of this dead instruction to the
        // worklist.
        EnqueueOperands(*FoldedInst);
      }

    // If a definition of an instruction is only used by simplified or dead
    // instructions, it's also dead. Check defs of all instructions from the
    // worklist.
    while (!Worklist.empty()) {
      Instruction *I = Worklist.pop_back_val();
      if (!L->contains(I))
        continue;
      if (DeadInstructions.count(I))
        continue;

      if (std::all_of(I->user_begin(), I->user_end(), [&](User *U) {
            return DeadInstructions.count(cast<Instruction>(U));
          })) {
        NumberOfOptimizedInstructions += TTI.getUserCost(I);
        DeadInstructions.insert(I);
        EnqueueOperands(*I);
      }
    }
    return NumberOfOptimizedInstructions;
  }
};
// Complete loop unrolling can make some loads constant, and we need to know if
// that would expose any further optimization opportunities.
// This routine estimates this optimization effect and returns the number of
// instructions that could potentially be optimized away.
static unsigned
approximateNumberOfOptimizedInstructions(const Loop *L, ScalarEvolution &SE,
                                         unsigned TripCount,
                                         const TargetTransformInfo &TTI) {
  if (!TripCount || !UnrollMaxIterationsCountToAnalyze)
    return 0;

  UnrollAnalyzer UA(L, TripCount, SE, TTI);
  UA.findConstFoldableLoads();

  // Estimate the number of instructions that could be simplified if we replace
  // a load with the corresponding constant. Since the same load will take
  // different values on different iterations, we have to go through all the
  // loop's iterations here. To limit ourselves, we check only the first N
  // iterations, and then scale the found number, if necessary.
  unsigned IterationsNumberForEstimate =
      std::min<unsigned>(UnrollMaxIterationsCountToAnalyze, TripCount);
  unsigned NumberOfOptimizedInstructions = 0;
  for (unsigned i = 0; i < IterationsNumberForEstimate; ++i)
    NumberOfOptimizedInstructions +=
        UA.estimateNumberOfOptimizedInstructions(i);

  NumberOfOptimizedInstructions *= TripCount / IterationsNumberForEstimate;
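  // Illustrative example: with -unroll-max-iteration-count-to-analyze=8 and a
  // trip count of 100, only the first 8 iterations are simulated and the
  // accumulated estimate is then scaled by 100 / 8 = 12 (integer division).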
  return NumberOfOptimizedInstructions;
}

/// ApproximateLoopSize - Approximate the size of the loop.
static unsigned ApproximateLoopSize(const Loop *L, unsigned &NumCalls,
                                    bool &NotDuplicatable,
                                    const TargetTransformInfo &TTI,
                                    AssumptionCache *AC) {
  SmallPtrSet<const Value *, 32> EphValues;
  CodeMetrics::collectEphemeralValues(L, AC, EphValues);

  CodeMetrics Metrics;
  for (Loop::block_iterator I = L->block_begin(), E = L->block_end();
       I != E; ++I)
    Metrics.analyzeBasicBlock(*I, TTI, EphValues);
  NumCalls = Metrics.NumInlineCandidates;
  NotDuplicatable = Metrics.notDuplicatable;

  unsigned LoopSize = Metrics.NumInsts;

  // Don't allow an estimate of size zero. This would allow unrolling of loops
  // with huge iteration counts, which is a compile time problem even if it's
  // not a problem for code quality. Also, the code using this size may assume
  // that each loop has at least three instructions (likely a conditional
  // branch, a comparison feeding that branch, and some kind of loop increment
  // feeding that comparison instruction).
  LoopSize = std::max(LoopSize, 3u);

  return LoopSize;
}
// Returns the loop hint metadata node with the given name (for example,
// "llvm.loop.unroll.count"). If no such metadata node exists, then nullptr is
// returned.
static MDNode *GetUnrollMetadataForLoop(const Loop *L, StringRef Name) {
  if (MDNode *LoopID = L->getLoopID())
    return GetUnrollMetadata(LoopID, Name);
  return nullptr;
}

// Returns true if the loop has an unroll(full) pragma.
static bool HasUnrollFullPragma(const Loop *L) {
  return GetUnrollMetadataForLoop(L, "llvm.loop.unroll.full");
}

// Returns true if the loop has an unroll(disable) pragma.
static bool HasUnrollDisablePragma(const Loop *L) {
  return GetUnrollMetadataForLoop(L, "llvm.loop.unroll.disable");
}

// If the loop has an unroll_count pragma, return the (necessarily
// positive) value from the pragma. Otherwise return 0.
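// Illustrative IR (assumed shape, matching the two-operand form asserted
// below): the loop's latch branch carries the metadata, e.g.
//   br i1 %exitcond, label %exit, label %body, !llvm.loop !0
//   !0 = distinct !{!0, !1}
//   !1 = !{!"llvm.loop.unroll.count", i32 4}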
static unsigned UnrollCountPragmaValue(const Loop *L) {
  MDNode *MD = GetUnrollMetadataForLoop(L, "llvm.loop.unroll.count");
  if (MD) {
    assert(MD->getNumOperands() == 2 &&
           "Unroll count hint metadata should have two operands.");
    unsigned Count =
        mdconst::extract<ConstantInt>(MD->getOperand(1))->getZExtValue();
    assert(Count >= 1 && "Unroll count must be positive.");
    return Count;
  }
  return 0;
}

// Remove existing unroll metadata and add unroll disable metadata to
// indicate the loop has already been unrolled. This prevents a loop
// from being unrolled more than is directed by a pragma if the loop
// unrolling pass is run more than once (which it generally is).
static void SetLoopAlreadyUnrolled(Loop *L) {
  MDNode *LoopID = L->getLoopID();
  if (!LoopID) return;

  // First remove any existing loop unrolling metadata.
  SmallVector<Metadata *, 4> MDs;
  // Reserve first location for self reference to the LoopID metadata node.
  MDs.push_back(nullptr);
  for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) {
    bool IsUnrollMetadata = false;
    MDNode *MD = dyn_cast<MDNode>(LoopID->getOperand(i));
    if (MD) {
      const MDString *S = dyn_cast<MDString>(MD->getOperand(0));
      IsUnrollMetadata = S && S->getString().startswith("llvm.loop.unroll.");
    }
    if (!IsUnrollMetadata)
      MDs.push_back(LoopID->getOperand(i));
  }

  // Add unroll(disable) metadata to disable future unrolling.
  LLVMContext &Context = L->getHeader()->getContext();
  SmallVector<Metadata *, 1> DisableOperands;
  DisableOperands.push_back(MDString::get(Context, "llvm.loop.unroll.disable"));
  MDNode *DisableNode = MDNode::get(Context, DisableOperands);
  MDs.push_back(DisableNode);

  MDNode *NewLoopID = MDNode::get(Context, MDs);
  // Set operand 0 to refer to the loop id itself.
  NewLoopID->replaceOperandWith(0, NewLoopID);
  L->setLoopID(NewLoopID);
}
unsigned LoopUnroll::selectUnrollCount(
    const Loop *L, unsigned TripCount, bool PragmaFullUnroll,
    unsigned PragmaCount, const TargetTransformInfo::UnrollingPreferences &UP,
    bool &SetExplicitly) {
  SetExplicitly = true;

  // User-specified count (either as a command-line option or
  // constructor parameter) has highest precedence.
  unsigned Count = UserCount ? CurrentCount : 0;

  // If there is no user-specified count, unroll pragmas have the next
  // highest precedence.
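  // Illustrative example: with "-unroll-count=4" on the command line, a loop
  // carrying "#pragma clang loop unroll_count(8)" is still unrolled 4 times,
  // because the user-specified count wins over the pragma value.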
  if (Count == 0) {
    if (PragmaCount) {
      Count = PragmaCount;
    } else if (PragmaFullUnroll) {
      Count = TripCount;
    }
  }
  if (Count == 0)
    Count = UP.Count;
  if (Count == 0) {
    SetExplicitly = false;
    if (TripCount == 0)
      // Runtime trip count.
      Count = UnrollRuntimeCount;
    else
      // Conservative heuristic: if we know the trip count, see if we can
      // completely unroll (subject to the threshold, checked below); otherwise
      // try to find greatest modulo of the trip count which is still under
      // threshold value.
      Count = TripCount;
  }
  if (TripCount && Count > TripCount)
    return TripCount;
  return Count;
}
bool LoopUnroll::runOnLoop(Loop *L, LPPassManager &LPM) {
  if (skipOptnoneFunction(L))
    return false;

  Function &F = *L->getHeader()->getParent();
  LoopInfo *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
  ScalarEvolution *SE = &getAnalysis<ScalarEvolution>();
  const TargetTransformInfo &TTI =
      getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
  auto &AC = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);

  BasicBlock *Header = L->getHeader();
  DEBUG(dbgs() << "Loop Unroll: F[" << Header->getParent()->getName()
               << "] Loop %" << Header->getName() << "\n");

  if (HasUnrollDisablePragma(L)) {
    return false;
  }
  bool PragmaFullUnroll = HasUnrollFullPragma(L);
  unsigned PragmaCount = UnrollCountPragmaValue(L);
  bool HasPragma = PragmaFullUnroll || PragmaCount > 0;

  TargetTransformInfo::UnrollingPreferences UP;
  getUnrollingPreferences(L, TTI, UP);

  // Find the trip count, and the trip multiple if the count is not available.
  unsigned TripCount = 0;
  unsigned TripMultiple = 1;
  // If there are multiple exiting blocks but one of them is the latch, use the
  // latch for the trip count estimation. Otherwise insist on a single exiting
  // block for the trip count estimation.
  BasicBlock *ExitingBlock = L->getLoopLatch();
  if (!ExitingBlock || !L->isLoopExiting(ExitingBlock))
    ExitingBlock = L->getExitingBlock();
  if (ExitingBlock) {
    TripCount = SE->getSmallConstantTripCount(L, ExitingBlock);
    TripMultiple = SE->getSmallConstantTripMultiple(L, ExitingBlock);
  }
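  // Illustrative example: a loop whose trip count is unknown but provably a
  // multiple of 4 ends up here with TripCount == 0 and TripMultiple == 4.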
  // Select an initial unroll count. This may be reduced later based
  // on size thresholds.
  bool CountSetExplicitly;
  unsigned Count = selectUnrollCount(L, TripCount, PragmaFullUnroll,
                                     PragmaCount, UP, CountSetExplicitly);

  unsigned NumInlineCandidates;
  bool notDuplicatable;
  unsigned LoopSize =
      ApproximateLoopSize(L, NumInlineCandidates, notDuplicatable, TTI, &AC);
  DEBUG(dbgs() << "  Loop Size = " << LoopSize << "\n");

  // When computing the unrolled size, note that the conditional branch on the
  // backedge and the comparison feeding it are not replicated like the rest of
  // the loop body (which is why 2 is subtracted).
  uint64_t UnrolledSize = (uint64_t)(LoopSize-2) * Count + 2;
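  // Illustrative example: a loop of size 10 unrolled 4 times is estimated at
  // (10 - 2) * 4 + 2 = 34 instructions.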
  if (notDuplicatable) {
    DEBUG(dbgs() << "  Not unrolling loop which contains non-duplicatable"
                 << " instructions.\n");
    return false;
  }
  if (NumInlineCandidates != 0) {
    DEBUG(dbgs() << "  Not unrolling loop with inlinable calls.\n");
    return false;
  }

  unsigned NumberOfOptimizedInstructions =
      approximateNumberOfOptimizedInstructions(L, *SE, TripCount, TTI);
  DEBUG(dbgs() << "  Complete unrolling could save: "
               << NumberOfOptimizedInstructions << "\n");

  unsigned Threshold, PartialThreshold;
  selectThresholds(L, HasPragma, UP, Threshold, PartialThreshold,
                   NumberOfOptimizedInstructions);

  // Given Count, TripCount and thresholds determine the type of
  // unrolling which is to be performed.
  enum { Full = 0, Partial = 1, Runtime = 2 };
  int Unrolling;
  if (TripCount && Count == TripCount) {
    if (Threshold != NoThreshold && UnrolledSize > Threshold) {
      DEBUG(dbgs() << "  Too large to fully unroll with count: " << Count
                   << " because size: " << UnrolledSize << ">" << Threshold
                   << "\n");
      Unrolling = Partial;
    } else {
      Unrolling = Full;
    }
  } else if (TripCount && Count < TripCount) {
    Unrolling = Partial;
  } else {
    Unrolling = Runtime;
  }

  // Reduce count based on the type of unrolling and the threshold values.
  unsigned OriginalCount = Count;
  bool AllowRuntime = UserRuntime ? CurrentRuntime : UP.Runtime;
  if (Unrolling == Partial) {
    bool AllowPartial = UserAllowPartial ? CurrentAllowPartial : UP.Partial;
    if (!AllowPartial && !CountSetExplicitly) {
      DEBUG(dbgs() << "  will not try to unroll partially because "
                   << "-unroll-allow-partial not given\n");
      return false;
    }
    if (PartialThreshold != NoThreshold && UnrolledSize > PartialThreshold) {
      // Reduce unroll count to be modulo of TripCount for partial unrolling.
      Count = (std::max(PartialThreshold, 3u)-2) / (LoopSize-2);
      while (Count != 0 && TripCount % Count != 0)
        Count--;
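      // Illustrative example: with PartialThreshold = 150 and LoopSize = 12,
      // the computation above starts from (150 - 2) / (12 - 2) = 14 and, for
      // TripCount = 20, settles on 10, the largest divisor of 20 not above 14.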
    }
  } else if (Unrolling == Runtime) {
    if (!AllowRuntime && !CountSetExplicitly) {
      DEBUG(dbgs() << "  will not try to unroll loop with runtime trip count "
                   << "-unroll-runtime not given\n");
      return false;
    }
    // Reduce unroll count to be the largest power-of-two factor of
    // the original count which satisfies the threshold limit.
    while (Count != 0 && UnrolledSize > PartialThreshold) {
      Count >>= 1;
      UnrolledSize = (LoopSize-2) * Count + 2;
    }
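    // Illustrative example: starting from Count = 8 with LoopSize = 20 and
    // PartialThreshold = 50, the size estimates are 8*18+2 = 146, 4*18+2 = 74,
    // and 2*18+2 = 38, so the loop is runtime-unrolled by 2.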
    if (Count > UP.MaxCount)
      Count = UP.MaxCount;
    DEBUG(dbgs() << "  partially unrolling with count: " << Count << "\n");
  }

  if (HasPragma) {
    if (PragmaCount != 0)
      // If loop has an unroll count pragma mark loop as unrolled to prevent
      // unrolling beyond that requested by the pragma.
      SetLoopAlreadyUnrolled(L);

    // Emit optimization remarks if we are unable to unroll the loop
    // as directed by a pragma.
    DebugLoc LoopLoc = L->getStartLoc();
    Function *F = Header->getParent();
    LLVMContext &Ctx = F->getContext();
    if (PragmaFullUnroll && PragmaCount == 0) {
      if (TripCount && Count != TripCount) {
        emitOptimizationRemarkMissed(
            Ctx, DEBUG_TYPE, *F, LoopLoc,
            "Unable to fully unroll loop as directed by unroll(full) pragma "
            "because unrolled size is too large.");
      } else if (!TripCount) {
        emitOptimizationRemarkMissed(
            Ctx, DEBUG_TYPE, *F, LoopLoc,
            "Unable to fully unroll loop as directed by unroll(full) pragma "
            "because loop has a runtime trip count.");
      }
    } else if (PragmaCount > 0 && Count != OriginalCount) {
      emitOptimizationRemarkMissed(
          Ctx, DEBUG_TYPE, *F, LoopLoc,
          "Unable to unroll loop the number of times directed by "
          "unroll_count pragma because unrolled size is too large.");
    }
  }

  if (Unrolling != Full && Count < 2) {
    // Partial unrolling by 1 is a nop. For full unrolling, a factor
    // of 1 makes sense because loop control can be eliminated.
    return false;
  }

  // Unroll the loop.
  if (!UnrollLoop(L, Count, TripCount, AllowRuntime, TripMultiple, LI, this,
                  &LPM, &AC))
    return false;

  return true;
}