PromoteMemoryToRegister.cpp

//===- PromoteMemoryToRegister.cpp - Convert allocas to registers ---------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file promotes memory references to register references.  It promotes
// alloca instructions which only have loads and stores as uses.  An alloca is
// transformed by using dominance frontiers to place PHI nodes, then traversing
// the function in depth-first order to rewrite loads and stores as appropriate.
// This is just the standard SSA construction algorithm to construct "pruned"
// SSA form.
//
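// As an illustrative sketch (assuming the pre-2.0 assembly syntax of this
// era), a promotable alloca and its uses look roughly like:
//
//   %X = alloca int            ; only loads and stores use %X
//   store int 42, int* %X
//   %Y = load int* %X
//
// After promotion the load simply becomes the stored value (42 here), with
// PHI nodes inserted wherever control flow merges different stored values.
//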
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Utils/PromoteMemToReg.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Function.h"
#include "llvm/Instructions.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/AliasSetTracker.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Support/CFG.h"
#include "llvm/Support/StableBasicBlockNumbering.h"
#include <algorithm>
using namespace llvm;

/// isAllocaPromotable - Return true if this alloca is legal for promotion.
/// This is true if there are only loads and stores to the alloca.
///
bool llvm::isAllocaPromotable(const AllocaInst *AI, const TargetData &TD) {
  // FIXME: If the memory unit is of pointer or integer type, we can permit
  // assignments to subsections of the memory unit.

  // Only allow direct loads and stores...
  for (Value::use_const_iterator UI = AI->use_begin(), UE = AI->use_end();
       UI != UE; ++UI)   // Loop over all of the uses of the alloca
    if (isa<LoadInst>(*UI)) {
      // noop
    } else if (const StoreInst *SI = dyn_cast<StoreInst>(*UI)) {
      if (SI->getOperand(0) == AI)
        return false;   // Don't allow a store OF the AI, only INTO the AI.
    } else {
      return false;   // Not a load or store.
    }

  return true;
}

namespace {
  struct PromoteMem2Reg {
    /// Allocas - The alloca instructions being promoted.
    ///
    std::vector<AllocaInst*> Allocas;
    std::vector<AllocaInst*> &RetryList;
    DominatorTree &DT;
    DominanceFrontier &DF;
    const TargetData &TD;

    /// AST - An AliasSetTracker object to update.  If null, don't update it.
    ///
    AliasSetTracker *AST;

    /// AllocaLookup - Reverse mapping of Allocas.
    ///
    std::map<AllocaInst*, unsigned> AllocaLookup;

    /// NewPhiNodes - The PhiNodes we're adding.
    ///
    std::map<BasicBlock*, std::vector<PHINode*> > NewPhiNodes;

    /// PointerAllocaValues - If we are updating an AliasSetTracker, then for
    /// each alloca that is of pointer type, we keep track of what to copyValue
    /// to the inserted PHI nodes here.
    ///
    std::vector<Value*> PointerAllocaValues;

    /// Visited - The set of basic blocks the renamer has already visited.
    ///
    std::set<BasicBlock*> Visited;

    /// BBNumbers - Contains a stable numbering of basic blocks to avoid
    /// non-deterministic behavior.
    StableBasicBlockNumbering BBNumbers;

  public:
    PromoteMem2Reg(const std::vector<AllocaInst*> &A,
                   std::vector<AllocaInst*> &Retry, DominatorTree &dt,
                   DominanceFrontier &df, const TargetData &td,
                   AliasSetTracker *ast)
      : Allocas(A), RetryList(Retry), DT(dt), DF(df), TD(td), AST(ast) {}

    void run();

    /// dominates - Return true if I1 dominates I2 using the DominatorTree.
    ///
    bool dominates(Instruction *I1, Instruction *I2) const {
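      // The value produced by an invoke is only available along its normal
      // destination edge, so for dominance purposes treat the invoke as if it
      // were the first instruction of its normal destination block.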
      if (InvokeInst *II = dyn_cast<InvokeInst>(I1))
        I1 = II->getNormalDest()->begin();
      return DT[I1->getParent()]->dominates(DT[I2->getParent()]);
    }

  private:
    void MarkDominatingPHILive(BasicBlock *BB, unsigned AllocaNum,
                               std::set<PHINode*> &DeadPHINodes);
    bool PromoteLocallyUsedAlloca(BasicBlock *BB, AllocaInst *AI);
    void PromoteLocallyUsedAllocas(BasicBlock *BB,
                                   const std::vector<AllocaInst*> &AIs);

    void RenamePass(BasicBlock *BB, BasicBlock *Pred,
                    std::vector<Value*> &IncVals);
    bool QueuePhiNode(BasicBlock *BB, unsigned AllocaIdx, unsigned &Version,
                      std::set<PHINode*> &InsertedPHINodes);
  };
}  // end of anonymous namespace

void PromoteMem2Reg::run() {
  Function &F = *DF.getRoot()->getParent();

  // LocallyUsedAllocas - Keep track of all of the alloca instructions which are
  // only used in a single basic block.  These instructions can be efficiently
  // promoted by performing a single linear scan over that one block.  Since
  // individual basic blocks are sometimes large, we group together all allocas
  // that are live in a single basic block by the basic block they are live in.
  std::map<BasicBlock*, std::vector<AllocaInst*> > LocallyUsedAllocas;

  if (AST) PointerAllocaValues.resize(Allocas.size());

  for (unsigned AllocaNum = 0; AllocaNum != Allocas.size(); ++AllocaNum) {
    AllocaInst *AI = Allocas[AllocaNum];

    assert(isAllocaPromotable(AI, TD) &&
           "Cannot promote non-promotable alloca!");
    assert(AI->getParent()->getParent() == &F &&
           "All allocas should be in the same function, which is same as DF!");

    if (AI->use_empty()) {
      // If there are no uses of the alloca, just delete it now.
      if (AST) AST->deleteValue(AI);
      AI->getParent()->getInstList().erase(AI);

      // Remove the alloca from the Allocas list, since it has been processed.
      Allocas[AllocaNum] = Allocas.back();
      Allocas.pop_back();
      --AllocaNum;
      continue;
    }

    // Calculate the set of read and write-locations for each alloca.  This is
    // analogous to finding the 'uses' and 'definitions' of each variable.
    std::vector<BasicBlock*> DefiningBlocks;
    std::vector<BasicBlock*> UsingBlocks;

    BasicBlock *OnlyBlock = 0;
    bool OnlyUsedInOneBlock = true;

    // As we scan the uses of the alloca instruction, keep track of stores, and
    // decide whether all of the loads and stores to the alloca are within the
    // same basic block.
    Value *AllocaPointerVal = 0;
    for (Value::use_iterator U = AI->use_begin(), E = AI->use_end();
         U != E; ++U) {
      Instruction *User = cast<Instruction>(*U);
      if (StoreInst *SI = dyn_cast<StoreInst>(User)) {
        // Remember the basic blocks which define new values for the alloca.
        DefiningBlocks.push_back(SI->getParent());
        AllocaPointerVal = SI->getOperand(0);
      } else if (LoadInst *LI = dyn_cast<LoadInst>(User)) {
        // Otherwise it must be a load instruction; keep track of variable reads.
        UsingBlocks.push_back(LI->getParent());
        AllocaPointerVal = LI;
      }

      if (OnlyUsedInOneBlock) {
        if (OnlyBlock == 0)
          OnlyBlock = User->getParent();
        else if (OnlyBlock != User->getParent())
          OnlyUsedInOneBlock = false;
      }
    }

    // If the alloca is only read and written in one basic block, just perform a
    // linear sweep over the block to eliminate it.
    if (OnlyUsedInOneBlock) {
      LocallyUsedAllocas[OnlyBlock].push_back(AI);

      // Remove the alloca from the Allocas list, since it will be processed.
      Allocas[AllocaNum] = Allocas.back();
      Allocas.pop_back();
      --AllocaNum;
      continue;
    }

    if (AST)
      PointerAllocaValues[AllocaNum] = AllocaPointerVal;

    // If we haven't computed a numbering for the BB's in the function, do so
    // now.
    BBNumbers.compute(F);

    // Compute the locations where PhiNodes need to be inserted.  Look at the
    // dominance frontier of EACH basic block we have a write in.
    //
    unsigned CurrentVersion = 0;
    std::set<PHINode*> InsertedPHINodes;
    std::vector<unsigned> DFBlocks;
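
    // This worklist computes the iterated dominance frontier: whenever
    // QueuePhiNode actually inserts a new PHI node, that block becomes a new
    // definition point and is pushed back onto DefiningBlocks so its own
    // dominance frontier is processed as well.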
    while (!DefiningBlocks.empty()) {
      BasicBlock *BB = DefiningBlocks.back();
      DefiningBlocks.pop_back();

      // Look up the DF for this write, add it to PhiNodes.
      DominanceFrontier::const_iterator it = DF.find(BB);
      if (it != DF.end()) {
        const DominanceFrontier::DomSetType &S = it->second;

        // In theory we don't need the indirection through the DFBlocks vector.
        // In practice, the order of calling QueuePhiNode would depend on the
        // (unspecified) ordering of basic blocks in the dominance frontier,
        // which would give PHI nodes non-deterministic subscripts.  Fix this by
        // processing blocks in order of their occurrence in the function.
        for (DominanceFrontier::DomSetType::const_iterator P = S.begin(),
             PE = S.end(); P != PE; ++P)
          DFBlocks.push_back(BBNumbers.getNumber(*P));

        // Sort by the block ordering in the function.
        std::sort(DFBlocks.begin(), DFBlocks.end());

        for (unsigned i = 0, e = DFBlocks.size(); i != e; ++i) {
          BasicBlock *BB = BBNumbers.getBlock(DFBlocks[i]);
          if (QueuePhiNode(BB, AllocaNum, CurrentVersion, InsertedPHINodes))
            DefiningBlocks.push_back(BB);
        }
        DFBlocks.clear();
      }
    }

    // Now that we have inserted PHI nodes along the Iterated Dominance Frontier
    // of the writes to the variable, scan through the reads of the variable,
    // marking PHI nodes which are actually necessary as alive (by removing them
    // from the InsertedPHINodes set).  This is not perfect: there may be PHI
    // nodes marked alive because of loads which are dominated by stores, but
    // there will be no unmarked PHI nodes which are actually used.
    //
    for (unsigned i = 0, e = UsingBlocks.size(); i != e; ++i)
      MarkDominatingPHILive(UsingBlocks[i], AllocaNum, InsertedPHINodes);
    UsingBlocks.clear();

    // If there are any PHI nodes which are now known to be dead, remove them!
    for (std::set<PHINode*>::iterator I = InsertedPHINodes.begin(),
           E = InsertedPHINodes.end(); I != E; ++I) {
      PHINode *PN = *I;
      std::vector<PHINode*> &BBPNs = NewPhiNodes[PN->getParent()];
      BBPNs[AllocaNum] = 0;

      // Check to see if we just removed the last inserted PHI node from this
      // basic block.  If so, remove the entry for the basic block.
      bool HasOtherPHIs = false;
      for (unsigned i = 0, e = BBPNs.size(); i != e; ++i)
        if (BBPNs[i]) {
          HasOtherPHIs = true;
          break;
        }
      if (!HasOtherPHIs)
        NewPhiNodes.erase(PN->getParent());

      if (AST && isa<PointerType>(PN->getType()))
        AST->deleteValue(PN);
      PN->getParent()->getInstList().erase(PN);
    }

    // Keep the reverse mapping of the 'Allocas' array.
    AllocaLookup[Allocas[AllocaNum]] = AllocaNum;
  }

  // Process all allocas which are only used in a single basic block.
  for (std::map<BasicBlock*, std::vector<AllocaInst*> >::iterator I =
         LocallyUsedAllocas.begin(), E = LocallyUsedAllocas.end(); I != E; ++I) {
    const std::vector<AllocaInst*> &LocAllocas = I->second;
    assert(!LocAllocas.empty() && "empty alloca list??");

    // It's common for there to only be one alloca in the list.  Handle it
    // efficiently.
    if (LocAllocas.size() == 1) {
      // If we can do the quick promotion pass, do so now.
      if (PromoteLocallyUsedAlloca(I->first, LocAllocas[0]))
        RetryList.push_back(LocAllocas[0]);  // Failed, retry later.
    } else {
      // Locally promote anything possible.  Note that if this is unable to
      // promote a particular alloca, it puts the alloca onto the Allocas vector
      // for global processing.
      PromoteLocallyUsedAllocas(I->first, LocAllocas);
    }
  }

  if (Allocas.empty())
    return;  // All of the allocas must have been trivial!

  // Set the incoming values for the basic block to be undef values for all of
  // the allocas.  We do this in case there is a load of a value that has not
  // been stored yet.  In this case, it will get this undef value.
  //
  std::vector<Value *> Values(Allocas.size());
  for (unsigned i = 0, e = Allocas.size(); i != e; ++i)
    Values[i] = UndefValue::get(Allocas[i]->getAllocatedType());

  // Walk all basic blocks in the function performing the SSA rename algorithm
  // and inserting the phi nodes we marked as necessary.
  //
  RenamePass(F.begin(), 0, Values);

  // The renamer uses the Visited set to avoid infinite loops.  Clear it now.
  Visited.clear();

  // Remove the allocas themselves from the function...
  for (unsigned i = 0, e = Allocas.size(); i != e; ++i) {
    Instruction *A = Allocas[i];

    // If there are any uses of the alloca instructions left, they must be in
    // sections of dead code that were not processed on the dominance frontier.
    // Just delete the users now.
    //
    if (!A->use_empty())
      A->replaceAllUsesWith(UndefValue::get(A->getType()));
    if (AST) AST->deleteValue(A);
    A->getParent()->getInstList().erase(A);
  }

  // At this point, the renamer has added entries to PHI nodes for all reachable
  // code.  Unfortunately, there may be blocks which are not reachable, which
  // the renamer hasn't traversed.  If this is the case, the PHI nodes may not
  // have incoming values for all predecessors.  Loop over all PHI nodes we have
  // created, inserting undef values if they are missing any incoming values.
  //
  for (std::map<BasicBlock*, std::vector<PHINode *> >::iterator I =
         NewPhiNodes.begin(), E = NewPhiNodes.end(); I != E; ++I) {
    std::vector<BasicBlock*> Preds(pred_begin(I->first), pred_end(I->first));
    std::vector<PHINode*> &PNs = I->second;
    assert(!PNs.empty() && "Empty PHI node list??");

    // Loop over all of the PHI nodes and see if there are any that we can get
    // rid of because they merge all of the same incoming values.  This can
    // happen due to undef values coming into the PHI nodes.
    PHINode *SomePHI = 0;
    for (unsigned i = 0, e = PNs.size(); i != e; ++i)
      if (PNs[i]) {
        if (Value *V = hasConstantValue(PNs[i])) {
          if (!isa<Instruction>(V) || dominates(cast<Instruction>(V), PNs[i])) {
            if (AST && isa<PointerType>(PNs[i]->getType()))
              AST->deleteValue(PNs[i]);
            PNs[i]->replaceAllUsesWith(V);
            PNs[i]->eraseFromParent();
            PNs[i] = 0;
          }
        }
        if (PNs[i])
          SomePHI = PNs[i];
      }

    // Only do work here if the PHI nodes are missing incoming values.  We know
    // that all PHI nodes that were inserted in a block will have the same
    // number of incoming values, so we can just check any PHI node.
    if (SomePHI && Preds.size() != SomePHI->getNumIncomingValues()) {
      // Ok, now we know that all of the PHI nodes are missing entries for some
      // basic blocks.  Start by sorting the incoming predecessors for efficient
      // access.
      std::sort(Preds.begin(), Preds.end());

      // Now we loop through all BB's which have entries in SomePHI and remove
      // them from the Preds list.
      for (unsigned i = 0, e = SomePHI->getNumIncomingValues(); i != e; ++i) {
        // Do a log(n) search of the Preds list for the entry we want.
        std::vector<BasicBlock*>::iterator EntIt =
          std::lower_bound(Preds.begin(), Preds.end(),
                           SomePHI->getIncomingBlock(i));
        assert(EntIt != Preds.end() && *EntIt == SomePHI->getIncomingBlock(i) &&
               "PHI node has entry for a block which is not a predecessor!");

        // Remove the entry.
        Preds.erase(EntIt);
      }

      // At this point, the blocks left in the Preds list must have dummy
      // entries inserted into every PHI node for the block.
      for (unsigned i = 0, e = PNs.size(); i != e; ++i)
        if (PHINode *PN = PNs[i]) {
          Value *UndefVal = UndefValue::get(PN->getType());
          for (unsigned pred = 0, e = Preds.size(); pred != e; ++pred)
            PN->addIncoming(UndefVal, Preds[pred]);
        }
    }
  }
}

// MarkDominatingPHILive - Mem2Reg wants to construct "pruned" SSA form, not
// "minimal" SSA form.  To do this, it inserts all of the PHI nodes on the IDF
// as usual (inserting the PHI nodes in the DeadPHINodes set), then processes
// each read of the variable.  For each block that reads the variable, this
// function is called, which removes used PHI nodes from the DeadPHINodes set.
// After all of the reads have been processed, any PHI nodes left in the
// DeadPHINodes set are removed.
//
void PromoteMem2Reg::MarkDominatingPHILive(BasicBlock *BB, unsigned AllocaNum,
                                           std::set<PHINode*> &DeadPHINodes) {
  // Scan the immediate dominators of this block looking for a block which has a
  // PHI node for AllocaNum.  If we find it, mark the PHI node as being alive!
  for (DominatorTree::Node *N = DT[BB]; N; N = N->getIDom()) {
    BasicBlock *DomBB = N->getBlock();
    std::map<BasicBlock*, std::vector<PHINode*> >::iterator
      I = NewPhiNodes.find(DomBB);
    if (I != NewPhiNodes.end() && I->second[AllocaNum]) {
      // Ok, we found an inserted PHI node which dominates this value.
      PHINode *DominatingPHI = I->second[AllocaNum];

      // Find out if we previously thought it was dead.
      std::set<PHINode*>::iterator DPNI = DeadPHINodes.find(DominatingPHI);
      if (DPNI != DeadPHINodes.end()) {
        // Ok, until now, we thought this PHI node was dead.  Mark it as being
        // alive/needed.
        DeadPHINodes.erase(DPNI);

        // Now that we have marked the PHI node alive, also mark any PHI nodes
        // which it might use as being alive as well.
        for (pred_iterator PI = pred_begin(DomBB), PE = pred_end(DomBB);
             PI != PE; ++PI)
          MarkDominatingPHILive(*PI, AllocaNum, DeadPHINodes);
      }
    }
  }
}

/// PromoteLocallyUsedAlloca - Many allocas are only used within a single basic
/// block.  If this is the case, avoid traversing the CFG and inserting a lot of
/// potentially useless PHI nodes by just performing a single linear pass over
/// the basic block using the Alloca.
///
/// If we cannot promote this alloca (because it is read before it is written),
/// return true.  This is necessary in cases where, due to control flow, the
/// alloca is potentially undefined on some control flow paths.  e.g. code like
/// this is potentially correct:
///
///   for (...) { if (c) { A = undef; undef = B; } }
///
/// ... so long as A is not used before undef is set.
///
bool PromoteMem2Reg::PromoteLocallyUsedAlloca(BasicBlock *BB, AllocaInst *AI) {
  assert(!AI->use_empty() && "There are no uses of the alloca!");

  // Handle degenerate cases quickly.
  if (AI->hasOneUse()) {
    Instruction *U = cast<Instruction>(AI->use_back());
    if (LoadInst *LI = dyn_cast<LoadInst>(U)) {
      // Must be a load of the uninitialized value; it gets undef.
      LI->replaceAllUsesWith(UndefValue::get(AI->getAllocatedType()));
      if (AST && isa<PointerType>(LI->getType()))
        AST->deleteValue(LI);
    } else {
      // Otherwise it must be a store which is never read.
      assert(isa<StoreInst>(U));
    }
    BB->getInstList().erase(U);
  } else {
    // Scan the block, tracking the current value of the alloca in CurVal.  If
    // a load is seen before any store, the value is locally uninitialized and
    // we cannot promote it here; the caller will retry after inserting an
    // explicit store of undef.
    Value *CurVal = 0;
    for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ) {
      Instruction *Inst = I++;
      if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
        if (LI->getOperand(0) == AI) {
          if (!CurVal) return true;  // Could not locally promote!

          // Loads just return the "current value"...
          LI->replaceAllUsesWith(CurVal);
          if (AST && isa<PointerType>(LI->getType()))
            AST->deleteValue(LI);
          BB->getInstList().erase(LI);
        }
      } else if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
        if (SI->getOperand(1) == AI) {
          // Stores update the "current value"...
          CurVal = SI->getOperand(0);
          BB->getInstList().erase(SI);
        }
      }
    }
  }

  // After traversing the basic block there should be no more uses of the
  // alloca; remove it now.
  assert(AI->use_empty() && "Uses of alloca from more than one BB??");
  if (AST) AST->deleteValue(AI);
  AI->getParent()->getInstList().erase(AI);
  return false;
}

/// PromoteLocallyUsedAllocas - This method is just like
/// PromoteLocallyUsedAlloca, except that it processes multiple alloca
/// instructions in parallel.  This is important in cases where we have large
/// basic blocks, as we don't want to rescan the entire basic block for each
/// alloca which is locally used in it (which might be a lot).
void PromoteMem2Reg::
PromoteLocallyUsedAllocas(BasicBlock *BB, const std::vector<AllocaInst*> &AIs) {
  std::map<AllocaInst*, Value*> CurValues;
  for (unsigned i = 0, e = AIs.size(); i != e; ++i)
    CurValues[AIs[i]] = 0;  // Insert with null value
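
  // Make a single forward scan over the block: loads of a tracked alloca are
  // replaced with that alloca's current value, and stores to a tracked alloca
  // update the current value and are deleted.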
  for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ) {
    Instruction *Inst = I++;
    if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
      // Is this a load of an alloca we are tracking?
      if (AllocaInst *AI = dyn_cast<AllocaInst>(LI->getOperand(0))) {
        std::map<AllocaInst*, Value*>::iterator AIt = CurValues.find(AI);
        if (AIt != CurValues.end()) {
          // If loading an uninitialized value, allow the inter-block case to
          // handle it.  Due to control flow, this might actually be ok.
          if (AIt->second == 0) {   // Use of locally uninitialized value??
            RetryList.push_back(AI);  // Retry elsewhere.
            CurValues.erase(AIt);     // Stop tracking this here.
            if (CurValues.empty()) return;
          } else {
            // Loads just return the "current value"...
            LI->replaceAllUsesWith(AIt->second);
            if (AST && isa<PointerType>(LI->getType()))
              AST->deleteValue(LI);
            BB->getInstList().erase(LI);
          }
        }
      }
    } else if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
      if (AllocaInst *AI = dyn_cast<AllocaInst>(SI->getOperand(1))) {
        std::map<AllocaInst*, Value*>::iterator AIt = CurValues.find(AI);
        if (AIt != CurValues.end()) {
          // Stores update the "current value"...
          AIt->second = SI->getOperand(0);
          BB->getInstList().erase(SI);
        }
      }
    }
  }
}

// QueuePhiNode - Queues a phi-node to be added to a basic block for a specific
// alloca.  Returns true if there wasn't already a phi-node for that variable.
//
bool PromoteMem2Reg::QueuePhiNode(BasicBlock *BB, unsigned AllocaNo,
                                  unsigned &Version,
                                  std::set<PHINode*> &InsertedPHINodes) {
  // Look up the basic-block in question.
  std::vector<PHINode*> &BBPNs = NewPhiNodes[BB];
  if (BBPNs.empty()) BBPNs.resize(Allocas.size());

  // If the BB already has a phi node added for the i'th alloca then we're done!
  if (BBPNs[AllocaNo]) return false;

  // Create a PhiNode using the dereferenced type... and add the phi-node to the
  // BasicBlock.
  PHINode *PN = new PHINode(Allocas[AllocaNo]->getAllocatedType(),
                            Allocas[AllocaNo]->getName() + "." +
                            utostr(Version++), BB->begin());
  BBPNs[AllocaNo] = PN;
  InsertedPHINodes.insert(PN);

  if (AST && isa<PointerType>(PN->getType()))
    AST->copyValue(PointerAllocaValues[AllocaNo], PN);

  return true;
}

// RenamePass - Recursively traverse the CFG of the function, renaming loads and
// stores to the allocas which we are promoting.  IncomingVals indicates what
// value each Alloca contains on exit from the predecessor block Pred.
//
void PromoteMem2Reg::RenamePass(BasicBlock *BB, BasicBlock *Pred,
                                std::vector<Value*> &IncomingVals) {
  // If this BB needs a PHI node, update the PHI node for each variable we need
  // PHI nodes for.
  std::map<BasicBlock*, std::vector<PHINode *> >::iterator
    BBPNI = NewPhiNodes.find(BB);
  if (BBPNI != NewPhiNodes.end()) {
    std::vector<PHINode *> &BBPNs = BBPNI->second;
    for (unsigned k = 0; k != BBPNs.size(); ++k)
      if (PHINode *PN = BBPNs[k]) {
        // Add this incoming value to the PHI node.
        PN->addIncoming(IncomingVals[k], Pred);

        // The currently active variable for this block is now the PHI.
        IncomingVals[k] = PN;
      }
  }

  // Don't revisit nodes.
  if (Visited.count(BB)) return;

  // Mark as visited.
  Visited.insert(BB);
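
  // Walk the instructions of this block (stopping at the terminator), replacing
  // loads of promoted allocas with the current incoming value and recording
  // stores as the new current value, deleting both as we go.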
  for (BasicBlock::iterator II = BB->begin(); !isa<TerminatorInst>(II); ) {
    Instruction *I = II++;  // Get the instruction, increment iterator.

    if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
      if (AllocaInst *Src = dyn_cast<AllocaInst>(LI->getPointerOperand())) {
        std::map<AllocaInst*, unsigned>::iterator AI = AllocaLookup.find(Src);
        if (AI != AllocaLookup.end()) {
          Value *V = IncomingVals[AI->second];

          // Walk the use list of this load and replace all uses with V.
          LI->replaceAllUsesWith(V);
          if (AST && isa<PointerType>(LI->getType()))
            AST->deleteValue(LI);
          BB->getInstList().erase(LI);
        }
      }
    } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
      // Delete this instruction and mark the name as the current holder of the
      // value.
      if (AllocaInst *Dest = dyn_cast<AllocaInst>(SI->getPointerOperand())) {
        std::map<AllocaInst *, unsigned>::iterator ai = AllocaLookup.find(Dest);
        if (ai != AllocaLookup.end()) {
          // What value were we writing?
          IncomingVals[ai->second] = SI->getOperand(0);
          BB->getInstList().erase(SI);
        }
      }
    }
  }

  // Recurse to our successors.
  TerminatorInst *TI = BB->getTerminator();
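  // Each successor gets its own copy of the incoming values so that renaming
  // along one successor path cannot affect its siblings.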
  for (unsigned i = 0; i != TI->getNumSuccessors(); i++) {
    std::vector<Value*> OutgoingVals(IncomingVals);
    RenamePass(TI->getSuccessor(i), BB, OutgoingVals);
  }
}

/// PromoteMemToReg - Promote the specified list of alloca instructions into
/// scalar registers, inserting PHI nodes as appropriate.  This function makes
/// use of DominanceFrontier information.  This function does not modify the CFG
/// of the function at all.  All allocas must be from the same function.
///
/// If AST is specified, the specified tracker is updated to reflect changes
/// made to the IR.
///
void llvm::PromoteMemToReg(const std::vector<AllocaInst*> &Allocas,
                           DominatorTree &DT, DominanceFrontier &DF,
                           const TargetData &TD, AliasSetTracker *AST) {
  // If there is nothing to do, bail out...
  if (Allocas.empty()) return;

  std::vector<AllocaInst*> RetryList;
  PromoteMem2Reg(Allocas, RetryList, DT, DF, TD, AST).run();

  // PromoteMem2Reg may not have been able to promote all of the allocas in one
  // pass; run it again if needed.
  while (!RetryList.empty()) {
    // If we need to retry some allocas, this is due to there being no store
    // before a read in a local block.  To counteract this, insert a store of
    // undef into the alloca right after the alloca itself.
    for (unsigned i = 0, e = RetryList.size(); i != e; ++i) {
      BasicBlock::iterator BBI = RetryList[i];

      new StoreInst(UndefValue::get(RetryList[i]->getAllocatedType()),
                    RetryList[i], ++BBI);
    }

    std::vector<AllocaInst*> NewAllocas;
    std::swap(NewAllocas, RetryList);
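
    // After the swap, NewAllocas holds the allocas to retry on this iteration
    // and RetryList is empty again, ready to collect anything that still fails.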
    PromoteMem2Reg(NewAllocas, RetryList, DT, DF, TD, AST).run();
  }
}