//===- MergeFunctions.cpp - Merge identical functions ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass looks for equivalent functions that are mergeable and folds them.
//
// A hash is computed from the function, based on its type and number of
// basic blocks.
//
// Once all hashes are computed, we perform an expensive equality comparison
// on each function pair. This takes n^2/2 comparisons per bucket, so it's
// important that the hash function be high quality. The equality comparison
// iterates through each instruction in each basic block.
//
// When a match is found the functions are folded. If both functions are
// overridable, we move the functionality into a new internal function and
// leave two overridable thunks to it.
//
//===----------------------------------------------------------------------===//
//
// Future work:
//
// * virtual functions.
//
// Many functions have their address taken by the virtual function table for
// the object they belong to. However, as long as it's only used for a lookup
// and call, this is irrelevant, and we'd like to fold such functions.
//
// * switch from n^2 pair-wise comparisons to an n-way comparison for each
// bucket.
//
// * be smarter about bitcasts.
//
// In order to fold functions, we will sometimes add either bitcast instructions
// or bitcast constant expressions. Unfortunately, this can confound further
// analysis since the two functions differ where one has a bitcast and the
// other doesn't. We should learn to look through bitcasts.
//
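// As a purely illustrative sketch (the functions below are invented for the
// example, not taken from any test), given two structurally identical
// functions
//
//   define internal i32 @f(i32 %x) {
//     %r = add i32 %x, 1
//     ret i32 %r
//   }
//   define internal i32 @g(i32 %x) {
//     %r = add i32 %x, 1
//     ret i32 %r
//   }
//
// the pass keeps one of them and either redirects the other's callers to the
// survivor (through a bitcast when the types only match up to pointer
// equivalence) or, if the other must keep its own identity, turns it into a
// thunk or alias that forwards to the survivor.
//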
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "mergefunc"
#include "llvm/Transforms/IPO.h"
#include "llvm/Constants.h"
#include "llvm/IRBuilder.h"
#include "llvm/InlineAsm.h"
#include "llvm/Instructions.h"
#include "llvm/LLVMContext.h"
#include "llvm/Module.h"
#include "llvm/Operator.h"
#include "llvm/Pass.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/ValueHandle.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetData.h"
#include <vector>
using namespace llvm;

STATISTIC(NumFunctionsMerged, "Number of functions merged");
STATISTIC(NumThunksWritten, "Number of thunks generated");
STATISTIC(NumAliasesWritten, "Number of aliases generated");
STATISTIC(NumDoubleWeak, "Number of new functions created");

/// Creates a hash-code for the function which is the same for any two
/// functions that will compare equal, without looking at the instructions
/// inside the function.
static unsigned profileFunction(const Function *F) {
  FunctionType *FTy = F->getFunctionType();
  FoldingSetNodeID ID;
  ID.AddInteger(F->size());
  ID.AddInteger(F->getCallingConv());
  ID.AddBoolean(F->hasGC());
  ID.AddBoolean(FTy->isVarArg());
  ID.AddInteger(FTy->getReturnType()->getTypeID());
  for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i)
    ID.AddInteger(FTy->getParamType(i)->getTypeID());
  return ID.ComputeHash();
}

namespace {

/// ComparableFunction - A struct that pairs together functions with a
/// TargetData so that we can keep them together as elements in the DenseSet.
class ComparableFunction {
public:
  static const ComparableFunction EmptyKey;
  static const ComparableFunction TombstoneKey;
  static TargetData * const LookupOnly;

  ComparableFunction(Function *Func, TargetData *TD)
    : Func(Func), Hash(profileFunction(Func)), TD(TD) {}

  Function *getFunc() const { return Func; }
  unsigned getHash() const { return Hash; }
  TargetData *getTD() const { return TD; }

  // Drops AssertingVH reference to the function. Outside of debug mode, this
  // does nothing.
  void release() {
    assert(Func &&
           "Attempted to release function twice, or release empty/tombstone!");
    Func = NULL;
  }

private:
  explicit ComparableFunction(unsigned Hash)
    : Func(NULL), Hash(Hash), TD(NULL) {}

  AssertingVH<Function> Func;
  unsigned Hash;
  TargetData *TD;
};

const ComparableFunction ComparableFunction::EmptyKey = ComparableFunction(0);
const ComparableFunction ComparableFunction::TombstoneKey =
    ComparableFunction(1);
TargetData *const ComparableFunction::LookupOnly = (TargetData*)(-1);

}

namespace llvm {

template <>
struct DenseMapInfo<ComparableFunction> {
  static ComparableFunction getEmptyKey() {
    return ComparableFunction::EmptyKey;
  }
  static ComparableFunction getTombstoneKey() {
    return ComparableFunction::TombstoneKey;
  }
  static unsigned getHashValue(const ComparableFunction &CF) {
    return CF.getHash();
  }
  static bool isEqual(const ComparableFunction &LHS,
                      const ComparableFunction &RHS);
};

}

namespace {

/// FunctionComparator - Compares two functions to determine whether or not
/// they will generate machine code with the same behaviour. TargetData is
/// used if available. The comparator always fails conservatively (erring on the
/// side of claiming that two functions are different).
class FunctionComparator {
public:
  FunctionComparator(const TargetData *TD, const Function *F1,
                     const Function *F2)
    : F1(F1), F2(F2), TD(TD) {}

  /// Test whether the two functions have equivalent behaviour.
  bool compare();

private:
  /// Test whether two basic blocks have equivalent behaviour.
  bool compare(const BasicBlock *BB1, const BasicBlock *BB2);

  /// Assign or look up previously assigned numbers for the two values, and
  /// return whether the numbers are equal. Numbers are assigned in the order
  /// visited.
  bool enumerate(const Value *V1, const Value *V2);

  /// Compare two Instructions for equivalence, similar to
  /// Instruction::isSameOperationAs but with modifications to the type
  /// comparison.
  bool isEquivalentOperation(const Instruction *I1,
                             const Instruction *I2) const;

  /// Compare two GEPs for equivalent pointer arithmetic.
  bool isEquivalentGEP(const GEPOperator *GEP1, const GEPOperator *GEP2);
  bool isEquivalentGEP(const GetElementPtrInst *GEP1,
                       const GetElementPtrInst *GEP2) {
    return isEquivalentGEP(cast<GEPOperator>(GEP1), cast<GEPOperator>(GEP2));
  }

  /// Compare two Types, treating all pointer types as equal.
  bool isEquivalentType(Type *Ty1, Type *Ty2) const;

  // The two functions undergoing comparison.
  const Function *F1, *F2;

  const TargetData *TD;

  DenseMap<const Value *, const Value *> id_map;
  DenseSet<const Value *> seen_values;
};

}

// Any two pointers in the same address space are equivalent; a pointer and the
// intptr_t type are also equivalent. Otherwise, standard type equivalence
// rules apply.
bool FunctionComparator::isEquivalentType(Type *Ty1, Type *Ty2) const {
  if (Ty1 == Ty2)
    return true;
  if (Ty1->getTypeID() != Ty2->getTypeID()) {
    if (TD) {
      LLVMContext &Ctx = Ty1->getContext();
      if (isa<PointerType>(Ty1) && Ty2 == TD->getIntPtrType(Ctx)) return true;
      if (isa<PointerType>(Ty2) && Ty1 == TD->getIntPtrType(Ctx)) return true;
    }
    return false;
  }

  switch (Ty1->getTypeID()) {
  default:
    llvm_unreachable("Unknown type!");
    // Fall through in Release mode.
  case Type::IntegerTyID:
  case Type::VectorTyID:
    // Ty1 == Ty2 would have returned true earlier.
    return false;

  case Type::VoidTyID:
  case Type::FloatTyID:
  case Type::DoubleTyID:
  case Type::X86_FP80TyID:
  case Type::FP128TyID:
  case Type::PPC_FP128TyID:
  case Type::LabelTyID:
  case Type::MetadataTyID:
    return true;

  case Type::PointerTyID: {
    PointerType *PTy1 = cast<PointerType>(Ty1);
    PointerType *PTy2 = cast<PointerType>(Ty2);
    return PTy1->getAddressSpace() == PTy2->getAddressSpace();
  }

  case Type::StructTyID: {
    StructType *STy1 = cast<StructType>(Ty1);
    StructType *STy2 = cast<StructType>(Ty2);
    if (STy1->getNumElements() != STy2->getNumElements())
      return false;
    if (STy1->isPacked() != STy2->isPacked())
      return false;
    for (unsigned i = 0, e = STy1->getNumElements(); i != e; ++i) {
      if (!isEquivalentType(STy1->getElementType(i), STy2->getElementType(i)))
        return false;
    }
    return true;
  }

  case Type::FunctionTyID: {
    FunctionType *FTy1 = cast<FunctionType>(Ty1);
    FunctionType *FTy2 = cast<FunctionType>(Ty2);
    if (FTy1->getNumParams() != FTy2->getNumParams() ||
        FTy1->isVarArg() != FTy2->isVarArg())
      return false;
    if (!isEquivalentType(FTy1->getReturnType(), FTy2->getReturnType()))
      return false;
    for (unsigned i = 0, e = FTy1->getNumParams(); i != e; ++i) {
      if (!isEquivalentType(FTy1->getParamType(i), FTy2->getParamType(i)))
        return false;
    }
    return true;
  }

  case Type::ArrayTyID: {
    ArrayType *ATy1 = cast<ArrayType>(Ty1);
    ArrayType *ATy2 = cast<ArrayType>(Ty2);
    return ATy1->getNumElements() == ATy2->getNumElements() &&
           isEquivalentType(ATy1->getElementType(), ATy2->getElementType());
  }
  }
}
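
// For example (an illustrative consequence of the rules above): "i8*" and
// "i32*" in the same address space compare equal, and when TargetData is
// available and defines a 64-bit intptr type, "i8*" also compares equal to
// "i64".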

// Determine whether the two operations are the same except that pointer-to-A
// and pointer-to-B are equivalent. This should be kept in sync with
// Instruction::isSameOperationAs.
bool FunctionComparator::isEquivalentOperation(const Instruction *I1,
                                               const Instruction *I2) const {
  // Differences from Instruction::isSameOperationAs:
  // * replace type comparison with calls to isEquivalentType.
  // * we test for I->hasSameSubclassOptionalData (nuw/nsw/tail) at the top
  // * because of the above, we don't test for the tail bit on calls later on
  if (I1->getOpcode() != I2->getOpcode() ||
      I1->getNumOperands() != I2->getNumOperands() ||
      !isEquivalentType(I1->getType(), I2->getType()) ||
      !I1->hasSameSubclassOptionalData(I2))
    return false;

  // We have two instructions of identical opcode and #operands. Check to see
  // if all operands are the same type.
  for (unsigned i = 0, e = I1->getNumOperands(); i != e; ++i)
    if (!isEquivalentType(I1->getOperand(i)->getType(),
                          I2->getOperand(i)->getType()))
      return false;

  // Check special state that is a part of some instructions.
  if (const LoadInst *LI = dyn_cast<LoadInst>(I1))
    return LI->isVolatile() == cast<LoadInst>(I2)->isVolatile() &&
           LI->getAlignment() == cast<LoadInst>(I2)->getAlignment() &&
           LI->getOrdering() == cast<LoadInst>(I2)->getOrdering() &&
           LI->getSynchScope() == cast<LoadInst>(I2)->getSynchScope();
  if (const StoreInst *SI = dyn_cast<StoreInst>(I1))
    return SI->isVolatile() == cast<StoreInst>(I2)->isVolatile() &&
           SI->getAlignment() == cast<StoreInst>(I2)->getAlignment() &&
           SI->getOrdering() == cast<StoreInst>(I2)->getOrdering() &&
           SI->getSynchScope() == cast<StoreInst>(I2)->getSynchScope();
  if (const CmpInst *CI = dyn_cast<CmpInst>(I1))
    return CI->getPredicate() == cast<CmpInst>(I2)->getPredicate();
  if (const CallInst *CI = dyn_cast<CallInst>(I1))
    return CI->getCallingConv() == cast<CallInst>(I2)->getCallingConv() &&
           CI->getAttributes() == cast<CallInst>(I2)->getAttributes();
  if (const InvokeInst *CI = dyn_cast<InvokeInst>(I1))
    return CI->getCallingConv() == cast<InvokeInst>(I2)->getCallingConv() &&
           CI->getAttributes() == cast<InvokeInst>(I2)->getAttributes();
  if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(I1))
    return IVI->getIndices() == cast<InsertValueInst>(I2)->getIndices();
  if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(I1))
    return EVI->getIndices() == cast<ExtractValueInst>(I2)->getIndices();
  if (const FenceInst *FI = dyn_cast<FenceInst>(I1))
    return FI->getOrdering() == cast<FenceInst>(I2)->getOrdering() &&
           FI->getSynchScope() == cast<FenceInst>(I2)->getSynchScope();
  if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(I1))
    return CXI->isVolatile() == cast<AtomicCmpXchgInst>(I2)->isVolatile() &&
           CXI->getOrdering() == cast<AtomicCmpXchgInst>(I2)->getOrdering() &&
           CXI->getSynchScope() == cast<AtomicCmpXchgInst>(I2)->getSynchScope();
  if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I1))
    return RMWI->getOperation() == cast<AtomicRMWInst>(I2)->getOperation() &&
           RMWI->isVolatile() == cast<AtomicRMWInst>(I2)->isVolatile() &&
           RMWI->getOrdering() == cast<AtomicRMWInst>(I2)->getOrdering() &&
           RMWI->getSynchScope() == cast<AtomicRMWInst>(I2)->getSynchScope();

  return true;
}

// Determine whether two GEP operations perform the same underlying arithmetic.
bool FunctionComparator::isEquivalentGEP(const GEPOperator *GEP1,
                                         const GEPOperator *GEP2) {
  // When we have target data, we can reduce the GEP down to the value in bytes
  // added to the address.
  if (TD && GEP1->hasAllConstantIndices() && GEP2->hasAllConstantIndices()) {
    SmallVector<Value *, 8> Indices1(GEP1->idx_begin(), GEP1->idx_end());
    SmallVector<Value *, 8> Indices2(GEP2->idx_begin(), GEP2->idx_end());
    uint64_t Offset1 = TD->getIndexedOffset(GEP1->getPointerOperandType(),
                                            Indices1);
    uint64_t Offset2 = TD->getIndexedOffset(GEP2->getPointerOperandType(),
                                            Indices2);
    return Offset1 == Offset2;
  }

  if (GEP1->getPointerOperand()->getType() !=
      GEP2->getPointerOperand()->getType())
    return false;

  if (GEP1->getNumOperands() != GEP2->getNumOperands())
    return false;

  for (unsigned i = 0, e = GEP1->getNumOperands(); i != e; ++i) {
    if (!enumerate(GEP1->getOperand(i), GEP2->getOperand(i)))
      return false;
  }

  return true;
}

// Compare two values used by the two functions under pair-wise comparison. If
// this is the first time the values are seen, they're added to the mapping so
// that we will detect mismatches on next use.
bool FunctionComparator::enumerate(const Value *V1, const Value *V2) {
  // Check for function @f1 referring to itself and function @f2 referring to
  // itself, or referring to each other, or both referring to either of them.
  // They're all equivalent if the two functions are otherwise equivalent.
  if (V1 == F1 && V2 == F2)
    return true;
  if (V1 == F2 && V2 == F1)
    return true;

  if (const Constant *C1 = dyn_cast<Constant>(V1)) {
    if (V1 == V2) return true;
    const Constant *C2 = dyn_cast<Constant>(V2);
    if (!C2) return false;
    // TODO: constant expressions with GEP or references to F1 or F2.
    if (C1->isNullValue() && C2->isNullValue() &&
        isEquivalentType(C1->getType(), C2->getType()))
      return true;
    // Try bitcasting C2 to C1's type. If the bitcast is legal and returns C1
    // then they must have equal bit patterns.
    return C1->getType()->canLosslesslyBitCastTo(C2->getType()) &&
           C1 == ConstantExpr::getBitCast(const_cast<Constant*>(C2), C1->getType());
  }

  if (isa<InlineAsm>(V1) || isa<InlineAsm>(V2))
    return V1 == V2;

  // Check that V1 maps to V2. If we find a value that V1 maps to then we simply
  // check whether it's equal to V2. When there is no mapping then we need to
  // ensure that V2 isn't already equivalent to something else. For this
  // purpose, we track the V2 values in a set.
  const Value *&map_elem = id_map[V1];
  if (map_elem)
    return map_elem == V2;
  if (!seen_values.insert(V2).second)
    return false;
  map_elem = V2;
  return true;
}
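
// For example (an illustrative walk-through, with invented value names): when
// the basic-block comparison below reaches "%a = add i32 %x, %y" in F1 and
// "%b = add i32 %p, %q" in F2, enumerate(%x, %p) records the pairing the first
// time those values are seen. If F1 later uses %x where F2 uses %q, enumerate
// fails because %x is already mapped to %p; likewise, if F1 uses %y where F2
// reuses %p, it fails because %p is already recorded in seen_values.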

// Test whether two basic blocks have equivalent behaviour.
bool FunctionComparator::compare(const BasicBlock *BB1, const BasicBlock *BB2) {
  BasicBlock::const_iterator F1I = BB1->begin(), F1E = BB1->end();
  BasicBlock::const_iterator F2I = BB2->begin(), F2E = BB2->end();

  do {
    if (!enumerate(F1I, F2I))
      return false;

    if (const GetElementPtrInst *GEP1 = dyn_cast<GetElementPtrInst>(F1I)) {
      const GetElementPtrInst *GEP2 = dyn_cast<GetElementPtrInst>(F2I);
      if (!GEP2)
        return false;

      if (!enumerate(GEP1->getPointerOperand(), GEP2->getPointerOperand()))
        return false;

      if (!isEquivalentGEP(GEP1, GEP2))
        return false;
    } else {
      if (!isEquivalentOperation(F1I, F2I))
        return false;

      assert(F1I->getNumOperands() == F2I->getNumOperands());
      for (unsigned i = 0, e = F1I->getNumOperands(); i != e; ++i) {
        Value *OpF1 = F1I->getOperand(i);
        Value *OpF2 = F2I->getOperand(i);

        if (!enumerate(OpF1, OpF2))
          return false;

        if (OpF1->getValueID() != OpF2->getValueID() ||
            !isEquivalentType(OpF1->getType(), OpF2->getType()))
          return false;
      }
    }

    ++F1I, ++F2I;
  } while (F1I != F1E && F2I != F2E);

  return F1I == F1E && F2I == F2E;
}

// Test whether the two functions have equivalent behaviour.
bool FunctionComparator::compare() {
  // We need to recheck everything, but check the things that weren't included
  // in the hash first.

  if (F1->getAttributes() != F2->getAttributes())
    return false;

  if (F1->hasGC() != F2->hasGC())
    return false;

  if (F1->hasGC() && F1->getGC() != F2->getGC())
    return false;

  if (F1->hasSection() != F2->hasSection())
    return false;

  if (F1->hasSection() && F1->getSection() != F2->getSection())
    return false;

  if (F1->isVarArg() != F2->isVarArg())
    return false;

  // TODO: if it's internal and only used in direct calls, we could handle this
  // case too.
  if (F1->getCallingConv() != F2->getCallingConv())
    return false;

  if (!isEquivalentType(F1->getFunctionType(), F2->getFunctionType()))
    return false;

  assert(F1->arg_size() == F2->arg_size() &&
         "Identically typed functions have different numbers of args!");

  // Visit the arguments so that they get enumerated in the order they're
  // passed in.
  for (Function::const_arg_iterator f1i = F1->arg_begin(),
         f2i = F2->arg_begin(), f1e = F1->arg_end(); f1i != f1e; ++f1i, ++f2i) {
    if (!enumerate(f1i, f2i))
      llvm_unreachable("Arguments repeat!");
  }

  // We do a CFG-ordered walk since the actual ordering of the blocks in the
  // linked list is immaterial. Our walk starts at the entry block for both
  // functions, then takes each block from each terminator in order. As an
  // artifact, this also means that unreachable blocks are ignored.
  SmallVector<const BasicBlock *, 8> F1BBs, F2BBs;
  SmallSet<const BasicBlock *, 128> VisitedBBs; // in terms of F1.

  F1BBs.push_back(&F1->getEntryBlock());
  F2BBs.push_back(&F2->getEntryBlock());

  VisitedBBs.insert(F1BBs[0]);
  while (!F1BBs.empty()) {
    const BasicBlock *F1BB = F1BBs.pop_back_val();
    const BasicBlock *F2BB = F2BBs.pop_back_val();

    if (!enumerate(F1BB, F2BB) || !compare(F1BB, F2BB))
      return false;

    const TerminatorInst *F1TI = F1BB->getTerminator();
    const TerminatorInst *F2TI = F2BB->getTerminator();

    assert(F1TI->getNumSuccessors() == F2TI->getNumSuccessors());
    for (unsigned i = 0, e = F1TI->getNumSuccessors(); i != e; ++i) {
      if (!VisitedBBs.insert(F1TI->getSuccessor(i)))
        continue;

      F1BBs.push_back(F1TI->getSuccessor(i));
      F2BBs.push_back(F2TI->getSuccessor(i));
    }
  }
  return true;
}

namespace {

/// MergeFunctions finds functions which will generate identical machine code,
/// by considering all pointer types to be equivalent. Once identified,
/// MergeFunctions will fold them by replacing a call to one with a call to a
/// bitcast of the other.
///
class MergeFunctions : public ModulePass {
public:
  static char ID;
  MergeFunctions()
    : ModulePass(ID), HasGlobalAliases(false) {
    initializeMergeFunctionsPass(*PassRegistry::getPassRegistry());
  }

  bool runOnModule(Module &M);

private:
  typedef DenseSet<ComparableFunction> FnSetType;

  /// A work queue of functions that may have been modified and should be
  /// analyzed again.
  std::vector<WeakVH> Deferred;

  /// Insert a ComparableFunction into the FnSet, or merge it away if it's
  /// equal to one that's already present.
  bool insert(ComparableFunction &NewF);

  /// Remove a Function from the FnSet and queue it up for a second sweep of
  /// analysis.
  void remove(Function *F);

  /// Find the functions that use this Value and remove them from FnSet and
  /// queue the functions.
  void removeUsers(Value *V);

  /// Replace all direct calls of Old with calls of New. Will bitcast New if
  /// necessary to make types match.
  void replaceDirectCallers(Function *Old, Function *New);

  /// Merge two equivalent functions. Upon completion, G may be deleted, or may
  /// be converted into a thunk. In either case, it should never be visited
  /// again.
  void mergeTwoFunctions(Function *F, Function *G);

  /// Replace G with a thunk or an alias to F. Deletes G.
  void writeThunkOrAlias(Function *F, Function *G);

  /// Replace G with a simple tail call to bitcast(F). Also replace direct uses
  /// of G with bitcast(F). Deletes G.
  void writeThunk(Function *F, Function *G);

  /// Replace G with an alias to F. Deletes G.
  void writeAlias(Function *F, Function *G);

  /// The set of all distinct functions. Use the insert() and remove() methods
  /// to modify it.
  FnSetType FnSet;

  /// TargetData for more accurate GEP comparisons. May be NULL.
  TargetData *TD;

  /// Whether or not the target supports global aliases.
  bool HasGlobalAliases;
};

} // end anonymous namespace

char MergeFunctions::ID = 0;

INITIALIZE_PASS(MergeFunctions, "mergefunc", "Merge Functions", false, false)

ModulePass *llvm::createMergeFunctionsPass() {
  return new MergeFunctions();
}

bool MergeFunctions::runOnModule(Module &M) {
  bool Changed = false;
  TD = getAnalysisIfAvailable<TargetData>();

  for (Module::iterator I = M.begin(), E = M.end(); I != E; ++I) {
    if (!I->isDeclaration() && !I->hasAvailableExternallyLinkage())
      Deferred.push_back(WeakVH(I));
  }
  FnSet.resize(Deferred.size());

  do {
    std::vector<WeakVH> Worklist;
    Deferred.swap(Worklist);

    DEBUG(dbgs() << "size of module: " << M.size() << '\n');
    DEBUG(dbgs() << "size of worklist: " << Worklist.size() << '\n');

    // Insert only strong functions and merge them. Strong function merging
    // always deletes one of them.
    for (std::vector<WeakVH>::iterator I = Worklist.begin(),
           E = Worklist.end(); I != E; ++I) {
      if (!*I) continue;
      Function *F = cast<Function>(*I);
      if (!F->isDeclaration() && !F->hasAvailableExternallyLinkage() &&
          !F->mayBeOverridden()) {
        ComparableFunction CF = ComparableFunction(F, TD);
        Changed |= insert(CF);
      }
    }

    // Insert only weak functions and merge them. By doing these second we
    // create thunks to the strong function when possible. When two weak
    // functions are identical, we create a new strong function with two weak
    // thunks to it which are identical but not mergeable.
    for (std::vector<WeakVH>::iterator I = Worklist.begin(),
           E = Worklist.end(); I != E; ++I) {
      if (!*I) continue;
      Function *F = cast<Function>(*I);
      if (!F->isDeclaration() && !F->hasAvailableExternallyLinkage() &&
          F->mayBeOverridden()) {
        ComparableFunction CF = ComparableFunction(F, TD);
        Changed |= insert(CF);
      }
    }
    DEBUG(dbgs() << "size of FnSet: " << FnSet.size() << '\n');
  } while (!Deferred.empty());

  FnSet.clear();

  return Changed;
}

bool DenseMapInfo<ComparableFunction>::isEqual(const ComparableFunction &LHS,
                                               const ComparableFunction &RHS) {
  if (LHS.getFunc() == RHS.getFunc() &&
      LHS.getHash() == RHS.getHash())
    return true;
  if (!LHS.getFunc() || !RHS.getFunc())
    return false;

  // One of these is a special "underlying pointer comparison only" object.
  if (LHS.getTD() == ComparableFunction::LookupOnly ||
      RHS.getTD() == ComparableFunction::LookupOnly)
    return false;

  assert(LHS.getTD() == RHS.getTD() &&
         "Comparing functions for different targets");

  return FunctionComparator(LHS.getTD(), LHS.getFunc(),
                            RHS.getFunc()).compare();
}

// Replace direct callers of Old with New.
void MergeFunctions::replaceDirectCallers(Function *Old, Function *New) {
  Constant *BitcastNew = ConstantExpr::getBitCast(New, Old->getType());
  for (Value::use_iterator UI = Old->use_begin(), UE = Old->use_end();
       UI != UE;) {
    Value::use_iterator TheIter = UI;
    ++UI;
    CallSite CS(*TheIter);
    if (CS && CS.isCallee(TheIter)) {
      remove(CS.getInstruction()->getParent()->getParent());
      TheIter.getUse().set(BitcastNew);
    }
  }
}

// Replace G with an alias to F if possible, or else a thunk to F. Deletes G.
void MergeFunctions::writeThunkOrAlias(Function *F, Function *G) {
  if (HasGlobalAliases && G->hasUnnamedAddr()) {
    if (G->hasExternalLinkage() || G->hasLocalLinkage() ||
        G->hasWeakLinkage()) {
      writeAlias(F, G);
      return;
    }
  }

  writeThunk(F, G);
}

// Replace G with a simple tail call to bitcast(F). Also replace direct uses
// of G with bitcast(F). Deletes G.
void MergeFunctions::writeThunk(Function *F, Function *G) {
  if (!G->mayBeOverridden()) {
    // Redirect direct callers of G to F.
    replaceDirectCallers(G, F);
  }

  // If G was internal then we may have replaced all uses of G with F. If so,
  // stop here and delete G. There's no need for a thunk.
  if (G->hasLocalLinkage() && G->use_empty()) {
    G->eraseFromParent();
    return;
  }

  Function *NewG = Function::Create(G->getFunctionType(), G->getLinkage(), "",
                                    G->getParent());
  BasicBlock *BB = BasicBlock::Create(F->getContext(), "", NewG);
  IRBuilder<false> Builder(BB);

  SmallVector<Value *, 16> Args;
  unsigned i = 0;
  FunctionType *FFTy = F->getFunctionType();
  for (Function::arg_iterator AI = NewG->arg_begin(), AE = NewG->arg_end();
       AI != AE; ++AI) {
    Args.push_back(Builder.CreateBitCast(AI, FFTy->getParamType(i)));
    ++i;
  }

  CallInst *CI = Builder.CreateCall(F, Args);
  CI->setTailCall();
  CI->setCallingConv(F->getCallingConv());
  if (NewG->getReturnType()->isVoidTy()) {
    Builder.CreateRetVoid();
  } else {
    Builder.CreateRet(Builder.CreateBitCast(CI, NewG->getReturnType()));
  }

  NewG->copyAttributesFrom(G);
  NewG->takeName(G);
  removeUsers(G);
  G->replaceAllUsesWith(NewG);
  G->eraseFromParent();

  DEBUG(dbgs() << "writeThunk: " << NewG->getName() << '\n');
  ++NumThunksWritten;
}
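
// As an illustrative sketch of the result (names invented for the example):
// if @f and @g both have type i32 (i32) and @g has weak linkage, the thunk
// written above looks roughly like
//
//   define weak i32 @g(i32 %x) {
//     %1 = tail call i32 @f(i32 %x)
//     ret i32 %1
//   }
//
// with bitcasts inserted around the arguments and return value whenever the
// two function types only match up to pointer equivalence.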

// Replace G with an alias to F and delete G.
void MergeFunctions::writeAlias(Function *F, Function *G) {
  Constant *BitcastF = ConstantExpr::getBitCast(F, G->getType());
  GlobalAlias *GA = new GlobalAlias(G->getType(), G->getLinkage(), "",
                                    BitcastF, G->getParent());
  F->setAlignment(std::max(F->getAlignment(), G->getAlignment()));
  GA->takeName(G);
  GA->setVisibility(G->getVisibility());
  removeUsers(G);
  G->replaceAllUsesWith(GA);
  G->eraseFromParent();

  DEBUG(dbgs() << "writeAlias: " << GA->getName() << '\n');
  ++NumAliasesWritten;
}

// Merge two equivalent functions. Upon completion, Function G is deleted.
void MergeFunctions::mergeTwoFunctions(Function *F, Function *G) {
  if (F->mayBeOverridden()) {
    assert(G->mayBeOverridden());

    if (HasGlobalAliases) {
      // Make them both thunks to the same internal function.
      Function *H = Function::Create(F->getFunctionType(), F->getLinkage(), "",
                                     F->getParent());
      H->copyAttributesFrom(F);
      H->takeName(F);
      removeUsers(F);
      F->replaceAllUsesWith(H);

      unsigned MaxAlignment = std::max(G->getAlignment(), H->getAlignment());

      writeAlias(F, G);
      writeAlias(F, H);

      F->setAlignment(MaxAlignment);
      F->setLinkage(GlobalValue::PrivateLinkage);
    } else {
      // We can't merge them. Instead, pick one and update all direct callers
      // to call it and hope that we improve the instruction cache hit rate.
      replaceDirectCallers(G, F);
    }

    ++NumDoubleWeak;
  } else {
    writeThunkOrAlias(F, G);
  }

  ++NumFunctionsMerged;
}

// Insert a ComparableFunction into the FnSet, or merge it away if equal to one
// that was already inserted.
bool MergeFunctions::insert(ComparableFunction &NewF) {
  std::pair<FnSetType::iterator, bool> Result = FnSet.insert(NewF);
  if (Result.second) {
    DEBUG(dbgs() << "Inserting as unique: " << NewF.getFunc()->getName() << '\n');
    return false;
  }

  const ComparableFunction &OldF = *Result.first;

  // Never thunk a strong function to a weak function.
  assert(!OldF.getFunc()->mayBeOverridden() ||
         NewF.getFunc()->mayBeOverridden());

  DEBUG(dbgs() << " " << OldF.getFunc()->getName() << " == "
               << NewF.getFunc()->getName() << '\n');

  Function *DeleteF = NewF.getFunc();
  NewF.release();
  mergeTwoFunctions(OldF.getFunc(), DeleteF);
  return true;
}

// Remove a function from FnSet. If it was already in FnSet, add it to Deferred
// so that we'll look at it in the next round.
void MergeFunctions::remove(Function *F) {
  // We need to make sure we remove F, not a function "equal" to F per the
  // function equality comparator.
  //
  // The special "lookup only" ComparableFunction bypasses the expensive
  // function comparison in favour of a pointer comparison on the underlying
  // Function*'s.
  ComparableFunction CF = ComparableFunction(F, ComparableFunction::LookupOnly);
  if (FnSet.erase(CF)) {
    DEBUG(dbgs() << "Removed " << F->getName() << " from set and deferred it.\n");
    Deferred.push_back(F);
  }
}

// For each instruction that uses the value, remove() the function that
// contains the instruction. This should happen right before a call to RAUW.
void MergeFunctions::removeUsers(Value *V) {
  std::vector<Value *> Worklist;
  Worklist.push_back(V);
  while (!Worklist.empty()) {
    Value *V = Worklist.back();
    Worklist.pop_back();

    for (Value::use_iterator UI = V->use_begin(), UE = V->use_end();
         UI != UE; ++UI) {
      Use &U = UI.getUse();
      if (Instruction *I = dyn_cast<Instruction>(U.getUser())) {
        remove(I->getParent()->getParent());
      } else if (isa<GlobalValue>(U.getUser())) {
        // do nothing
      } else if (Constant *C = dyn_cast<Constant>(U.getUser())) {
        for (Value::use_iterator CUI = C->use_begin(), CUE = C->use_end();
             CUI != CUE; ++CUI)
          Worklist.push_back(*CUI);
      }
    }
  }
}