//===- CodeMetrics.cpp - Code cost measurements ---------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements code cost measurement utilities.
//
//===----------------------------------------------------------------------===//
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/DataLayout.h"
#include "llvm/Function.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Support/CallSite.h"

using namespace llvm;
  19. /// callIsSmall - If a call is likely to lower to a single target instruction,
  20. /// or is otherwise deemed small return true.
  21. /// TODO: Perhaps calls like memcpy, strcpy, etc?
  22. bool llvm::callIsSmall(ImmutableCallSite CS) {
  23. if (isa<IntrinsicInst>(CS.getInstruction()))
  24. return true;
  25. const Function *F = CS.getCalledFunction();
  26. if (!F) return false;
  27. if (F->hasLocalLinkage()) return false;
  28. if (!F->hasName()) return false;
  29. StringRef Name = F->getName();
  30. // These will all likely lower to a single selection DAG node.
  31. if (Name == "copysign" || Name == "copysignf" || Name == "copysignl" ||
  32. Name == "fabs" || Name == "fabsf" || Name == "fabsl" ||
  33. Name == "sin" || Name == "sinf" || Name == "sinl" ||
  34. Name == "cos" || Name == "cosf" || Name == "cosl" ||
  35. Name == "sqrt" || Name == "sqrtf" || Name == "sqrtl" )
  36. return true;
  37. // These are all likely to be optimized into something smaller.
  38. if (Name == "pow" || Name == "powf" || Name == "powl" ||
  39. Name == "exp2" || Name == "exp2l" || Name == "exp2f" ||
  40. Name == "floor" || Name == "floorf" || Name == "ceil" ||
  41. Name == "round" || Name == "ffs" || Name == "ffsl" ||
  42. Name == "abs" || Name == "labs" || Name == "llabs")
  43. return true;
  44. return false;
  45. }
  46. bool llvm::isInstructionFree(const Instruction *I, const DataLayout *TD) {
  47. if (isa<PHINode>(I))
  48. return true;
  49. // If a GEP has all constant indices, it will probably be folded with
  50. // a load/store.
  51. if (const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(I))
  52. return GEP->hasAllConstantIndices();
  53. if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
  54. switch (II->getIntrinsicID()) {
  55. default:
  56. return false;
  57. case Intrinsic::dbg_declare:
  58. case Intrinsic::dbg_value:
  59. case Intrinsic::invariant_start:
  60. case Intrinsic::invariant_end:
  61. case Intrinsic::lifetime_start:
  62. case Intrinsic::lifetime_end:
  63. case Intrinsic::objectsize:
  64. case Intrinsic::ptr_annotation:
  65. case Intrinsic::var_annotation:
  66. // These intrinsics don't count as size.
  67. return true;
  68. }
  69. }
  70. if (const CastInst *CI = dyn_cast<CastInst>(I)) {
  71. // Noop casts, including ptr <-> int, don't count.
  72. if (CI->isLosslessCast())
  73. return true;
  74. Value *Op = CI->getOperand(0);
  75. // An inttoptr cast is free so long as the input is a legal integer type
  76. // which doesn't contain values outside the range of a pointer.
  77. if (isa<IntToPtrInst>(CI) && TD &&
  78. TD->isLegalInteger(Op->getType()->getScalarSizeInBits()) &&
  79. Op->getType()->getScalarSizeInBits() <= TD->getPointerSizeInBits())
  80. return true;
  81. // A ptrtoint cast is free so long as the result is large enough to store
  82. // the pointer, and a legal integer type.
  83. if (isa<PtrToIntInst>(CI) && TD &&
  84. TD->isLegalInteger(Op->getType()->getScalarSizeInBits()) &&
  85. Op->getType()->getScalarSizeInBits() >= TD->getPointerSizeInBits())
  86. return true;
  87. // trunc to a native type is free (assuming the target has compare and
  88. // shift-right of the same width).
  89. if (TD && isa<TruncInst>(CI) &&
  90. TD->isLegalInteger(TD->getTypeSizeInBits(CI->getType())))
  91. return true;
  92. // Result of a cmp instruction is often extended (to be used by other
  93. // cmp instructions, logical or return instructions). These are usually
  94. // nop on most sane targets.
  95. if (isa<CmpInst>(CI->getOperand(0)))
  96. return true;
  97. }
  98. return false;
  99. }
  100. /// analyzeBasicBlock - Fill in the current structure with information gleaned
  101. /// from the specified block.
  102. void CodeMetrics::analyzeBasicBlock(const BasicBlock *BB,
  103. const DataLayout *TD) {
  104. ++NumBlocks;
  105. unsigned NumInstsBeforeThisBB = NumInsts;
  106. for (BasicBlock::const_iterator II = BB->begin(), E = BB->end();
  107. II != E; ++II) {
  108. if (isInstructionFree(II, TD))
  109. continue;
  110. // Special handling for calls.
  111. if (isa<CallInst>(II) || isa<InvokeInst>(II)) {
  112. ImmutableCallSite CS(cast<Instruction>(II));
  113. if (const Function *F = CS.getCalledFunction()) {
  114. // If a function is both internal and has a single use, then it is
  115. // extremely likely to get inlined in the future (it was probably
  116. // exposed by an interleaved devirtualization pass).
  117. if (!CS.isNoInline() && F->hasInternalLinkage() && F->hasOneUse())
  118. ++NumInlineCandidates;
  119. // If this call is to function itself, then the function is recursive.
  120. // Inlining it into other functions is a bad idea, because this is
  121. // basically just a form of loop peeling, and our metrics aren't useful
  122. // for that case.
  123. if (F == BB->getParent())
  124. isRecursive = true;
  125. }
  126. if (!callIsSmall(CS)) {
  127. // Each argument to a call takes on average one instruction to set up.
  128. NumInsts += CS.arg_size();
  129. // We don't want inline asm to count as a call - that would prevent loop
  130. // unrolling. The argument setup cost is still real, though.
  131. if (!isa<InlineAsm>(CS.getCalledValue()))
  132. ++NumCalls;
  133. }
  134. }
  135. if (const AllocaInst *AI = dyn_cast<AllocaInst>(II)) {
  136. if (!AI->isStaticAlloca())
  137. this->usesDynamicAlloca = true;
  138. }
  139. if (isa<ExtractElementInst>(II) || II->getType()->isVectorTy())
  140. ++NumVectorInsts;
  141. ++NumInsts;
  142. }
  143. if (isa<ReturnInst>(BB->getTerminator()))
  144. ++NumRets;
  145. // We never want to inline functions that contain an indirectbr. This is
  146. // incorrect because all the blockaddress's (in static global initializers
  147. // for example) would be referring to the original function, and this indirect
  148. // jump would jump from the inlined copy of the function into the original
  149. // function which is extremely undefined behavior.
  150. // FIXME: This logic isn't really right; we can safely inline functions
  151. // with indirectbr's as long as no other function or global references the
  152. // blockaddress of a block within the current function. And as a QOI issue,
  153. // if someone is using a blockaddress without an indirectbr, and that
  154. // reference somehow ends up in another function or global, we probably
  155. // don't want to inline this function.
  156. if (isa<IndirectBrInst>(BB->getTerminator()))
  157. containsIndirectBr = true;
  158. // Remember NumInsts for this BB.
  159. NumBBInsts[BB] = NumInsts - NumInstsBeforeThisBB;
  160. }
  161. void CodeMetrics::analyzeFunction(Function *F, const DataLayout *TD) {
  162. // If this function contains a call that "returns twice" (e.g., setjmp or
  163. // _setjmp) and it isn't marked with "returns twice" itself, never inline it.
  164. // This is a hack because we depend on the user marking their local variables
  165. // as volatile if they are live across a setjmp call, and they probably
  166. // won't do this in callers.
  167. exposesReturnsTwice = F->callsFunctionThatReturnsTwice() &&
  168. !F->getFnAttributes().hasAttribute(Attributes::ReturnsTwice);
  169. // Look at the size of the callee.
  170. for (Function::const_iterator BB = F->begin(), E = F->end(); BB != E; ++BB)
  171. analyzeBasicBlock(&*BB, TD);
  172. }