SimplifyLibCalls.cpp 86 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
722782279228022812282228322842285228622872288228922902291229222932294229522962297229822992300230123022303230423052306230723082309231023112312231323142315231623172318231923202321232223232324232523262327232823292330233123322333233423352336233723382339234023412342234323442345234623472348234923502351235223532354235523562357235823592360236123622363236423652366236723682369237023712372237323742375237623772378237923802381238223832384238523862387238823892390239123922393239423952396239723982399240024012402240324042405240624072408240924102411241224132414
  1. //===------ SimplifyLibCalls.cpp - Library calls simplifier ---------------===//
  2. //
  3. // The LLVM Compiler Infrastructure
  4. //
  5. // This file is distributed under the University of Illinois Open Source
  6. // License. See LICENSE.TXT for details.
  7. //
  8. //===----------------------------------------------------------------------===//
  9. //
  10. // This is a utility pass used for testing the InstructionSimplify analysis.
  11. // The analysis is applied to every instruction, and if it simplifies then the
  12. // instruction is replaced by the simplification. If you are looking for a pass
  13. // that performs serious instruction folding, use the instcombine pass instead.
  14. //
  15. //===----------------------------------------------------------------------===//
  16. #include "llvm/Transforms/Utils/SimplifyLibCalls.h"
  17. #include "llvm/ADT/SmallString.h"
  18. #include "llvm/ADT/StringMap.h"
  19. #include "llvm/ADT/Triple.h"
  20. #include "llvm/Analysis/TargetLibraryInfo.h"
  21. #include "llvm/Analysis/ValueTracking.h"
  22. #include "llvm/IR/DataLayout.h"
  23. #include "llvm/IR/DiagnosticInfo.h"
  24. #include "llvm/IR/Function.h"
  25. #include "llvm/IR/IRBuilder.h"
  26. #include "llvm/IR/IntrinsicInst.h"
  27. #include "llvm/IR/Intrinsics.h"
  28. #include "llvm/IR/LLVMContext.h"
  29. #include "llvm/IR/Module.h"
  30. #include "llvm/IR/PatternMatch.h"
  31. #include "llvm/Support/CommandLine.h"
  32. #include "llvm/Transforms/Utils/BuildLibCalls.h"
  33. #include "llvm/Transforms/Utils/Local.h"
  34. using namespace llvm;
  35. using namespace PatternMatch;
// Command-line knobs for the simplifier.

// When set (default), calls recognized as error-reporting (e.g. the *_chk
// diagnostics paths) are treated as cold for optimization purposes.
static cl::opt<bool>
    ColdErrorCalls("error-reporting-is-cold", cl::init(true), cl::Hidden,
                   cl::desc("Treat error-reporting calls as cold"));

// Off by default: shrinking double-precision math calls to their float
// counterparts can change results, so it is opt-in ("unsafe").
static cl::opt<bool>
    EnableUnsafeFPShrink("enable-double-float-shrink", cl::Hidden,
                         cl::init(false),
                         cl::desc("Enable unsafe double to float "
                                  "shrinking for math lib calls"));
  44. //===----------------------------------------------------------------------===//
  45. // Helper Functions
  46. //===----------------------------------------------------------------------===//
  47. static bool ignoreCallingConv(LibFunc Func) {
  48. return Func == LibFunc_abs || Func == LibFunc_labs ||
  49. Func == LibFunc_llabs || Func == LibFunc_strlen;
  50. }
/// Return true if this call may be treated as if it used the C calling
/// convention, so libcall-level simplification is legal.
static bool isCallingConvCCompatible(CallInst *CI) {
  switch(CI->getCallingConv()) {
  default:
    return false;
  case llvm::CallingConv::C:
    // Plain C convention is trivially compatible.
    return true;
  case llvm::CallingConv::ARM_APCS:
  case llvm::CallingConv::ARM_AAPCS:
  case llvm::CallingConv::ARM_AAPCS_VFP: {
    // The iOS ABI diverges from the standard in some cases, so for now don't
    // try to simplify those calls.
    if (Triple(CI->getModule()->getTargetTriple()).isiOS())
      return false;

    // Only treat the ARM conventions as C-compatible when every value
    // involved is a pointer or integer (or the return is void); those are
    // passed identically under APCS/AAPCS and the C convention.
    auto *FuncTy = CI->getFunctionType();

    if (!FuncTy->getReturnType()->isPointerTy() &&
        !FuncTy->getReturnType()->isIntegerTy() &&
        !FuncTy->getReturnType()->isVoidTy())
      return false;

    for (auto Param : FuncTy->params()) {
      if (!Param->isPointerTy() && !Param->isIntegerTy())
        return false;
    }
    return true;
  }
  }
  // Unreachable: every switch case returns.
  return false;
}
  78. /// Return true if it only matters that the value is equal or not-equal to zero.
  79. static bool isOnlyUsedInZeroEqualityComparison(Value *V) {
  80. for (User *U : V->users()) {
  81. if (ICmpInst *IC = dyn_cast<ICmpInst>(U))
  82. if (IC->isEquality())
  83. if (Constant *C = dyn_cast<Constant>(IC->getOperand(1)))
  84. if (C->isNullValue())
  85. continue;
  86. // Unknown instruction.
  87. return false;
  88. }
  89. return true;
  90. }
  91. /// Return true if it is only used in equality comparisons with With.
  92. static bool isOnlyUsedInEqualityComparison(Value *V, Value *With) {
  93. for (User *U : V->users()) {
  94. if (ICmpInst *IC = dyn_cast<ICmpInst>(U))
  95. if (IC->isEquality() && IC->getOperand(1) == With)
  96. continue;
  97. // Unknown instruction.
  98. return false;
  99. }
  100. return true;
  101. }
  102. static bool callHasFloatingPointArgument(const CallInst *CI) {
  103. return any_of(CI->operands(), [](const Use &OI) {
  104. return OI->getType()->isFloatingPointTy();
  105. });
  106. }
/// \brief Check whether the overloaded unary floating point function
/// corresponding to \a Ty is available.
static bool hasUnaryFloatFn(const TargetLibraryInfo *TLI, Type *Ty,
                            LibFunc DoubleFn, LibFunc FloatFn,
                            LibFunc LongDoubleFn) {
  switch (Ty->getTypeID()) {
  case Type::FloatTyID:
    return TLI->has(FloatFn);
  case Type::DoubleTyID:
    return TLI->has(DoubleFn);
  default:
    // Everything else falls through to the long-double variant.
    // NOTE(review): this includes non-FP type IDs as well — callers are
    // presumably expected to pass only floating-point types; confirm.
    return TLI->has(LongDoubleFn);
  }
}
  121. //===----------------------------------------------------------------------===//
  122. // String and Memory Library Call Optimizations
  123. //===----------------------------------------------------------------------===//
/// Simplify strcat(dst, src) when src has a statically-known length:
/// strcat(x, "") -> x, otherwise lower to strlen(dst) + memcpy via
/// emitStrLenMemCpy. Returns the replacement value or nullptr.
Value *LibCallSimplifier::optimizeStrCat(CallInst *CI, IRBuilder<> &B) {
  // Extract some information from the instruction
  Value *Dst = CI->getArgOperand(0);
  Value *Src = CI->getArgOperand(1);

  // See if we can get the length of the input string.
  // GetStringLength returns the length *including* the nul, or 0 on failure.
  uint64_t Len = GetStringLength(Src);
  if (Len == 0)
    return nullptr;
  --Len; // Unbias length.

  // Handle the simple, do-nothing case: strcat(x, "") -> x
  if (Len == 0)
    return Dst;

  return emitStrLenMemCpy(Src, Dst, Len, B);
}
/// Shared lowering for strcat/strncat: append Len bytes of Src (plus the
/// nul terminator) at the end of Dst, located via an emitted strlen call.
/// Returns Dst, or nullptr if the strlen call could not be emitted.
Value *LibCallSimplifier::emitStrLenMemCpy(Value *Src, Value *Dst, uint64_t Len,
                                           IRBuilder<> &B) {
  // We need to find the end of the destination string. That's where the
  // memory is to be moved to. We just generate a call to strlen.
  Value *DstLen = emitStrLen(Dst, B, DL, TLI);
  if (!DstLen)
    return nullptr;

  // Now that we have the destination's length, we must index into the
  // destination's pointer to get the actual memcpy destination (end of
  // the string .. we're concatenating).
  Value *CpyDst = B.CreateGEP(B.getInt8Ty(), Dst, DstLen, "endptr");

  // We have enough information to now generate the memcpy call to do the
  // concatenation for us. Make a memcpy to copy the nul byte with align = 1.
  B.CreateMemCpy(CpyDst, Src,
                 ConstantInt::get(DL.getIntPtrType(Src->getContext()), Len + 1),
                 1);
  return Dst;
}
/// Simplify strncat when both the length bound and the source string length
/// are statically known. Only fires when the bound covers the whole source
/// (Len >= SrcLen), in which case strncat degenerates to strcat.
Value *LibCallSimplifier::optimizeStrNCat(CallInst *CI, IRBuilder<> &B) {
  // Extract some information from the instruction.
  Value *Dst = CI->getArgOperand(0);
  Value *Src = CI->getArgOperand(1);
  uint64_t Len;

  // We don't do anything if length is not constant.
  if (ConstantInt *LengthArg = dyn_cast<ConstantInt>(CI->getArgOperand(2)))
    Len = LengthArg->getZExtValue();
  else
    return nullptr;

  // See if we can get the length of the input string.
  uint64_t SrcLen = GetStringLength(Src);
  if (SrcLen == 0)
    return nullptr;
  --SrcLen; // Unbias length.

  // Handle the simple, do-nothing cases:
  // strncat(x, "", c) -> x
  // strncat(x, c, 0) -> x
  if (SrcLen == 0 || Len == 0)
    return Dst;

  // We don't optimize this case (bound smaller than the source string
  // would require a partial copy plus nul termination).
  if (Len < SrcLen)
    return nullptr;

  // strncat(x, s, c) -> strcat(x, s)
  // s is constant so the strcat can be optimized further.
  return emitStrLenMemCpy(Src, Dst, SrcLen, B);
}
/// Simplify strchr:
///  - variable char, known string length -> memchr
///  - constant char, non-constant string: strchr(p, 0) -> p + strlen(p)
///  - constant char, constant string -> constant-folded GEP or null
Value *LibCallSimplifier::optimizeStrChr(CallInst *CI, IRBuilder<> &B) {
  Function *Callee = CI->getCalledFunction();
  FunctionType *FT = Callee->getFunctionType();
  Value *SrcStr = CI->getArgOperand(0);

  // If the second operand is non-constant, see if we can compute the length
  // of the input string and turn this into memchr.
  ConstantInt *CharC = dyn_cast<ConstantInt>(CI->getArgOperand(1));
  if (!CharC) {
    uint64_t Len = GetStringLength(SrcStr);
    if (Len == 0 || !FT->getParamType(1)->isIntegerTy(32)) // memchr needs i32.
      return nullptr;

    // Pass the full (nul-inclusive) length so memchr can also find '\0'.
    return emitMemChr(SrcStr, CI->getArgOperand(1), // include nul.
                      ConstantInt::get(DL.getIntPtrType(CI->getContext()), Len),
                      B, DL, TLI);
  }

  // Otherwise, the character is a constant, see if the first argument is
  // a string literal. If so, we can constant fold.
  StringRef Str;
  if (!getConstantStringInfo(SrcStr, Str)) {
    if (CharC->isZero()) // strchr(p, 0) -> p + strlen(p)
      return B.CreateGEP(B.getInt8Ty(), SrcStr, emitStrLen(SrcStr, B, DL, TLI),
                         "strchr");
    return nullptr;
  }

  // Compute the offset, make sure to handle the case when we're searching for
  // zero (a weird way to spell strlen): searching for '\0' lands on the
  // terminator at Str.size() (Str excludes the nul).
  size_t I = (0xFF & CharC->getSExtValue()) == 0
                 ? Str.size()
                 : Str.find(CharC->getSExtValue());
  if (I == StringRef::npos) // Didn't find the char. strchr returns null.
    return Constant::getNullValue(CI->getType());

  // strchr(s+n,c) -> gep(s+n+i,c)
  return B.CreateGEP(B.getInt8Ty(), SrcStr, B.getInt64(I), "strchr");
}
  217. Value *LibCallSimplifier::optimizeStrRChr(CallInst *CI, IRBuilder<> &B) {
  218. Value *SrcStr = CI->getArgOperand(0);
  219. ConstantInt *CharC = dyn_cast<ConstantInt>(CI->getArgOperand(1));
  220. // Cannot fold anything if we're not looking for a constant.
  221. if (!CharC)
  222. return nullptr;
  223. StringRef Str;
  224. if (!getConstantStringInfo(SrcStr, Str)) {
  225. // strrchr(s, 0) -> strchr(s, 0)
  226. if (CharC->isZero())
  227. return emitStrChr(SrcStr, '\0', B, TLI);
  228. return nullptr;
  229. }
  230. // Compute the offset.
  231. size_t I = (0xFF & CharC->getSExtValue()) == 0
  232. ? Str.size()
  233. : Str.rfind(CharC->getSExtValue());
  234. if (I == StringRef::npos) // Didn't find the char. Return null.
  235. return Constant::getNullValue(CI->getType());
  236. // strrchr(s+n,c) -> gep(s+n+i,c)
  237. return B.CreateGEP(B.getInt8Ty(), SrcStr, B.getInt64(I), "strrchr");
  238. }
/// Simplify strcmp: fold identical pointers, two constant strings, or an
/// empty-string operand; otherwise, when both lengths are known, lower to
/// memcmp over the shorter length.
Value *LibCallSimplifier::optimizeStrCmp(CallInst *CI, IRBuilder<> &B) {
  Value *Str1P = CI->getArgOperand(0), *Str2P = CI->getArgOperand(1);
  if (Str1P == Str2P) // strcmp(x,x) -> 0
    return ConstantInt::get(CI->getType(), 0);

  StringRef Str1, Str2;
  bool HasStr1 = getConstantStringInfo(Str1P, Str1);
  bool HasStr2 = getConstantStringInfo(Str2P, Str2);

  // strcmp(x, y) -> cnst (if both x and y are constant strings)
  if (HasStr1 && HasStr2)
    return ConstantInt::get(CI->getType(), Str1.compare(Str2));

  // Comparing against "" reduces to inspecting the first byte; the zext
  // gives a non-negative value, negated for the "" on the left case.
  if (HasStr1 && Str1.empty()) // strcmp("", x) -> -*x
    return B.CreateNeg(
        B.CreateZExt(B.CreateLoad(Str2P, "strcmpload"), CI->getType()));

  if (HasStr2 && Str2.empty()) // strcmp(x,"") -> *x
    return B.CreateZExt(B.CreateLoad(Str1P, "strcmpload"), CI->getType());

  // strcmp(P, "x") -> memcmp(P, "x", 2)
  // Reading min(Len1, Len2) bytes is safe since both lengths (including
  // nul) are known.
  uint64_t Len1 = GetStringLength(Str1P);
  uint64_t Len2 = GetStringLength(Str2P);
  if (Len1 && Len2) {
    return emitMemCmp(Str1P, Str2P,
                      ConstantInt::get(DL.getIntPtrType(CI->getContext()),
                                       std::min(Len1, Len2)),
                      B, DL, TLI);
  }

  return nullptr;
}
/// Simplify strncmp: fold identical pointers, zero/one-byte bounds, two
/// constant strings (compare their length-limited prefixes), or an
/// empty-string operand.
Value *LibCallSimplifier::optimizeStrNCmp(CallInst *CI, IRBuilder<> &B) {
  Value *Str1P = CI->getArgOperand(0), *Str2P = CI->getArgOperand(1);
  if (Str1P == Str2P) // strncmp(x,x,n) -> 0
    return ConstantInt::get(CI->getType(), 0);

  // Get the length argument if it is constant.
  uint64_t Length;
  if (ConstantInt *LengthArg = dyn_cast<ConstantInt>(CI->getArgOperand(2)))
    Length = LengthArg->getZExtValue();
  else
    return nullptr;

  if (Length == 0) // strncmp(x,y,0) -> 0
    return ConstantInt::get(CI->getType(), 0);

  if (Length == 1) // strncmp(x,y,1) -> memcmp(x,y,1)
    return emitMemCmp(Str1P, Str2P, CI->getArgOperand(2), B, DL, TLI);

  StringRef Str1, Str2;
  bool HasStr1 = getConstantStringInfo(Str1P, Str1);
  bool HasStr2 = getConstantStringInfo(Str2P, Str2);

  // strncmp(x, y) -> cnst (if both x and y are constant strings)
  // Only the first Length characters participate in the comparison.
  if (HasStr1 && HasStr2) {
    StringRef SubStr1 = Str1.substr(0, Length);
    StringRef SubStr2 = Str2.substr(0, Length);
    return ConstantInt::get(CI->getType(), SubStr1.compare(SubStr2));
  }

  // Length >= 1 here, so comparing against "" inspects only the first byte.
  if (HasStr1 && Str1.empty()) // strncmp("", x, n) -> -*x
    return B.CreateNeg(
        B.CreateZExt(B.CreateLoad(Str2P, "strcmpload"), CI->getType()));

  if (HasStr2 && Str2.empty()) // strncmp(x, "", n) -> *x
    return B.CreateZExt(B.CreateLoad(Str1P, "strcmpload"), CI->getType());

  return nullptr;
}
  295. Value *LibCallSimplifier::optimizeStrCpy(CallInst *CI, IRBuilder<> &B) {
  296. Value *Dst = CI->getArgOperand(0), *Src = CI->getArgOperand(1);
  297. if (Dst == Src) // strcpy(x,x) -> x
  298. return Src;
  299. // See if we can get the length of the input string.
  300. uint64_t Len = GetStringLength(Src);
  301. if (Len == 0)
  302. return nullptr;
  303. // We have enough information to now generate the memcpy call to do the
  304. // copy for us. Make a memcpy to copy the nul byte with align = 1.
  305. B.CreateMemCpy(Dst, Src,
  306. ConstantInt::get(DL.getIntPtrType(CI->getContext()), Len), 1);
  307. return Dst;
  308. }
/// Simplify stpcpy (strcpy that returns the end pointer):
/// stpcpy(x,x) -> x + strlen(x); known-length source -> memcpy plus a GEP
/// to the byte before the copied terminator.
Value *LibCallSimplifier::optimizeStpCpy(CallInst *CI, IRBuilder<> &B) {
  Function *Callee = CI->getCalledFunction();
  Value *Dst = CI->getArgOperand(0), *Src = CI->getArgOperand(1);
  if (Dst == Src) { // stpcpy(x,x) -> x+strlen(x)
    Value *StrLen = emitStrLen(Src, B, DL, TLI);
    return StrLen ? B.CreateInBoundsGEP(B.getInt8Ty(), Dst, StrLen) : nullptr;
  }

  // See if we can get the length of the input string (including nul).
  uint64_t Len = GetStringLength(Src);
  if (Len == 0)
    return nullptr;

  Type *PT = Callee->getFunctionType()->getParamType(0);
  Value *LenV = ConstantInt::get(DL.getIntPtrType(PT), Len);
  // stpcpy returns a pointer to the terminator, i.e. Dst + (Len - 1).
  Value *DstEnd = B.CreateGEP(B.getInt8Ty(), Dst,
                              ConstantInt::get(DL.getIntPtrType(PT), Len - 1));

  // We have enough information to now generate the memcpy call to do the
  // copy for us. Make a memcpy to copy the nul byte with align = 1.
  B.CreateMemCpy(Dst, Src, LenV, 1);
  return DstEnd;
}
/// Simplify strncpy: an empty source becomes a memset of the whole bound;
/// with a constant bound no larger than the source length (+nul), lower to
/// a memcpy. Bounds larger than that require strncpy's zero padding and
/// are left alone.
Value *LibCallSimplifier::optimizeStrNCpy(CallInst *CI, IRBuilder<> &B) {
  Function *Callee = CI->getCalledFunction();
  Value *Dst = CI->getArgOperand(0);
  Value *Src = CI->getArgOperand(1);
  Value *LenOp = CI->getArgOperand(2);

  // See if we can get the length of the input string.
  uint64_t SrcLen = GetStringLength(Src);
  if (SrcLen == 0)
    return nullptr;
  --SrcLen; // Unbias (drop the nul from the count).

  if (SrcLen == 0) {
    // strncpy(x, "", y) -> memset(x, '\0', y, 1)
    B.CreateMemSet(Dst, B.getInt8('\0'), LenOp, 1);
    return Dst;
  }

  uint64_t Len;
  if (ConstantInt *LengthArg = dyn_cast<ConstantInt>(LenOp))
    Len = LengthArg->getZExtValue();
  else
    return nullptr;

  if (Len == 0)
    return Dst; // strncpy(x, y, 0) -> x

  // Let strncpy handle the zero padding
  if (Len > SrcLen + 1)
    return nullptr;

  Type *PT = Callee->getFunctionType()->getParamType(0);
  // strncpy(x, s, c) -> memcpy(x, s, c, 1) [s and c are constant]
  B.CreateMemCpy(Dst, Src, ConstantInt::get(DL.getIntPtrType(PT), Len), 1);
  return Dst;
}
/// Simplify strlen:
///  - constant string -> its length
///  - strlen(gep(s, 0, x)) with constant s -> NullTermIdx - x when provable
///  - strlen(select(c, s1, s2)) with both lengths known -> select of lengths
///  - result only compared against zero -> load of the first byte
Value *LibCallSimplifier::optimizeStrLen(CallInst *CI, IRBuilder<> &B) {
  Value *Src = CI->getArgOperand(0);

  // Constant folding: strlen("xyz") -> 3
  if (uint64_t Len = GetStringLength(Src))
    return ConstantInt::get(CI->getType(), Len - 1);

  // If s is a constant pointer pointing to a string literal, we can fold
  // strlen(s + x) to strlen(s) - x, when x is known to be in the range
  // [0, strlen(s)] or the string has a single null terminator '\0' at the end.
  // We only try to simplify strlen when the pointer s points to an array
  // of i8. Otherwise, we would need to scale the offset x before doing the
  // subtraction. This will make the optimization more complex, and it's not
  // very useful because calling strlen for a pointer of other types is
  // very uncommon.
  if (GEPOperator *GEP = dyn_cast<GEPOperator>(Src)) {
    if (!isGEPBasedOnPointerToString(GEP))
      return nullptr;

    StringRef Str;
    // TrimAtNul=false: keep everything after an embedded '\0' visible.
    if (getConstantStringInfo(GEP->getOperand(0), Str, 0, false)) {
      size_t NullTermIdx = Str.find('\0');

      // If the string does not have '\0', leave it to strlen to compute
      // its length.
      if (NullTermIdx == StringRef::npos)
        return nullptr;

      Value *Offset = GEP->getOperand(2);
      unsigned BitWidth = Offset->getType()->getIntegerBitWidth();
      APInt KnownZero(BitWidth, 0);
      APInt KnownOne(BitWidth, 0);
      computeKnownBits(Offset, KnownZero, KnownOne, DL, 0, nullptr, CI,
                       nullptr);
      KnownZero.flipAllBits();
      size_t ArrSize =
             cast<ArrayType>(GEP->getSourceElementType())->getNumElements();

      // KnownZero's bits are flipped, so zeros in KnownZero now represent
      // bits known to be zeros in Offset, and ones in KnowZero represent
      // bits unknown in Offset. Therefore, Offset is known to be in range
      // [0, NullTermIdx] when the flipped KnownZero is non-negative and
      // unsigned-less-than NullTermIdx.
      //
      // If Offset is not provably in the range [0, NullTermIdx], we can still
      // optimize if we can prove that the program has undefined behavior when
      // Offset is outside that range. That is the case when GEP->getOperand(0)
      // is a pointer to an object whose memory extent is NullTermIdx+1.
      if ((KnownZero.isNonNegative() && KnownZero.ule(NullTermIdx)) ||
          (GEP->isInBounds() && isa<GlobalVariable>(GEP->getOperand(0)) &&
           NullTermIdx == ArrSize - 1))
        return B.CreateSub(ConstantInt::get(CI->getType(), NullTermIdx),
                           Offset);
    }

    return nullptr;
  }

  // strlen(x?"foo":"bars") --> x ? 3 : 4
  if (SelectInst *SI = dyn_cast<SelectInst>(Src)) {
    uint64_t LenTrue = GetStringLength(SI->getTrueValue());
    uint64_t LenFalse = GetStringLength(SI->getFalseValue());
    if (LenTrue && LenFalse) {
      Function *Caller = CI->getParent()->getParent();
      emitOptimizationRemark(CI->getContext(), "simplify-libcalls", *Caller,
                             SI->getDebugLoc(),
                             "folded strlen(select) to select of constants");
      return B.CreateSelect(SI->getCondition(),
                            ConstantInt::get(CI->getType(), LenTrue - 1),
                            ConstantInt::get(CI->getType(), LenFalse - 1));
    }
  }

  // strlen(x) != 0 --> *x != 0
  // strlen(x) == 0 --> *x == 0
  if (isOnlyUsedInZeroEqualityComparison(CI))
    return B.CreateZExt(B.CreateLoad(Src, "strlenfirst"), CI->getType());

  return nullptr;
}
  429. Value *LibCallSimplifier::optimizeStrPBrk(CallInst *CI, IRBuilder<> &B) {
  430. StringRef S1, S2;
  431. bool HasS1 = getConstantStringInfo(CI->getArgOperand(0), S1);
  432. bool HasS2 = getConstantStringInfo(CI->getArgOperand(1), S2);
  433. // strpbrk(s, "") -> nullptr
  434. // strpbrk("", s) -> nullptr
  435. if ((HasS1 && S1.empty()) || (HasS2 && S2.empty()))
  436. return Constant::getNullValue(CI->getType());
  437. // Constant folding.
  438. if (HasS1 && HasS2) {
  439. size_t I = S1.find_first_of(S2);
  440. if (I == StringRef::npos) // No match.
  441. return Constant::getNullValue(CI->getType());
  442. return B.CreateGEP(B.getInt8Ty(), CI->getArgOperand(0), B.getInt64(I),
  443. "strpbrk");
  444. }
  445. // strpbrk(s, "a") -> strchr(s, 'a')
  446. if (HasS2 && S2.size() == 1)
  447. return emitStrChr(CI->getArgOperand(0), S2[0], B, TLI);
  448. return nullptr;
  449. }
/// strtol/strtod family: no call replacement is performed (always returns
/// nullptr), but when the endptr argument is a constant null the call is
/// annotated as not capturing its string argument.
Value *LibCallSimplifier::optimizeStrTo(CallInst *CI, IRBuilder<> &B) {
  Value *EndPtr = CI->getArgOperand(1);
  if (isa<ConstantPointerNull>(EndPtr)) {
    // With a null EndPtr, this function won't capture the main argument.
    // It would be readonly too, except that it still may write to errno.
    // NOTE(review): attribute index 1 is presumed to address the first call
    // argument (index 0 being the return value) — confirm against the
    // AttributeList indexing convention of this LLVM version.
    CI->addAttribute(1, Attribute::NoCapture);
  }

  return nullptr;
}
  459. Value *LibCallSimplifier::optimizeStrSpn(CallInst *CI, IRBuilder<> &B) {
  460. StringRef S1, S2;
  461. bool HasS1 = getConstantStringInfo(CI->getArgOperand(0), S1);
  462. bool HasS2 = getConstantStringInfo(CI->getArgOperand(1), S2);
  463. // strspn(s, "") -> 0
  464. // strspn("", s) -> 0
  465. if ((HasS1 && S1.empty()) || (HasS2 && S2.empty()))
  466. return Constant::getNullValue(CI->getType());
  467. // Constant folding.
  468. if (HasS1 && HasS2) {
  469. size_t Pos = S1.find_first_not_of(S2);
  470. if (Pos == StringRef::npos)
  471. Pos = S1.size();
  472. return ConstantInt::get(CI->getType(), Pos);
  473. }
  474. return nullptr;
  475. }
  476. Value *LibCallSimplifier::optimizeStrCSpn(CallInst *CI, IRBuilder<> &B) {
  477. StringRef S1, S2;
  478. bool HasS1 = getConstantStringInfo(CI->getArgOperand(0), S1);
  479. bool HasS2 = getConstantStringInfo(CI->getArgOperand(1), S2);
  480. // strcspn("", s) -> 0
  481. if (HasS1 && S1.empty())
  482. return Constant::getNullValue(CI->getType());
  483. // Constant folding.
  484. if (HasS1 && HasS2) {
  485. size_t Pos = S1.find_first_of(S2);
  486. if (Pos == StringRef::npos)
  487. Pos = S1.size();
  488. return ConstantInt::get(CI->getType(), Pos);
  489. }
  490. // strcspn(s, "") -> strlen(s)
  491. if (HasS2 && S2.empty())
  492. return emitStrLen(CI->getArgOperand(0), B, DL, TLI);
  493. return nullptr;
  494. }
/// Simplify strstr:
///  - strstr(x, x) -> x
///  - result compared only against the haystack -> strncmp against zero
///  - constant operands -> constant-folded GEP or null
///  - single-character needle -> strchr
Value *LibCallSimplifier::optimizeStrStr(CallInst *CI, IRBuilder<> &B) {
  // fold strstr(x, x) -> x.
  if (CI->getArgOperand(0) == CI->getArgOperand(1))
    return B.CreateBitCast(CI->getArgOperand(0), CI->getType());

  // fold strstr(a, b) == a -> strncmp(a, b, strlen(b)) == 0
  if (isOnlyUsedInEqualityComparison(CI, CI->getArgOperand(0))) {
    Value *StrLen = emitStrLen(CI->getArgOperand(1), B, DL, TLI);
    if (!StrLen)
      return nullptr;
    Value *StrNCmp = emitStrNCmp(CI->getArgOperand(0), CI->getArgOperand(1),
                                 StrLen, B, DL, TLI);
    if (!StrNCmp)
      return nullptr;
    // Rewrite each comparison in place. The iterator is advanced before the
    // replacement so RAUW cannot invalidate it.
    for (auto UI = CI->user_begin(), UE = CI->user_end(); UI != UE;) {
      ICmpInst *Old = cast<ICmpInst>(*UI++);
      Value *Cmp =
          B.CreateICmp(Old->getPredicate(), StrNCmp,
                       ConstantInt::getNullValue(StrNCmp->getType()), "cmp");
      replaceAllUsesWith(Old, Cmp);
    }
    // Returning CI (unchanged) signals the caller that users were rewritten.
    return CI;
  }

  // See if either input string is a constant string.
  StringRef SearchStr, ToFindStr;
  bool HasStr1 = getConstantStringInfo(CI->getArgOperand(0), SearchStr);
  bool HasStr2 = getConstantStringInfo(CI->getArgOperand(1), ToFindStr);

  // fold strstr(x, "") -> x.
  if (HasStr2 && ToFindStr.empty())
    return B.CreateBitCast(CI->getArgOperand(0), CI->getType());

  // If both strings are known, constant fold it.
  if (HasStr1 && HasStr2) {
    size_t Offset = SearchStr.find(ToFindStr);

    if (Offset == StringRef::npos) // strstr("foo", "bar") -> null
      return Constant::getNullValue(CI->getType());

    // strstr("abcd", "bc") -> gep((char*)"abcd", 1)
    Value *Result = castToCStr(CI->getArgOperand(0), B);
    Result = B.CreateConstInBoundsGEP1_64(Result, Offset, "strstr");
    return B.CreateBitCast(Result, CI->getType());
  }

  // fold strstr(x, "y") -> strchr(x, 'y').
  if (HasStr2 && ToFindStr.size() == 1) {
    Value *StrChr = emitStrChr(CI->getArgOperand(0), ToFindStr[0], B, TLI);
    return StrChr ? B.CreateBitCast(StrChr, CI->getType()) : nullptr;
  }
  return nullptr;
}
/// Simplify a call to memchr().
/// Handled patterns:
///   memchr(x, y, 0)              -> null
///   memchr(cststr, C, cstlen)    -> bounds-checked bitfield membership test,
///                                   when the result is only compared to null
///   memchr(cststr, cstC, cstlen) -> gep into the string, or null
Value *LibCallSimplifier::optimizeMemChr(CallInst *CI, IRBuilder<> &B) {
  Value *SrcStr = CI->getArgOperand(0);
  ConstantInt *CharC = dyn_cast<ConstantInt>(CI->getArgOperand(1));
  ConstantInt *LenC = dyn_cast<ConstantInt>(CI->getArgOperand(2));

  // memchr(x, y, 0) -> null
  if (LenC && LenC->isNullValue())
    return Constant::getNullValue(CI->getType());

  // From now on we need at least constant length and string.
  StringRef Str;
  if (!LenC || !getConstantStringInfo(SrcStr, Str, 0, /*TrimAtNul=*/false))
    return nullptr;

  // Truncate the string to LenC. If Str is smaller than LenC we will still only
  // scan the string, as reading past the end of it is undefined and we can just
  // return null if we don't find the char.
  Str = Str.substr(0, LenC->getZExtValue());

  // If the char is variable but the input str and length are not we can turn
  // this memchr call into a simple bit field test. Of course this only works
  // when the return value is only checked against null.
  //
  // It would be really nice to reuse switch lowering here but we can't change
  // the CFG at this point.
  //
  // memchr("\r\n", C, 2) != nullptr -> (C & ((1 << '\r') | (1 << '\n'))) != 0
  //   after bounds check.
  if (!CharC && !Str.empty() && isOnlyUsedInZeroEqualityComparison(CI)) {
    // The largest byte value present determines how wide the bitfield must be.
    unsigned char Max =
        *std::max_element(reinterpret_cast<const unsigned char *>(Str.begin()),
                          reinterpret_cast<const unsigned char *>(Str.end()));

    // Make sure the bit field we're about to create fits in a register on the
    // target.
    // FIXME: On a 64 bit architecture this prevents us from using the
    // interesting range of alpha ascii chars. We could do better by emitting
    // two bitfields or shifting the range by 64 if no lower chars are used.
    if (!DL.fitsInLegalInteger(Max + 1))
      return nullptr;

    // For the bit field use a power-of-2 type with at least 8 bits to avoid
    // creating unnecessary illegal types.
    unsigned char Width = NextPowerOf2(std::max((unsigned char)7, Max));

    // Now build the bit field: one set bit per distinct byte in the string.
    APInt Bitfield(Width, 0);
    for (char C : Str)
      Bitfield.setBit((unsigned char)C);
    Value *BitfieldC = B.getInt(Bitfield);

    // First check that the bit field access is within bounds (C < Width),
    // otherwise the shift below would be poison.
    Value *C = B.CreateZExtOrTrunc(CI->getArgOperand(1), BitfieldC->getType());
    Value *Bounds = B.CreateICmp(ICmpInst::ICMP_ULT, C, B.getIntN(Width, Width),
                                 "memchr.bounds");

    // Create code that checks if the given bit is set in the field.
    Value *Shl = B.CreateShl(B.getIntN(Width, 1ULL), C);
    Value *Bits = B.CreateIsNotNull(B.CreateAnd(Shl, BitfieldC), "memchr.bits");

    // Finally merge both checks and cast to pointer type. The inttoptr
    // implicitly zexts the i1 to intptr type.
    return B.CreateIntToPtr(B.CreateAnd(Bounds, Bits, "memchr"), CI->getType());
  }

  // Check if all arguments are constants. If so, we can constant fold.
  if (!CharC)
    return nullptr;

  // Compute the offset. memchr compares as unsigned char, hence the 0xFF mask.
  size_t I = Str.find(CharC->getSExtValue() & 0xFF);
  if (I == StringRef::npos) // Didn't find the char. memchr returns null.
    return Constant::getNullValue(CI->getType());

  // memchr(s+n,c,l) -> gep(s+n+i,c)
  return B.CreateGEP(B.getInt8Ty(), SrcStr, B.getInt64(I), "memchr");
}
/// Simplify a call to memcmp().
/// Handled patterns:
///   memcmp(s, s, n)       -> 0
///   memcmp(a, b, 0)       -> 0
///   memcmp(a, b, 1)       -> *(unsigned char*)a - *(unsigned char*)b
///   memcmp(a, b, n) == 0  -> one wide integer load + compare, when n*8 is a
///                            legal integer width and both sides are aligned
///   memcmp(cst, cst, n)   -> constant, normalized to -1/0/+1
Value *LibCallSimplifier::optimizeMemCmp(CallInst *CI, IRBuilder<> &B) {
  Value *LHS = CI->getArgOperand(0), *RHS = CI->getArgOperand(1);

  if (LHS == RHS) // memcmp(s,s,x) -> 0
    return Constant::getNullValue(CI->getType());

  // Make sure we have a constant length.
  ConstantInt *LenC = dyn_cast<ConstantInt>(CI->getArgOperand(2));
  if (!LenC)
    return nullptr;
  uint64_t Len = LenC->getZExtValue();

  if (Len == 0) // memcmp(s1,s2,0) -> 0
    return Constant::getNullValue(CI->getType());

  // memcmp(S1,S2,1) -> *(unsigned char*)LHS - *(unsigned char*)RHS
  if (Len == 1) {
    Value *LHSV = B.CreateZExt(B.CreateLoad(castToCStr(LHS, B), "lhsc"),
                               CI->getType(), "lhsv");
    Value *RHSV = B.CreateZExt(B.CreateLoad(castToCStr(RHS, B), "rhsc"),
                               CI->getType(), "rhsv");
    return B.CreateSub(LHSV, RHSV, "chardiff");
  }

  // memcmp(S1,S2,N/8)==0 -> (*(intN_t*)S1 != *(intN_t*)S2)==0
  if (DL.isLegalInteger(Len * 8) && isOnlyUsedInZeroEqualityComparison(CI)) {
    IntegerType *IntType = IntegerType::get(CI->getContext(), Len * 8);
    unsigned PrefAlignment = DL.getPrefTypeAlignment(IntType);

    // Only emit the wide loads when both pointers are known to be at least as
    // aligned as the target prefers for IntType.
    if (getKnownAlignment(LHS, DL, CI) >= PrefAlignment &&
        getKnownAlignment(RHS, DL, CI) >= PrefAlignment) {
      Type *LHSPtrTy =
          IntType->getPointerTo(LHS->getType()->getPointerAddressSpace());
      Type *RHSPtrTy =
          IntType->getPointerTo(RHS->getType()->getPointerAddressSpace());
      Value *LHSV =
          B.CreateLoad(B.CreateBitCast(LHS, LHSPtrTy, "lhsc"), "lhsv");
      Value *RHSV =
          B.CreateLoad(B.CreateBitCast(RHS, RHSPtrTy, "rhsc"), "rhsv");
      return B.CreateZExt(B.CreateICmpNE(LHSV, RHSV), CI->getType(), "memcmp");
    }
  }

  // Constant folding: memcmp(x, y, l) -> cnst (all arguments are constant)
  StringRef LHSStr, RHSStr;
  if (getConstantStringInfo(LHS, LHSStr) &&
      getConstantStringInfo(RHS, RHSStr)) {
    // Make sure we're not reading out-of-bounds memory.
    if (Len > LHSStr.size() || Len > RHSStr.size())
      return nullptr;
    // Fold the memcmp and normalize the result. This way we get consistent
    // results across multiple platforms.
    uint64_t Ret = 0;
    int Cmp = memcmp(LHSStr.data(), RHSStr.data(), Len);
    if (Cmp < 0)
      Ret = -1;
    else if (Cmp > 0)
      Ret = 1;
    return ConstantInt::get(CI->getType(), Ret);
  }

  return nullptr;
}
  660. Value *LibCallSimplifier::optimizeMemCpy(CallInst *CI, IRBuilder<> &B) {
  661. // memcpy(x, y, n) -> llvm.memcpy(x, y, n, 1)
  662. B.CreateMemCpy(CI->getArgOperand(0), CI->getArgOperand(1),
  663. CI->getArgOperand(2), 1);
  664. return CI->getArgOperand(0);
  665. }
  666. Value *LibCallSimplifier::optimizeMemMove(CallInst *CI, IRBuilder<> &B) {
  667. // memmove(x, y, n) -> llvm.memmove(x, y, n, 1)
  668. B.CreateMemMove(CI->getArgOperand(0), CI->getArgOperand(1),
  669. CI->getArgOperand(2), 1);
  670. return CI->getArgOperand(0);
  671. }
// TODO: Does this belong in BuildLibCalls or should all of those similar
// functions be moved here?
/// Emit a call to calloc(Num, Size) carrying attribute list \p Attrs.
/// Returns the new call, or null if the target does not provide calloc.
static Value *emitCalloc(Value *Num, Value *Size, const AttributeList &Attrs,
                         IRBuilder<> &B, const TargetLibraryInfo &TLI) {
  LibFunc Func;
  // Bail out unless "calloc" is recognized and available on this target.
  if (!TLI.getLibFunc("calloc", Func) || !TLI.has(Func))
    return nullptr;

  Module *M = B.GetInsertBlock()->getModule();
  const DataLayout &DL = M->getDataLayout();
  // calloc's prototype is i8* calloc(size_t, size_t); size_t is modeled as the
  // target's pointer-sized integer type.
  IntegerType *PtrType = DL.getIntPtrType((B.GetInsertBlock()->getContext()));
  Value *Calloc = M->getOrInsertFunction("calloc", Attrs, B.getInt8PtrTy(),
                                         PtrType, PtrType);
  CallInst *CI = B.CreateCall(Calloc, { Num, Size }, "calloc");

  // getOrInsertFunction may return a bitcast of an existing declaration;
  // strip casts to copy the declaration's calling convention onto the call.
  if (const auto *F = dyn_cast<Function>(Calloc->stripPointerCasts()))
    CI->setCallingConv(F->getCallingConv());

  return CI;
}
  689. /// Fold memset[_chk](malloc(n), 0, n) --> calloc(1, n).
  690. static Value *foldMallocMemset(CallInst *Memset, IRBuilder<> &B,
  691. const TargetLibraryInfo &TLI) {
  692. // This has to be a memset of zeros (bzero).
  693. auto *FillValue = dyn_cast<ConstantInt>(Memset->getArgOperand(1));
  694. if (!FillValue || FillValue->getZExtValue() != 0)
  695. return nullptr;
  696. // TODO: We should handle the case where the malloc has more than one use.
  697. // This is necessary to optimize common patterns such as when the result of
  698. // the malloc is checked against null or when a memset intrinsic is used in
  699. // place of a memset library call.
  700. auto *Malloc = dyn_cast<CallInst>(Memset->getArgOperand(0));
  701. if (!Malloc || !Malloc->hasOneUse())
  702. return nullptr;
  703. // Is the inner call really malloc()?
  704. Function *InnerCallee = Malloc->getCalledFunction();
  705. LibFunc Func;
  706. if (!TLI.getLibFunc(*InnerCallee, Func) || !TLI.has(Func) ||
  707. Func != LibFunc_malloc)
  708. return nullptr;
  709. // The memset must cover the same number of bytes that are malloc'd.
  710. if (Memset->getArgOperand(2) != Malloc->getArgOperand(0))
  711. return nullptr;
  712. // Replace the malloc with a calloc. We need the data layout to know what the
  713. // actual size of a 'size_t' parameter is.
  714. B.SetInsertPoint(Malloc->getParent(), ++Malloc->getIterator());
  715. const DataLayout &DL = Malloc->getModule()->getDataLayout();
  716. IntegerType *SizeType = DL.getIntPtrType(B.GetInsertBlock()->getContext());
  717. Value *Calloc = emitCalloc(ConstantInt::get(SizeType, 1),
  718. Malloc->getArgOperand(0), Malloc->getAttributes(),
  719. B, TLI);
  720. if (!Calloc)
  721. return nullptr;
  722. Malloc->replaceAllUsesWith(Calloc);
  723. Malloc->eraseFromParent();
  724. return Calloc;
  725. }
  726. Value *LibCallSimplifier::optimizeMemSet(CallInst *CI, IRBuilder<> &B) {
  727. if (auto *Calloc = foldMallocMemset(CI, B, *TLI))
  728. return Calloc;
  729. // memset(p, v, n) -> llvm.memset(p, v, n, 1)
  730. Value *Val = B.CreateIntCast(CI->getArgOperand(1), B.getInt8Ty(), false);
  731. B.CreateMemSet(CI->getArgOperand(0), Val, CI->getArgOperand(2), 1);
  732. return CI->getArgOperand(0);
  733. }
  734. //===----------------------------------------------------------------------===//
  735. // Math Library Optimizations
  736. //===----------------------------------------------------------------------===//
  737. /// Return a variant of Val with float type.
  738. /// Currently this works in two cases: If Val is an FPExtension of a float
  739. /// value to something bigger, simply return the operand.
  740. /// If Val is a ConstantFP but can be converted to a float ConstantFP without
  741. /// loss of precision do so.
  742. static Value *valueHasFloatPrecision(Value *Val) {
  743. if (FPExtInst *Cast = dyn_cast<FPExtInst>(Val)) {
  744. Value *Op = Cast->getOperand(0);
  745. if (Op->getType()->isFloatTy())
  746. return Op;
  747. }
  748. if (ConstantFP *Const = dyn_cast<ConstantFP>(Val)) {
  749. APFloat F = Const->getValueAPF();
  750. bool losesInfo;
  751. (void)F.convert(APFloat::IEEEsingle(), APFloat::rmNearestTiesToEven,
  752. &losesInfo);
  753. if (!losesInfo)
  754. return ConstantFP::get(Const->getContext(), F);
  755. }
  756. return nullptr;
  757. }
/// Shrink double -> float for unary functions like 'floor'.
/// If \p CheckRetType is set, only transform when every use of the call
/// truncates the result back to float, i.e. the double precision is unused.
/// Returns the fpext of the float-variant call, or null if not applicable.
static Value *optimizeUnaryDoubleFP(CallInst *CI, IRBuilder<> &B,
                                    bool CheckRetType) {
  Function *Callee = CI->getCalledFunction();
  // We know this libcall has a valid prototype, but we don't know which.
  if (!CI->getType()->isDoubleTy())
    return nullptr;

  if (CheckRetType) {
    // Check if all the uses for function like 'sin' are converted to float.
    for (User *U : CI->users()) {
      FPTruncInst *Cast = dyn_cast<FPTruncInst>(U);
      if (!Cast || !Cast->getType()->isFloatTy())
        return nullptr;
    }
  }

  // If this is something like 'floor((double)floatval)', convert to floorf.
  Value *V = valueHasFloatPrecision(CI->getArgOperand(0));
  if (V == nullptr)
    return nullptr;

  // Propagate fast-math flags from the existing call to the new call.
  IRBuilder<>::FastMathFlagGuard Guard(B);
  B.setFastMathFlags(CI->getFastMathFlags());

  // floor((double)floatval) -> (double)floorf(floatval)
  if (Callee->isIntrinsic()) {
    // Intrinsics are overloaded on their operand type; re-declare on float.
    Module *M = CI->getModule();
    Intrinsic::ID IID = Callee->getIntrinsicID();
    Function *F = Intrinsic::getDeclaration(M, IID, B.getFloatTy());
    V = B.CreateCall(F, V);
  } else {
    // The call is a library call rather than an intrinsic.
    V = emitUnaryFloatFnCall(V, Callee->getName(), B, Callee->getAttributes());
  }

  return B.CreateFPExt(V, B.getDoubleTy());
}
  792. // Replace a libcall \p CI with a call to intrinsic \p IID
  793. static Value *replaceUnaryCall(CallInst *CI, IRBuilder<> &B, Intrinsic::ID IID) {
  794. // Propagate fast-math flags from the existing call to the new call.
  795. IRBuilder<>::FastMathFlagGuard Guard(B);
  796. B.setFastMathFlags(CI->getFastMathFlags());
  797. Module *M = CI->getModule();
  798. Value *V = CI->getArgOperand(0);
  799. Function *F = Intrinsic::getDeclaration(M, IID, CI->getType());
  800. CallInst *NewCall = B.CreateCall(F, V);
  801. NewCall->takeName(CI);
  802. return NewCall;
  803. }
  804. /// Shrink double -> float for binary functions like 'fmin/fmax'.
  805. static Value *optimizeBinaryDoubleFP(CallInst *CI, IRBuilder<> &B) {
  806. Function *Callee = CI->getCalledFunction();
  807. // We know this libcall has a valid prototype, but we don't know which.
  808. if (!CI->getType()->isDoubleTy())
  809. return nullptr;
  810. // If this is something like 'fmin((double)floatval1, (double)floatval2)',
  811. // or fmin(1.0, (double)floatval), then we convert it to fminf.
  812. Value *V1 = valueHasFloatPrecision(CI->getArgOperand(0));
  813. if (V1 == nullptr)
  814. return nullptr;
  815. Value *V2 = valueHasFloatPrecision(CI->getArgOperand(1));
  816. if (V2 == nullptr)
  817. return nullptr;
  818. // Propagate fast-math flags from the existing call to the new call.
  819. IRBuilder<>::FastMathFlagGuard Guard(B);
  820. B.setFastMathFlags(CI->getFastMathFlags());
  821. // fmin((double)floatval1, (double)floatval2)
  822. // -> (double)fminf(floatval1, floatval2)
  823. // TODO: Handle intrinsics in the same way as in optimizeUnaryDoubleFP().
  824. Value *V = emitBinaryFloatFnCall(V1, V2, Callee->getName(), B,
  825. Callee->getAttributes());
  826. return B.CreateFPExt(V, B.getDoubleTy());
  827. }
  828. Value *LibCallSimplifier::optimizeCos(CallInst *CI, IRBuilder<> &B) {
  829. Function *Callee = CI->getCalledFunction();
  830. Value *Ret = nullptr;
  831. StringRef Name = Callee->getName();
  832. if (UnsafeFPShrink && Name == "cos" && hasFloatVersion(Name))
  833. Ret = optimizeUnaryDoubleFP(CI, B, true);
  834. // cos(-x) -> cos(x)
  835. Value *Op1 = CI->getArgOperand(0);
  836. if (BinaryOperator::isFNeg(Op1)) {
  837. BinaryOperator *BinExpr = cast<BinaryOperator>(Op1);
  838. return B.CreateCall(Callee, BinExpr->getOperand(1), "cos");
  839. }
  840. return Ret;
  841. }
/// Compute x**Exp as a chain of fmuls using a precomputed addition-chain
/// table, memoizing intermediate powers in \p InnerChain. The caller must
/// seed InnerChain[1] (= x) and InnerChain[2] (= x*x); Exp must be in
/// [1, 32].
static Value *getPow(Value *InnerChain[33], unsigned Exp, IRBuilder<> &B) {
  // Multiplications calculated using Addition Chains.
  // Refer: http://wwwhomes.uni-bielefeld.de/achim/addition_chain.html
  assert(Exp != 0 && "Incorrect exponent 0 not handled");

  // Already computed (or one of the seeded base cases).
  if (InnerChain[Exp])
    return InnerChain[Exp];

  // AddChain[E] = {A, B} with A + B == E, so x**E = x**A * x**B.
  static const unsigned AddChain[33][2] = {
      {0, 0}, // Unused.
      {0, 0}, // Unused (base case = pow1).
      {1, 1}, // Unused (pre-computed).
      {1, 2}, {2, 2}, {2, 3}, {3, 3}, {2, 5}, {4, 4},
      {1, 8}, {5, 5}, {1, 10}, {6, 6}, {4, 9}, {7, 7},
      {3, 12}, {8, 8}, {8, 9}, {2, 16}, {1, 18}, {10, 10},
      {6, 15}, {11, 11}, {3, 20}, {12, 12}, {8, 17}, {13, 13},
      {3, 24}, {14, 14}, {4, 25}, {15, 15}, {3, 28}, {16, 16},
  };

  // Recursively build (and memoize) both factors, then multiply.
  InnerChain[Exp] = B.CreateFMul(getPow(InnerChain, AddChain[Exp][0], B),
                                 getPow(InnerChain, AddChain[Exp][1], B));
  return InnerChain[Exp];
}
/// Simplify a call to pow().
/// Handled patterns (some gated on fast-math / available libcalls):
///   pow(1.0, x)     -> 1.0
///   pow(2.0, x)     -> llvm.exp2(x)
///   pow(10.0, x)    -> exp10(x)
///   pow(exp(x), y)  -> exp(x*y), pow(exp2(x), y) -> exp2(x*y)  (fast-math)
///   pow(x, 0.0)     -> 1.0; pow(x, 1.0) -> x; pow(x, 2.0) -> x*x
///   pow(x, -1.0)    -> 1.0/x; pow(x, +/-0.5) -> sqrt forms
///   pow(x, n)       -> fmul chain for small integer n          (fast-math)
Value *LibCallSimplifier::optimizePow(CallInst *CI, IRBuilder<> &B) {
  Function *Callee = CI->getCalledFunction();
  Value *Ret = nullptr;
  StringRef Name = Callee->getName();
  if (UnsafeFPShrink && Name == "pow" && hasFloatVersion(Name))
    Ret = optimizeUnaryDoubleFP(CI, B, true);

  Value *Op1 = CI->getArgOperand(0), *Op2 = CI->getArgOperand(1);

  // pow(1.0, x) -> 1.0
  if (match(Op1, m_SpecificFP(1.0)))
    return Op1;
  // pow(2.0, x) -> llvm.exp2(x)
  if (match(Op1, m_SpecificFP(2.0))) {
    Value *Exp2 = Intrinsic::getDeclaration(CI->getModule(), Intrinsic::exp2,
                                            CI->getType());
    return B.CreateCall(Exp2, Op2, "exp2");
  }

  // There's no llvm.exp10 intrinsic yet, but, maybe, some day there will
  // be one.
  if (ConstantFP *Op1C = dyn_cast<ConstantFP>(Op1)) {
    // pow(10.0, x) -> exp10(x)
    if (Op1C->isExactlyValue(10.0) &&
        hasUnaryFloatFn(TLI, Op1->getType(), LibFunc_exp10, LibFunc_exp10f,
                        LibFunc_exp10l))
      return emitUnaryFloatFnCall(Op2, TLI->getName(LibFunc_exp10), B,
                                  Callee->getAttributes());
  }

  // pow(exp(x), y) -> exp(x * y)
  // pow(exp2(x), y) -> exp2(x * y)
  // We enable these only with fast-math. Besides rounding differences, the
  // transformation changes overflow and underflow behavior quite dramatically.
  // Example: x = 1000, y = 0.001.
  // pow(exp(x), y) = pow(inf, 0.001) = inf, whereas exp(x*y) = exp(1).
  auto *OpC = dyn_cast<CallInst>(Op1);
  if (OpC && OpC->hasUnsafeAlgebra() && CI->hasUnsafeAlgebra()) {
    LibFunc Func;
    Function *OpCCallee = OpC->getCalledFunction();
    if (OpCCallee && TLI->getLibFunc(OpCCallee->getName(), Func) &&
        TLI->has(Func) && (Func == LibFunc_exp || Func == LibFunc_exp2)) {
      IRBuilder<>::FastMathFlagGuard Guard(B);
      B.setFastMathFlags(CI->getFastMathFlags());
      Value *FMul = B.CreateFMul(OpC->getArgOperand(0), Op2, "mul");
      return emitUnaryFloatFnCall(FMul, OpCCallee->getName(), B,
                                  OpCCallee->getAttributes());
    }
  }

  // The remaining transforms all require a constant exponent.
  ConstantFP *Op2C = dyn_cast<ConstantFP>(Op2);
  if (!Op2C)
    return Ret;

  if (Op2C->getValueAPF().isZero()) // pow(x, 0.0) -> 1.0
    return ConstantFP::get(CI->getType(), 1.0);

  if (Op2C->isExactlyValue(-0.5) &&
      hasUnaryFloatFn(TLI, Op2->getType(), LibFunc_sqrt, LibFunc_sqrtf,
                      LibFunc_sqrtl)) {
    // If -ffast-math:
    // pow(x, -0.5) -> 1.0 / sqrt(x)
    if (CI->hasUnsafeAlgebra()) {
      IRBuilder<>::FastMathFlagGuard Guard(B);
      B.setFastMathFlags(CI->getFastMathFlags());

      // TODO: If the pow call is an intrinsic, we should lower to the sqrt
      // intrinsic, so we match errno semantics. We also should check that the
      // target can in fact lower the sqrt intrinsic -- we currently have no way
      // to ask this question other than asking whether the target has a sqrt
      // libcall, which is a sufficient but not necessary condition.
      Value *Sqrt = emitUnaryFloatFnCall(Op1, TLI->getName(LibFunc_sqrt), B,
                                         Callee->getAttributes());

      return B.CreateFDiv(ConstantFP::get(CI->getType(), 1.0), Sqrt, "sqrtrecip");
    }
  }

  if (Op2C->isExactlyValue(0.5) &&
      hasUnaryFloatFn(TLI, Op2->getType(), LibFunc_sqrt, LibFunc_sqrtf,
                      LibFunc_sqrtl)) {
    // In -ffast-math, pow(x, 0.5) -> sqrt(x).
    if (CI->hasUnsafeAlgebra()) {
      IRBuilder<>::FastMathFlagGuard Guard(B);
      B.setFastMathFlags(CI->getFastMathFlags());

      // TODO: As above, we should lower to the sqrt intrinsic if the pow is an
      // intrinsic, to match errno semantics.
      return emitUnaryFloatFnCall(Op1, TLI->getName(LibFunc_sqrt), B,
                                  Callee->getAttributes());
    }

    // Expand pow(x, 0.5) to (x == -infinity ? +infinity : fabs(sqrt(x))).
    // This is faster than calling pow, and still handles negative zero
    // and negative infinity correctly.
    // TODO: In finite-only mode, this could be just fabs(sqrt(x)).
    Value *Inf = ConstantFP::getInfinity(CI->getType());
    Value *NegInf = ConstantFP::getInfinity(CI->getType(), true);

    // TODO: As above, we should lower to the sqrt intrinsic if the pow is an
    // intrinsic, to match errno semantics.
    Value *Sqrt = emitUnaryFloatFnCall(Op1, "sqrt", B, Callee->getAttributes());
    Module *M = Callee->getParent();
    Function *FabsF = Intrinsic::getDeclaration(M, Intrinsic::fabs,
                                                CI->getType());
    Value *FAbs = B.CreateCall(FabsF, Sqrt);
    Value *FCmp = B.CreateFCmpOEQ(Op1, NegInf);
    Value *Sel = B.CreateSelect(FCmp, Inf, FAbs);
    return Sel;
  }

  if (Op2C->isExactlyValue(1.0)) // pow(x, 1.0) -> x
    return Op1;
  if (Op2C->isExactlyValue(2.0)) // pow(x, 2.0) -> x*x
    return B.CreateFMul(Op1, Op1, "pow2");
  if (Op2C->isExactlyValue(-1.0)) // pow(x, -1.0) -> 1.0/x
    return B.CreateFDiv(ConstantFP::get(CI->getType(), 1.0), Op1, "powrecip");

  // In -ffast-math, generate repeated fmul instead of generating pow(x, n).
  if (CI->hasUnsafeAlgebra()) {
    APFloat V = abs(Op2C->getValueAPF());
    // We limit to a max of 7 fmul(s). Thus max exponent is 32.
    // This transformation applies to integer exponents only.
    if (V.compare(APFloat(V.getSemantics(), 32.0)) == APFloat::cmpGreaterThan ||
        !V.isInteger())
      return nullptr;

    // Propagate fast math flags.
    IRBuilder<>::FastMathFlagGuard Guard(B);
    B.setFastMathFlags(CI->getFastMathFlags());

    // We will memoize intermediate products of the Addition Chain.
    Value *InnerChain[33] = {nullptr};
    InnerChain[1] = Op1;
    InnerChain[2] = B.CreateFMul(Op1, Op1);

    // We cannot readily convert a non-double type (like float) to a double.
    // So we first convert V to something which could be converted to double.
    bool ignored;
    V.convert(APFloat::IEEEdouble(), APFloat::rmTowardZero, &ignored);

    // V is a whole number <= 32, so the double -> unsigned conversion in the
    // call below is exact.
    Value *FMul = getPow(InnerChain, V.convertToDouble(), B);
    // For negative exponents simply compute the reciprocal.
    if (Op2C->isNegative())
      FMul = B.CreateFDiv(ConstantFP::get(CI->getType(), 1.0), FMul);
    return FMul;
  }

  return nullptr;
}
  992. Value *LibCallSimplifier::optimizeExp2(CallInst *CI, IRBuilder<> &B) {
  993. Function *Callee = CI->getCalledFunction();
  994. Value *Ret = nullptr;
  995. StringRef Name = Callee->getName();
  996. if (UnsafeFPShrink && Name == "exp2" && hasFloatVersion(Name))
  997. Ret = optimizeUnaryDoubleFP(CI, B, true);
  998. Value *Op = CI->getArgOperand(0);
  999. // Turn exp2(sitofp(x)) -> ldexp(1.0, sext(x)) if sizeof(x) <= 32
  1000. // Turn exp2(uitofp(x)) -> ldexp(1.0, zext(x)) if sizeof(x) < 32
  1001. LibFunc LdExp = LibFunc_ldexpl;
  1002. if (Op->getType()->isFloatTy())
  1003. LdExp = LibFunc_ldexpf;
  1004. else if (Op->getType()->isDoubleTy())
  1005. LdExp = LibFunc_ldexp;
  1006. if (TLI->has(LdExp)) {
  1007. Value *LdExpArg = nullptr;
  1008. if (SIToFPInst *OpC = dyn_cast<SIToFPInst>(Op)) {
  1009. if (OpC->getOperand(0)->getType()->getPrimitiveSizeInBits() <= 32)
  1010. LdExpArg = B.CreateSExt(OpC->getOperand(0), B.getInt32Ty());
  1011. } else if (UIToFPInst *OpC = dyn_cast<UIToFPInst>(Op)) {
  1012. if (OpC->getOperand(0)->getType()->getPrimitiveSizeInBits() < 32)
  1013. LdExpArg = B.CreateZExt(OpC->getOperand(0), B.getInt32Ty());
  1014. }
  1015. if (LdExpArg) {
  1016. Constant *One = ConstantFP::get(CI->getContext(), APFloat(1.0f));
  1017. if (!Op->getType()->isFloatTy())
  1018. One = ConstantExpr::getFPExtend(One, Op->getType());
  1019. Module *M = CI->getModule();
  1020. Value *NewCallee =
  1021. M->getOrInsertFunction(TLI->getName(LdExp), Op->getType(),
  1022. Op->getType(), B.getInt32Ty());
  1023. CallInst *CI = B.CreateCall(NewCallee, {One, LdExpArg});
  1024. if (const Function *F = dyn_cast<Function>(Callee->stripPointerCasts()))
  1025. CI->setCallingConv(F->getCallingConv());
  1026. return CI;
  1027. }
  1028. }
  1029. return Ret;
  1030. }
/// Simplify fmin()/fmax() into an fcmp + select when the fast-math
/// environment permits ignoring NaN handling. Requires at least no-nans;
/// unsafe algebra enables all fast-math flags on the emitted instructions.
Value *LibCallSimplifier::optimizeFMinFMax(CallInst *CI, IRBuilder<> &B) {
  Function *Callee = CI->getCalledFunction();
  // If we can shrink the call to a float function rather than a double
  // function, do that first.
  StringRef Name = Callee->getName();
  if ((Name == "fmin" || Name == "fmax") && hasFloatVersion(Name))
    if (Value *Ret = optimizeBinaryDoubleFP(CI, B))
      return Ret;

  IRBuilder<>::FastMathFlagGuard Guard(B);
  FastMathFlags FMF;
  if (CI->hasUnsafeAlgebra()) {
    // Unsafe algebra sets all fast-math-flags to true.
    FMF.setUnsafeAlgebra();
  } else {
    // At a minimum, no-nans-fp-math must be true.
    if (!CI->hasNoNaNs())
      return nullptr;
    // No-signed-zeros is implied by the definitions of fmax/fmin themselves:
    // "Ideally, fmax would be sensitive to the sign of zero, for example
    // fmax(-0. 0, +0. 0) would return +0; however, implementation in software
    // might be impractical."
    FMF.setNoSignedZeros();
    FMF.setNoNaNs();
  }
  B.setFastMathFlags(FMF);

  // We have a relaxed floating-point environment. We can ignore NaN-handling
  // and transform to a compare and select. We do not have to consider errno or
  // exceptions, because fmin/fmax do not have those.
  Value *Op0 = CI->getArgOperand(0);
  Value *Op1 = CI->getArgOperand(1);
  // startswith covers fmin/fminf/fminl vs fmax/fmaxf/fmaxl.
  Value *Cmp = Callee->getName().startswith("fmin") ?
    B.CreateFCmpOLT(Op0, Op1) : B.CreateFCmpOGT(Op0, Op1);
  return B.CreateSelect(Cmp, Op0, Op1);
}
/// Simplify log/log2/log10 under fast-math:
///   log(pow(x, y))  -> y * log(x)
///   log(exp2(y))    -> y * log(2)
/// Both the log call and the inner call must have unsafe algebra.
Value *LibCallSimplifier::optimizeLog(CallInst *CI, IRBuilder<> &B) {
  Function *Callee = CI->getCalledFunction();
  Value *Ret = nullptr;
  StringRef Name = Callee->getName();
  if (UnsafeFPShrink && hasFloatVersion(Name))
    Ret = optimizeUnaryDoubleFP(CI, B, true);

  if (!CI->hasUnsafeAlgebra())
    return Ret;
  Value *Op1 = CI->getArgOperand(0);
  auto *OpC = dyn_cast<CallInst>(Op1);

  // The earlier call must also be unsafe in order to do these transforms.
  if (!OpC || !OpC->hasUnsafeAlgebra())
    return Ret;

  // log(pow(x,y)) -> y*log(x)
  // This is only applicable to log, log2, log10.
  if (Name != "log" && Name != "log2" && Name != "log10")
    return Ret;

  IRBuilder<>::FastMathFlagGuard Guard(B);
  FastMathFlags FMF;
  FMF.setUnsafeAlgebra();
  B.setFastMathFlags(FMF);

  LibFunc Func;
  Function *F = OpC->getCalledFunction();
  // Accept either the pow libcall or the llvm.pow intrinsic as the inner call.
  if (F && ((TLI->getLibFunc(F->getName(), Func) && TLI->has(Func) &&
             Func == LibFunc_pow) || F->getIntrinsicID() == Intrinsic::pow))
    return B.CreateFMul(OpC->getArgOperand(1),
      emitUnaryFloatFnCall(OpC->getOperand(0), Callee->getName(), B,
                           Callee->getAttributes()), "mul");

  // log(exp2(y)) -> y*log(2)
  if (F && Name == "log" && TLI->getLibFunc(F->getName(), Func) &&
      TLI->has(Func) && Func == LibFunc_exp2)
    return B.CreateFMul(
        OpC->getArgOperand(0),
        emitUnaryFloatFnCall(ConstantFP::get(CI->getType(), 2.0),
                             Callee->getName(), B, Callee->getAttributes()),
        "logmul");
  return Ret;
}
/// Simplify a call to sqrt(). Under fast-math, factors a repeated operand out
/// of a multiply feeding the sqrt:
///   sqrt(x * x)       -> fabs(x)
///   sqrt((x * x) * y) -> fabs(x) * sqrt(y)
Value *LibCallSimplifier::optimizeSqrt(CallInst *CI, IRBuilder<> &B) {
  Function *Callee = CI->getCalledFunction();
  Value *Ret = nullptr;
  // TODO: Once we have a way (other than checking for the existince of the
  // libcall) to tell whether our target can lower @llvm.sqrt, relax the
  // condition below.
  if (TLI->has(LibFunc_sqrtf) && (Callee->getName() == "sqrt" ||
                                  Callee->getIntrinsicID() == Intrinsic::sqrt))
    Ret = optimizeUnaryDoubleFP(CI, B, true);

  if (!CI->hasUnsafeAlgebra())
    return Ret;

  Instruction *I = dyn_cast<Instruction>(CI->getArgOperand(0));
  // The fmul feeding the sqrt must itself be fast-math.
  if (!I || I->getOpcode() != Instruction::FMul || !I->hasUnsafeAlgebra())
    return Ret;

  // We're looking for a repeated factor in a multiplication tree,
  // so we can do this fold: sqrt(x * x) -> fabs(x);
  // or this fold: sqrt((x * x) * y) -> fabs(x) * sqrt(y).
  Value *Op0 = I->getOperand(0);
  Value *Op1 = I->getOperand(1);
  Value *RepeatOp = nullptr;
  Value *OtherOp = nullptr;
  if (Op0 == Op1) {
    // Simple match: the operands of the multiply are identical.
    RepeatOp = Op0;
  } else {
    // Look for a more complicated pattern: one of the operands is itself
    // a multiply, so search for a common factor in that multiply.
    // Note: We don't bother looking any deeper than this first level or for
    // variations of this pattern because instcombine's visitFMUL and/or the
    // reassociation pass should give us this form.
    Value *OtherMul0, *OtherMul1;
    if (match(Op0, m_FMul(m_Value(OtherMul0), m_Value(OtherMul1)))) {
      // Pattern: sqrt((x * y) * z)
      if (OtherMul0 == OtherMul1 &&
          cast<Instruction>(Op0)->hasUnsafeAlgebra()) {
        // Matched: sqrt((x * x) * z)
        RepeatOp = OtherMul0;
        OtherOp = Op1;
      }
    }
  }
  if (!RepeatOp)
    return Ret;

  // Fast math flags for any created instructions should match the sqrt
  // and multiply.
  IRBuilder<>::FastMathFlagGuard Guard(B);
  B.setFastMathFlags(I->getFastMathFlags());

  // If we found a repeated factor, hoist it out of the square root and
  // replace it with the fabs of that factor.
  Module *M = Callee->getParent();
  Type *ArgType = I->getType();
  Value *Fabs = Intrinsic::getDeclaration(M, Intrinsic::fabs, ArgType);
  Value *FabsCall = B.CreateCall(Fabs, RepeatOp, "fabs");
  if (OtherOp) {
    // If we found a non-repeated factor, we still need to get its square
    // root. We then multiply that by the value that was simplified out
    // of the square root calculation.
    Value *Sqrt = Intrinsic::getDeclaration(M, Intrinsic::sqrt, ArgType);
    Value *SqrtCall = B.CreateCall(Sqrt, OtherOp, "sqrt");
    return B.CreateFMul(FabsCall, SqrtCall);
  }
  return FabsCall;
}
  1166. // TODO: Generalize to handle any trig function and its inverse.
  1167. Value *LibCallSimplifier::optimizeTan(CallInst *CI, IRBuilder<> &B) {
  1168. Function *Callee = CI->getCalledFunction();
  1169. Value *Ret = nullptr;
  1170. StringRef Name = Callee->getName();
  1171. if (UnsafeFPShrink && Name == "tan" && hasFloatVersion(Name))
  1172. Ret = optimizeUnaryDoubleFP(CI, B, true);
  1173. Value *Op1 = CI->getArgOperand(0);
  1174. auto *OpC = dyn_cast<CallInst>(Op1);
  1175. if (!OpC)
  1176. return Ret;
  1177. // Both calls must allow unsafe optimizations in order to remove them.
  1178. if (!CI->hasUnsafeAlgebra() || !OpC->hasUnsafeAlgebra())
  1179. return Ret;
  1180. // tan(atan(x)) -> x
  1181. // tanf(atanf(x)) -> x
  1182. // tanl(atanl(x)) -> x
  1183. LibFunc Func;
  1184. Function *F = OpC->getCalledFunction();
  1185. if (F && TLI->getLibFunc(F->getName(), Func) && TLI->has(Func) &&
  1186. ((Func == LibFunc_atan && Callee->getName() == "tan") ||
  1187. (Func == LibFunc_atanf && Callee->getName() == "tanf") ||
  1188. (Func == LibFunc_atanl && Callee->getName() == "tanl")))
  1189. Ret = OpC->getArgOperand(0);
  1190. return Ret;
  1191. }
  1192. static bool isTrigLibCall(CallInst *CI) {
  1193. // We can only hope to do anything useful if we can ignore things like errno
  1194. // and floating-point exceptions.
  1195. // We already checked the prototype.
  1196. return CI->hasFnAttr(Attribute::NoUnwind) &&
  1197. CI->hasFnAttr(Attribute::ReadNone);
  1198. }
/// Insert a call to the combined __sincospi[f]_stret runtime function for
/// \p Arg and return the extracted sine and cosine (plus the aggregate) via
/// the out-parameters \p Sin, \p Cos and \p SinCos.
static void insertSinCosCall(IRBuilder<> &B, Function *OrigCallee, Value *Arg,
                             bool UseFloat, Value *&Sin, Value *&Cos,
                             Value *&SinCos) {
  Type *ArgTy = Arg->getType();
  Type *ResTy;
  StringRef Name;

  Triple T(OrigCallee->getParent()->getTargetTriple());
  if (UseFloat) {
    Name = "__sincospif_stret";

    assert(T.getArch() != Triple::x86 && "x86 messy and unsupported for now");
    // x86_64 can't use {float, float} since that would be returned in both
    // xmm0 and xmm1, which isn't what a real struct would do.
    ResTy = T.getArch() == Triple::x86_64
                ? static_cast<Type *>(VectorType::get(ArgTy, 2))
                : static_cast<Type *>(StructType::get(ArgTy, ArgTy, nullptr));
  } else {
    Name = "__sincospi_stret";
    ResTy = StructType::get(ArgTy, ArgTy, nullptr);
  }

  Module *M = OrigCallee->getParent();
  Value *Callee = M->getOrInsertFunction(Name, OrigCallee->getAttributes(),
                                         ResTy, ArgTy);

  if (Instruction *ArgInst = dyn_cast<Instruction>(Arg)) {
    // If the argument is an instruction, it must dominate all uses so put our
    // sincos call there.
    B.SetInsertPoint(ArgInst->getParent(), ++ArgInst->getIterator());
  } else {
    // Otherwise (e.g. for a constant) the beginning of the function is as
    // good a place as any.
    BasicBlock &EntryBB = B.GetInsertBlock()->getParent()->getEntryBlock();
    B.SetInsertPoint(&EntryBB, EntryBB.begin());
  }

  SinCos = B.CreateCall(Callee, Arg, "sincospi");

  // Struct result: extract by field index; vector result (x86_64 float case
  // above): extract by element index.
  if (SinCos->getType()->isStructTy()) {
    Sin = B.CreateExtractValue(SinCos, 0, "sinpi");
    Cos = B.CreateExtractValue(SinCos, 1, "cospi");
  } else {
    Sin = B.CreateExtractElement(SinCos, ConstantInt::get(B.getInt32Ty(), 0),
                                 "sinpi");
    Cos = B.CreateExtractElement(SinCos, ConstantInt::get(B.getInt32Ty(), 1),
                                 "cospi");
  }
}
  1242. Value *LibCallSimplifier::optimizeSinCosPi(CallInst *CI, IRBuilder<> &B) {
  1243. // Make sure the prototype is as expected, otherwise the rest of the
  1244. // function is probably invalid and likely to abort.
  1245. if (!isTrigLibCall(CI))
  1246. return nullptr;
  1247. Value *Arg = CI->getArgOperand(0);
  1248. SmallVector<CallInst *, 1> SinCalls;
  1249. SmallVector<CallInst *, 1> CosCalls;
  1250. SmallVector<CallInst *, 1> SinCosCalls;
  1251. bool IsFloat = Arg->getType()->isFloatTy();
  1252. // Look for all compatible sinpi, cospi and sincospi calls with the same
  1253. // argument. If there are enough (in some sense) we can make the
  1254. // substitution.
  1255. Function *F = CI->getFunction();
  1256. for (User *U : Arg->users())
  1257. classifyArgUse(U, F, IsFloat, SinCalls, CosCalls, SinCosCalls);
  1258. // It's only worthwhile if both sinpi and cospi are actually used.
  1259. if (SinCosCalls.empty() && (SinCalls.empty() || CosCalls.empty()))
  1260. return nullptr;
  1261. Value *Sin, *Cos, *SinCos;
  1262. insertSinCosCall(B, CI->getCalledFunction(), Arg, IsFloat, Sin, Cos, SinCos);
  1263. auto replaceTrigInsts = [this](SmallVectorImpl<CallInst *> &Calls,
  1264. Value *Res) {
  1265. for (CallInst *C : Calls)
  1266. replaceAllUsesWith(C, Res);
  1267. };
  1268. replaceTrigInsts(SinCalls, Sin);
  1269. replaceTrigInsts(CosCalls, Cos);
  1270. replaceTrigInsts(SinCosCalls, SinCos);
  1271. return nullptr;
  1272. }
  1273. void LibCallSimplifier::classifyArgUse(
  1274. Value *Val, Function *F, bool IsFloat,
  1275. SmallVectorImpl<CallInst *> &SinCalls,
  1276. SmallVectorImpl<CallInst *> &CosCalls,
  1277. SmallVectorImpl<CallInst *> &SinCosCalls) {
  1278. CallInst *CI = dyn_cast<CallInst>(Val);
  1279. if (!CI)
  1280. return;
  1281. // Don't consider calls in other functions.
  1282. if (CI->getFunction() != F)
  1283. return;
  1284. Function *Callee = CI->getCalledFunction();
  1285. LibFunc Func;
  1286. if (!Callee || !TLI->getLibFunc(*Callee, Func) || !TLI->has(Func) ||
  1287. !isTrigLibCall(CI))
  1288. return;
  1289. if (IsFloat) {
  1290. if (Func == LibFunc_sinpif)
  1291. SinCalls.push_back(CI);
  1292. else if (Func == LibFunc_cospif)
  1293. CosCalls.push_back(CI);
  1294. else if (Func == LibFunc_sincospif_stret)
  1295. SinCosCalls.push_back(CI);
  1296. } else {
  1297. if (Func == LibFunc_sinpi)
  1298. SinCalls.push_back(CI);
  1299. else if (Func == LibFunc_cospi)
  1300. CosCalls.push_back(CI);
  1301. else if (Func == LibFunc_sincospi_stret)
  1302. SinCosCalls.push_back(CI);
  1303. }
  1304. }
  1305. //===----------------------------------------------------------------------===//
  1306. // Integer Library Call Optimizations
  1307. //===----------------------------------------------------------------------===//
  1308. Value *LibCallSimplifier::optimizeFFS(CallInst *CI, IRBuilder<> &B) {
  1309. // ffs(x) -> x != 0 ? (i32)llvm.cttz(x)+1 : 0
  1310. Value *Op = CI->getArgOperand(0);
  1311. Type *ArgType = Op->getType();
  1312. Value *F = Intrinsic::getDeclaration(CI->getCalledFunction()->getParent(),
  1313. Intrinsic::cttz, ArgType);
  1314. Value *V = B.CreateCall(F, {Op, B.getTrue()}, "cttz");
  1315. V = B.CreateAdd(V, ConstantInt::get(V->getType(), 1));
  1316. V = B.CreateIntCast(V, B.getInt32Ty(), false);
  1317. Value *Cond = B.CreateICmpNE(Op, Constant::getNullValue(ArgType));
  1318. return B.CreateSelect(Cond, V, B.getInt32(0));
  1319. }
  1320. Value *LibCallSimplifier::optimizeFls(CallInst *CI, IRBuilder<> &B) {
  1321. // fls(x) -> (i32)(sizeInBits(x) - llvm.ctlz(x, false))
  1322. Value *Op = CI->getArgOperand(0);
  1323. Type *ArgType = Op->getType();
  1324. Value *F = Intrinsic::getDeclaration(CI->getCalledFunction()->getParent(),
  1325. Intrinsic::ctlz, ArgType);
  1326. Value *V = B.CreateCall(F, {Op, B.getFalse()}, "ctlz");
  1327. V = B.CreateSub(ConstantInt::get(V->getType(), ArgType->getIntegerBitWidth()),
  1328. V);
  1329. return B.CreateIntCast(V, CI->getType(), false);
  1330. }
  1331. Value *LibCallSimplifier::optimizeAbs(CallInst *CI, IRBuilder<> &B) {
  1332. // abs(x) -> x >s -1 ? x : -x
  1333. Value *Op = CI->getArgOperand(0);
  1334. Value *Pos =
  1335. B.CreateICmpSGT(Op, Constant::getAllOnesValue(Op->getType()), "ispos");
  1336. Value *Neg = B.CreateNeg(Op, "neg");
  1337. return B.CreateSelect(Pos, Op, Neg);
  1338. }
  1339. Value *LibCallSimplifier::optimizeIsDigit(CallInst *CI, IRBuilder<> &B) {
  1340. // isdigit(c) -> (c-'0') <u 10
  1341. Value *Op = CI->getArgOperand(0);
  1342. Op = B.CreateSub(Op, B.getInt32('0'), "isdigittmp");
  1343. Op = B.CreateICmpULT(Op, B.getInt32(10), "isdigit");
  1344. return B.CreateZExt(Op, CI->getType());
  1345. }
  1346. Value *LibCallSimplifier::optimizeIsAscii(CallInst *CI, IRBuilder<> &B) {
  1347. // isascii(c) -> c <u 128
  1348. Value *Op = CI->getArgOperand(0);
  1349. Op = B.CreateICmpULT(Op, B.getInt32(128), "isascii");
  1350. return B.CreateZExt(Op, CI->getType());
  1351. }
  1352. Value *LibCallSimplifier::optimizeToAscii(CallInst *CI, IRBuilder<> &B) {
  1353. // toascii(c) -> c & 0x7f
  1354. return B.CreateAnd(CI->getArgOperand(0),
  1355. ConstantInt::get(CI->getType(), 0x7F));
  1356. }
  1357. //===----------------------------------------------------------------------===//
  1358. // Formatting and IO Library Call Optimizations
  1359. //===----------------------------------------------------------------------===//
  1360. static bool isReportingError(Function *Callee, CallInst *CI, int StreamArg);
  1361. Value *LibCallSimplifier::optimizeErrorReporting(CallInst *CI, IRBuilder<> &B,
  1362. int StreamArg) {
  1363. Function *Callee = CI->getCalledFunction();
  1364. // Error reporting calls should be cold, mark them as such.
  1365. // This applies even to non-builtin calls: it is only a hint and applies to
  1366. // functions that the frontend might not understand as builtins.
  1367. // This heuristic was suggested in:
  1368. // Improving Static Branch Prediction in a Compiler
  1369. // Brian L. Deitrich, Ben-Chung Cheng, Wen-mei W. Hwu
  1370. // Proceedings of PACT'98, Oct. 1998, IEEE
  1371. if (!CI->hasFnAttr(Attribute::Cold) &&
  1372. isReportingError(Callee, CI, StreamArg)) {
  1373. CI->addAttribute(AttributeList::FunctionIndex, Attribute::Cold);
  1374. }
  1375. return nullptr;
  1376. }
  1377. static bool isReportingError(Function *Callee, CallInst *CI, int StreamArg) {
  1378. if (!ColdErrorCalls || !Callee || !Callee->isDeclaration())
  1379. return false;
  1380. if (StreamArg < 0)
  1381. return true;
  1382. // These functions might be considered cold, but only if their stream
  1383. // argument is stderr.
  1384. if (StreamArg >= (int)CI->getNumArgOperands())
  1385. return false;
  1386. LoadInst *LI = dyn_cast<LoadInst>(CI->getArgOperand(StreamArg));
  1387. if (!LI)
  1388. return false;
  1389. GlobalVariable *GV = dyn_cast<GlobalVariable>(LI->getPointerOperand());
  1390. if (!GV || !GV->isDeclaration())
  1391. return false;
  1392. return GV->getName() == "stderr";
  1393. }
/// Simplify printf calls whose format string is a compile-time constant.
/// Returns the replacement value, or nullptr when no transform applies.
Value *LibCallSimplifier::optimizePrintFString(CallInst *CI, IRBuilder<> &B) {
  // Check for a fixed format string.
  StringRef FormatStr;
  if (!getConstantStringInfo(CI->getArgOperand(0), FormatStr))
    return nullptr;

  // Empty format string -> noop.
  if (FormatStr.empty()) // Tolerate printf's declared void.
    return CI->use_empty() ? (Value *)CI : ConstantInt::get(CI->getType(), 0);

  // Do not do any of the following transformations if the printf return value
  // is used, in general the printf return value is not compatible with either
  // putchar() or puts().
  if (!CI->use_empty())
    return nullptr;

  // printf("x") -> putchar('x'), even for "%" and "%%".
  if (FormatStr.size() == 1 || FormatStr == "%%")
    return emitPutChar(B.getInt32(FormatStr[0]), B, TLI);

  // printf("%s", "a") --> putchar('a')
  if (FormatStr == "%s" && CI->getNumArgOperands() > 1) {
    StringRef ChrStr;
    if (!getConstantStringInfo(CI->getOperand(1), ChrStr))
      return nullptr;
    if (ChrStr.size() != 1)
      return nullptr;
    return emitPutChar(B.getInt32(ChrStr[0]), B, TLI);
  }

  // printf("foo\n") --> puts("foo")
  if (FormatStr[FormatStr.size() - 1] == '\n' &&
      FormatStr.find('%') == StringRef::npos) { // No format characters.
    // Create a string literal with no \n on it. We expect the constant merge
    // pass to be run after this pass, to merge duplicate strings.
    FormatStr = FormatStr.drop_back();
    Value *GV = B.CreateGlobalString(FormatStr, "str");
    return emitPutS(GV, B, TLI);
  }

  // Optimize specific format strings.
  // printf("%c", chr) --> putchar(chr)
  if (FormatStr == "%c" && CI->getNumArgOperands() > 1 &&
      CI->getArgOperand(1)->getType()->isIntegerTy())
    return emitPutChar(CI->getArgOperand(1), B, TLI);

  // printf("%s\n", str) --> puts(str)
  if (FormatStr == "%s\n" && CI->getNumArgOperands() > 1 &&
      CI->getArgOperand(1)->getType()->isPointerTy())
    return emitPutS(CI->getArgOperand(1), B, TLI);
  return nullptr;
}
  1439. Value *LibCallSimplifier::optimizePrintF(CallInst *CI, IRBuilder<> &B) {
  1440. Function *Callee = CI->getCalledFunction();
  1441. FunctionType *FT = Callee->getFunctionType();
  1442. if (Value *V = optimizePrintFString(CI, B)) {
  1443. return V;
  1444. }
  1445. // printf(format, ...) -> iprintf(format, ...) if no floating point
  1446. // arguments.
  1447. if (TLI->has(LibFunc_iprintf) && !callHasFloatingPointArgument(CI)) {
  1448. Module *M = B.GetInsertBlock()->getParent()->getParent();
  1449. Constant *IPrintFFn =
  1450. M->getOrInsertFunction("iprintf", FT, Callee->getAttributes());
  1451. CallInst *New = cast<CallInst>(CI->clone());
  1452. New->setCalledFunction(IPrintFFn);
  1453. B.Insert(New);
  1454. return New;
  1455. }
  1456. return nullptr;
  1457. }
/// Simplify sprintf calls whose format string is a compile-time constant.
/// Returns the replacement return value, or nullptr when no transform
/// applies.
Value *LibCallSimplifier::optimizeSPrintFString(CallInst *CI, IRBuilder<> &B) {
  // Check for a fixed format string.
  StringRef FormatStr;
  if (!getConstantStringInfo(CI->getArgOperand(1), FormatStr))
    return nullptr;

  // If we just have a format string (nothing else crazy) transform it.
  if (CI->getNumArgOperands() == 2) {
    // Make sure there's no % in the constant array. We could try to handle
    // %% -> % in the future if we cared.
    for (unsigned i = 0, e = FormatStr.size(); i != e; ++i)
      if (FormatStr[i] == '%')
        return nullptr; // we found a format specifier, bail out.

    // sprintf(str, fmt) -> llvm.memcpy(str, fmt, strlen(fmt)+1, 1)
    B.CreateMemCpy(CI->getArgOperand(0), CI->getArgOperand(1),
                   ConstantInt::get(DL.getIntPtrType(CI->getContext()),
                                    FormatStr.size() + 1),
                   1); // Copy the null byte.
    // sprintf returns the number of characters written, excluding the nul.
    return ConstantInt::get(CI->getType(), FormatStr.size());
  }

  // The remaining optimizations require the format string to be "%s" or "%c"
  // and have an extra operand.
  if (FormatStr.size() != 2 || FormatStr[0] != '%' ||
      CI->getNumArgOperands() < 3)
    return nullptr;

  // Decode the second character of the format string.
  if (FormatStr[1] == 'c') {
    // sprintf(dst, "%c", chr) --> *(i8*)dst = chr; *((i8*)dst+1) = 0
    if (!CI->getArgOperand(2)->getType()->isIntegerTy())
      return nullptr;
    Value *V = B.CreateTrunc(CI->getArgOperand(2), B.getInt8Ty(), "char");
    Value *Ptr = castToCStr(CI->getArgOperand(0), B);
    B.CreateStore(V, Ptr);
    Ptr = B.CreateGEP(B.getInt8Ty(), Ptr, B.getInt32(1), "nul");
    B.CreateStore(B.getInt8(0), Ptr);
    return ConstantInt::get(CI->getType(), 1);
  }

  if (FormatStr[1] == 's') {
    // sprintf(dest, "%s", str) -> llvm.memcpy(dest, str, strlen(str)+1, 1)
    if (!CI->getArgOperand(2)->getType()->isPointerTy())
      return nullptr;

    // Bail out when the source length cannot be computed at runtime either.
    Value *Len = emitStrLen(CI->getArgOperand(2), B, DL, TLI);
    if (!Len)
      return nullptr;
    Value *IncLen =
        B.CreateAdd(Len, ConstantInt::get(Len->getType(), 1), "leninc");
    B.CreateMemCpy(CI->getArgOperand(0), CI->getArgOperand(2), IncLen, 1);

    // The sprintf result is the unincremented number of bytes in the string.
    return B.CreateIntCast(Len, CI->getType(), false);
  }
  return nullptr;
}
  1509. Value *LibCallSimplifier::optimizeSPrintF(CallInst *CI, IRBuilder<> &B) {
  1510. Function *Callee = CI->getCalledFunction();
  1511. FunctionType *FT = Callee->getFunctionType();
  1512. if (Value *V = optimizeSPrintFString(CI, B)) {
  1513. return V;
  1514. }
  1515. // sprintf(str, format, ...) -> siprintf(str, format, ...) if no floating
  1516. // point arguments.
  1517. if (TLI->has(LibFunc_siprintf) && !callHasFloatingPointArgument(CI)) {
  1518. Module *M = B.GetInsertBlock()->getParent()->getParent();
  1519. Constant *SIPrintFFn =
  1520. M->getOrInsertFunction("siprintf", FT, Callee->getAttributes());
  1521. CallInst *New = cast<CallInst>(CI->clone());
  1522. New->setCalledFunction(SIPrintFFn);
  1523. B.Insert(New);
  1524. return New;
  1525. }
  1526. return nullptr;
  1527. }
/// Simplify fprintf calls whose format string is a compile-time constant.
/// Also marks the call cold when the stream is stderr.
Value *LibCallSimplifier::optimizeFPrintFString(CallInst *CI, IRBuilder<> &B) {
  // Mark the call cold if it reports to stderr (arg 0 is the stream).
  optimizeErrorReporting(CI, B, 0);

  // All the optimizations depend on the format string.
  StringRef FormatStr;
  if (!getConstantStringInfo(CI->getArgOperand(1), FormatStr))
    return nullptr;

  // Do not do any of the following transformations if the fprintf return
  // value is used, in general the fprintf return value is not compatible
  // with fwrite(), fputc() or fputs().
  if (!CI->use_empty())
    return nullptr;

  // fprintf(F, "foo") --> fwrite("foo", 3, 1, F)
  if (CI->getNumArgOperands() == 2) {
    for (unsigned i = 0, e = FormatStr.size(); i != e; ++i)
      if (FormatStr[i] == '%') // Could handle %% -> % if we cared.
        return nullptr;        // We found a format specifier.

    return emitFWrite(
        CI->getArgOperand(1),
        ConstantInt::get(DL.getIntPtrType(CI->getContext()), FormatStr.size()),
        CI->getArgOperand(0), B, DL, TLI);
  }

  // The remaining optimizations require the format string to be "%s" or "%c"
  // and have an extra operand.
  if (FormatStr.size() != 2 || FormatStr[0] != '%' ||
      CI->getNumArgOperands() < 3)
    return nullptr;

  // Decode the second character of the format string.
  if (FormatStr[1] == 'c') {
    // fprintf(F, "%c", chr) --> fputc(chr, F)
    if (!CI->getArgOperand(2)->getType()->isIntegerTy())
      return nullptr;
    return emitFPutC(CI->getArgOperand(2), CI->getArgOperand(0), B, TLI);
  }

  if (FormatStr[1] == 's') {
    // fprintf(F, "%s", str) --> fputs(str, F)
    if (!CI->getArgOperand(2)->getType()->isPointerTy())
      return nullptr;
    return emitFPutS(CI->getArgOperand(2), CI->getArgOperand(0), B, TLI);
  }
  return nullptr;
}
  1569. Value *LibCallSimplifier::optimizeFPrintF(CallInst *CI, IRBuilder<> &B) {
  1570. Function *Callee = CI->getCalledFunction();
  1571. FunctionType *FT = Callee->getFunctionType();
  1572. if (Value *V = optimizeFPrintFString(CI, B)) {
  1573. return V;
  1574. }
  1575. // fprintf(stream, format, ...) -> fiprintf(stream, format, ...) if no
  1576. // floating point arguments.
  1577. if (TLI->has(LibFunc_fiprintf) && !callHasFloatingPointArgument(CI)) {
  1578. Module *M = B.GetInsertBlock()->getParent()->getParent();
  1579. Constant *FIPrintFFn =
  1580. M->getOrInsertFunction("fiprintf", FT, Callee->getAttributes());
  1581. CallInst *New = cast<CallInst>(CI->clone());
  1582. New->setCalledFunction(FIPrintFFn);
  1583. B.Insert(New);
  1584. return New;
  1585. }
  1586. return nullptr;
  1587. }
  1588. Value *LibCallSimplifier::optimizeFWrite(CallInst *CI, IRBuilder<> &B) {
  1589. optimizeErrorReporting(CI, B, 3);
  1590. // Get the element size and count.
  1591. ConstantInt *SizeC = dyn_cast<ConstantInt>(CI->getArgOperand(1));
  1592. ConstantInt *CountC = dyn_cast<ConstantInt>(CI->getArgOperand(2));
  1593. if (!SizeC || !CountC)
  1594. return nullptr;
  1595. uint64_t Bytes = SizeC->getZExtValue() * CountC->getZExtValue();
  1596. // If this is writing zero records, remove the call (it's a noop).
  1597. if (Bytes == 0)
  1598. return ConstantInt::get(CI->getType(), 0);
  1599. // If this is writing one byte, turn it into fputc.
  1600. // This optimisation is only valid, if the return value is unused.
  1601. if (Bytes == 1 && CI->use_empty()) { // fwrite(S,1,1,F) -> fputc(S[0],F)
  1602. Value *Char = B.CreateLoad(castToCStr(CI->getArgOperand(0), B), "char");
  1603. Value *NewCI = emitFPutC(Char, CI->getArgOperand(3), B, TLI);
  1604. return NewCI ? ConstantInt::get(CI->getType(), 1) : nullptr;
  1605. }
  1606. return nullptr;
  1607. }
  1608. Value *LibCallSimplifier::optimizeFPuts(CallInst *CI, IRBuilder<> &B) {
  1609. optimizeErrorReporting(CI, B, 1);
  1610. // Don't rewrite fputs to fwrite when optimising for size because fwrite
  1611. // requires more arguments and thus extra MOVs are required.
  1612. if (CI->getParent()->getParent()->optForSize())
  1613. return nullptr;
  1614. // We can't optimize if return value is used.
  1615. if (!CI->use_empty())
  1616. return nullptr;
  1617. // fputs(s,F) --> fwrite(s,1,strlen(s),F)
  1618. uint64_t Len = GetStringLength(CI->getArgOperand(0));
  1619. if (!Len)
  1620. return nullptr;
  1621. // Known to have no uses (see above).
  1622. return emitFWrite(
  1623. CI->getArgOperand(0),
  1624. ConstantInt::get(DL.getIntPtrType(CI->getContext()), Len - 1),
  1625. CI->getArgOperand(1), B, DL, TLI);
  1626. }
/// Simplify puts("") into putchar('\n').
Value *LibCallSimplifier::optimizePuts(CallInst *CI, IRBuilder<> &B) {
  // Check for a constant string.
  StringRef Str;
  if (!getConstantStringInfo(CI->getArgOperand(0), Str))
    return nullptr;

  if (Str.empty() && CI->use_empty()) {
    // puts("") -> putchar('\n')
    Value *Res = emitPutChar(B.getInt32('\n'), B, TLI);
    // NOTE(review): use_empty() was already required to enter this block, so
    // this condition is always true and the IntCast below looks unreachable;
    // kept as-is to preserve behavior.
    if (CI->use_empty() || !Res)
      return Res;
    return B.CreateIntCast(Res, CI->getType(), true);
  }
  return nullptr;
}
  1641. bool LibCallSimplifier::hasFloatVersion(StringRef FuncName) {
  1642. LibFunc Func;
  1643. SmallString<20> FloatFuncName = FuncName;
  1644. FloatFuncName += 'f';
  1645. if (TLI->getLibFunc(FloatFuncName, Func))
  1646. return TLI->has(Func);
  1647. return false;
  1648. }
/// Dispatch a recognized string/memory library call to its dedicated
/// simplifier. Returns the replacement value or nullptr.
Value *LibCallSimplifier::optimizeStringMemoryLibCall(CallInst *CI,
                                                      IRBuilder<> &Builder) {
  LibFunc Func;
  Function *Callee = CI->getCalledFunction();
  // Check for string/memory library functions.
  if (TLI->getLibFunc(*Callee, Func) && TLI->has(Func)) {
    // Make sure we never change the calling convention.
    assert((ignoreCallingConv(Func) ||
            isCallingConvCCompatible(CI)) &&
           "Optimizing string/memory libcall would change the calling convention");
    switch (Func) {
    case LibFunc_strcat:
      return optimizeStrCat(CI, Builder);
    case LibFunc_strncat:
      return optimizeStrNCat(CI, Builder);
    case LibFunc_strchr:
      return optimizeStrChr(CI, Builder);
    case LibFunc_strrchr:
      return optimizeStrRChr(CI, Builder);
    case LibFunc_strcmp:
      return optimizeStrCmp(CI, Builder);
    case LibFunc_strncmp:
      return optimizeStrNCmp(CI, Builder);
    case LibFunc_strcpy:
      return optimizeStrCpy(CI, Builder);
    case LibFunc_stpcpy:
      return optimizeStpCpy(CI, Builder);
    case LibFunc_strncpy:
      return optimizeStrNCpy(CI, Builder);
    case LibFunc_strlen:
      return optimizeStrLen(CI, Builder);
    case LibFunc_strpbrk:
      return optimizeStrPBrk(CI, Builder);
    // All strto* variants share one simplifier.
    case LibFunc_strtol:
    case LibFunc_strtod:
    case LibFunc_strtof:
    case LibFunc_strtoul:
    case LibFunc_strtoll:
    case LibFunc_strtold:
    case LibFunc_strtoull:
      return optimizeStrTo(CI, Builder);
    case LibFunc_strspn:
      return optimizeStrSpn(CI, Builder);
    case LibFunc_strcspn:
      return optimizeStrCSpn(CI, Builder);
    case LibFunc_strstr:
      return optimizeStrStr(CI, Builder);
    case LibFunc_memchr:
      return optimizeMemChr(CI, Builder);
    case LibFunc_memcmp:
      return optimizeMemCmp(CI, Builder);
    case LibFunc_memcpy:
      return optimizeMemCpy(CI, Builder);
    case LibFunc_memmove:
      return optimizeMemMove(CI, Builder);
    case LibFunc_memset:
      return optimizeMemSet(CI, Builder);
    default:
      break;
    }
  }
  return nullptr;
}
/// Main entry point: try every applicable simplification for the given call.
/// Order matters: intrinsics first, then fortified (_chk) calls, then the
/// named library functions. Returns the replacement value or nullptr.
Value *LibCallSimplifier::optimizeCall(CallInst *CI) {
  // Respect the nobuiltin attribute: the call must not be treated as a
  // library function.
  if (CI->isNoBuiltin())
    return nullptr;

  LibFunc Func;
  Function *Callee = CI->getCalledFunction();
  StringRef FuncName = Callee->getName();

  // Preserve any operand bundles on instructions we create.
  SmallVector<OperandBundleDef, 2> OpBundles;
  CI->getOperandBundlesAsDefs(OpBundles);
  IRBuilder<> Builder(CI, /*FPMathTag=*/nullptr, OpBundles);
  bool isCallingConvC = isCallingConvCCompatible(CI);

  // Command-line parameter overrides instruction attribute.
  if (EnableUnsafeFPShrink.getNumOccurrences() > 0)
    UnsafeFPShrink = EnableUnsafeFPShrink;
  else if (isa<FPMathOperator>(CI) && CI->hasUnsafeAlgebra())
    UnsafeFPShrink = true;

  // First, check for intrinsics.
  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI)) {
    if (!isCallingConvC)
      return nullptr;
    switch (II->getIntrinsicID()) {
    case Intrinsic::pow:
      return optimizePow(CI, Builder);
    case Intrinsic::exp2:
      return optimizeExp2(CI, Builder);
    case Intrinsic::log:
      return optimizeLog(CI, Builder);
    case Intrinsic::sqrt:
      return optimizeSqrt(CI, Builder);
    // TODO: Use foldMallocMemset() with memset intrinsic.
    default:
      return nullptr;
    }
  }

  // Also try to simplify calls to fortified library functions.
  if (Value *SimplifiedFortifiedCI = FortifiedSimplifier.optimizeCall(CI)) {
    // Try to further simplify the result.
    CallInst *SimplifiedCI = dyn_cast<CallInst>(SimplifiedFortifiedCI);
    if (SimplifiedCI && SimplifiedCI->getCalledFunction()) {
      // Use an IR Builder from SimplifiedCI if available instead of CI
      // to guarantee we reach all uses we might replace later on.
      IRBuilder<> TmpBuilder(SimplifiedCI);
      if (Value *V = optimizeStringMemoryLibCall(SimplifiedCI, TmpBuilder)) {
        // If we were able to further simplify, remove the now redundant call.
        SimplifiedCI->replaceAllUsesWith(V);
        SimplifiedCI->eraseFromParent();
        return V;
      }
    }
    return SimplifiedFortifiedCI;
  }

  // Then check for known library functions.
  if (TLI->getLibFunc(*Callee, Func) && TLI->has(Func)) {
    // We never change the calling convention.
    if (!ignoreCallingConv(Func) && !isCallingConvC)
      return nullptr;
    if (Value *V = optimizeStringMemoryLibCall(CI, Builder))
      return V;
    switch (Func) {
    case LibFunc_cosf:
    case LibFunc_cos:
    case LibFunc_cosl:
      return optimizeCos(CI, Builder);
    case LibFunc_sinpif:
    case LibFunc_sinpi:
    case LibFunc_cospif:
    case LibFunc_cospi:
      return optimizeSinCosPi(CI, Builder);
    case LibFunc_powf:
    case LibFunc_pow:
    case LibFunc_powl:
      return optimizePow(CI, Builder);
    case LibFunc_exp2l:
    case LibFunc_exp2:
    case LibFunc_exp2f:
      return optimizeExp2(CI, Builder);
    case LibFunc_fabsf:
    case LibFunc_fabs:
    case LibFunc_fabsl:
      return replaceUnaryCall(CI, Builder, Intrinsic::fabs);
    case LibFunc_sqrtf:
    case LibFunc_sqrt:
    case LibFunc_sqrtl:
      return optimizeSqrt(CI, Builder);
    case LibFunc_ffs:
    case LibFunc_ffsl:
    case LibFunc_ffsll:
      return optimizeFFS(CI, Builder);
    case LibFunc_fls:
    case LibFunc_flsl:
    case LibFunc_flsll:
      return optimizeFls(CI, Builder);
    case LibFunc_abs:
    case LibFunc_labs:
    case LibFunc_llabs:
      return optimizeAbs(CI, Builder);
    case LibFunc_isdigit:
      return optimizeIsDigit(CI, Builder);
    case LibFunc_isascii:
      return optimizeIsAscii(CI, Builder);
    case LibFunc_toascii:
      return optimizeToAscii(CI, Builder);
    case LibFunc_printf:
      return optimizePrintF(CI, Builder);
    case LibFunc_sprintf:
      return optimizeSPrintF(CI, Builder);
    case LibFunc_fprintf:
      return optimizeFPrintF(CI, Builder);
    case LibFunc_fwrite:
      return optimizeFWrite(CI, Builder);
    case LibFunc_fputs:
      return optimizeFPuts(CI, Builder);
    case LibFunc_log:
    case LibFunc_log10:
    case LibFunc_log1p:
    case LibFunc_log2:
    case LibFunc_logb:
      return optimizeLog(CI, Builder);
    case LibFunc_puts:
      return optimizePuts(CI, Builder);
    case LibFunc_tan:
    case LibFunc_tanf:
    case LibFunc_tanl:
      return optimizeTan(CI, Builder);
    case LibFunc_perror:
      return optimizeErrorReporting(CI, Builder);
    case LibFunc_vfprintf:
    case LibFunc_fiprintf:
      return optimizeErrorReporting(CI, Builder, 0);
    case LibFunc_fputc:
      return optimizeErrorReporting(CI, Builder, 1);
    case LibFunc_ceil:
      return replaceUnaryCall(CI, Builder, Intrinsic::ceil);
    case LibFunc_floor:
      return replaceUnaryCall(CI, Builder, Intrinsic::floor);
    case LibFunc_round:
      return replaceUnaryCall(CI, Builder, Intrinsic::round);
    case LibFunc_nearbyint:
      return replaceUnaryCall(CI, Builder, Intrinsic::nearbyint);
    case LibFunc_rint:
      return replaceUnaryCall(CI, Builder, Intrinsic::rint);
    case LibFunc_trunc:
      return replaceUnaryCall(CI, Builder, Intrinsic::trunc);
    // Unary double functions that only shrink to float under unsafe math.
    case LibFunc_acos:
    case LibFunc_acosh:
    case LibFunc_asin:
    case LibFunc_asinh:
    case LibFunc_atan:
    case LibFunc_atanh:
    case LibFunc_cbrt:
    case LibFunc_cosh:
    case LibFunc_exp:
    case LibFunc_exp10:
    case LibFunc_expm1:
    case LibFunc_sin:
    case LibFunc_sinh:
    case LibFunc_tanh:
      if (UnsafeFPShrink && hasFloatVersion(FuncName))
        return optimizeUnaryDoubleFP(CI, Builder, true);
      return nullptr;
    case LibFunc_copysign:
      if (hasFloatVersion(FuncName))
        return optimizeBinaryDoubleFP(CI, Builder);
      return nullptr;
    case LibFunc_fminf:
    case LibFunc_fmin:
    case LibFunc_fminl:
    case LibFunc_fmaxf:
    case LibFunc_fmax:
    case LibFunc_fmaxl:
      return optimizeFMinFMax(CI, Builder);
    default:
      return nullptr;
    }
  }
  return nullptr;
}
/// Construct a simplifier; \p Replacer is invoked (via replaceAllUsesWith)
/// whenever a simplified call must replace an existing instruction.
LibCallSimplifier::LibCallSimplifier(
    const DataLayout &DL, const TargetLibraryInfo *TLI,
    function_ref<void(Instruction *, Value *)> Replacer)
    : FortifiedSimplifier(TLI), DL(DL), TLI(TLI), UnsafeFPShrink(false),
      Replacer(Replacer) {}
/// Replace all uses of \p I with \p With via the configured callback, so
/// callers of the simplifier control how replacement is performed.
void LibCallSimplifier::replaceAllUsesWith(Instruction *I, Value *With) {
  // Indirect through the replacer used in this instance.
  Replacer(I, With);
}
  1897. // TODO:
  1898. // Additional cases that we need to add to this file:
  1899. //
  1900. // cbrt:
  1901. // * cbrt(expN(X)) -> expN(x/3)
  1902. // * cbrt(sqrt(x)) -> pow(x,1/6)
  1903. // * cbrt(cbrt(x)) -> pow(x,1/9)
  1904. //
  1905. // exp, expf, expl:
  1906. // * exp(log(x)) -> x
  1907. //
  1908. // log, logf, logl:
  1909. // * log(exp(x)) -> x
  1910. // * log(exp(y)) -> y*log(e)
  1911. // * log(exp10(y)) -> y*log(10)
  1912. // * log(sqrt(x)) -> 0.5*log(x)
  1913. //
  1914. // pow, powf, powl:
  1915. // * pow(sqrt(x),y) -> pow(x,y*0.5)
  1916. // * pow(pow(x,y),z)-> pow(x,y*z)
  1917. //
  1918. // signbit:
  1919. // * signbit(cnst) -> cnst'
// * signbit(nncst) -> 0 (if nncst is a non-negative constant)
  1921. //
  1922. // sqrt, sqrtf, sqrtl:
  1923. // * sqrt(expN(x)) -> expN(x*0.5)
  1924. // * sqrt(Nroot(x)) -> pow(x,1/(2*N))
  1925. // * sqrt(pow(x,y)) -> pow(|x|,y*0.5)
  1926. //
  1927. //===----------------------------------------------------------------------===//
  1928. // Fortified Library Call Optimizations
  1929. //===----------------------------------------------------------------------===//
  1930. bool FortifiedLibCallSimplifier::isFortifiedCallFoldable(CallInst *CI,
  1931. unsigned ObjSizeOp,
  1932. unsigned SizeOp,
  1933. bool isString) {
  1934. if (CI->getArgOperand(ObjSizeOp) == CI->getArgOperand(SizeOp))
  1935. return true;
  1936. if (ConstantInt *ObjSizeCI =
  1937. dyn_cast<ConstantInt>(CI->getArgOperand(ObjSizeOp))) {
  1938. if (ObjSizeCI->isAllOnesValue())
  1939. return true;
  1940. // If the object size wasn't -1 (unknown), bail out if we were asked to.
  1941. if (OnlyLowerUnknownSize)
  1942. return false;
  1943. if (isString) {
  1944. uint64_t Len = GetStringLength(CI->getArgOperand(SizeOp));
  1945. // If the length is 0 we don't know how long it is and so we can't
  1946. // remove the check.
  1947. if (Len == 0)
  1948. return false;
  1949. return ObjSizeCI->getZExtValue() >= Len;
  1950. }
  1951. if (ConstantInt *SizeCI = dyn_cast<ConstantInt>(CI->getArgOperand(SizeOp)))
  1952. return ObjSizeCI->getZExtValue() >= SizeCI->getZExtValue();
  1953. }
  1954. return false;
  1955. }
  1956. Value *FortifiedLibCallSimplifier::optimizeMemCpyChk(CallInst *CI,
  1957. IRBuilder<> &B) {
  1958. if (isFortifiedCallFoldable(CI, 3, 2, false)) {
  1959. B.CreateMemCpy(CI->getArgOperand(0), CI->getArgOperand(1),
  1960. CI->getArgOperand(2), 1);
  1961. return CI->getArgOperand(0);
  1962. }
  1963. return nullptr;
  1964. }
  1965. Value *FortifiedLibCallSimplifier::optimizeMemMoveChk(CallInst *CI,
  1966. IRBuilder<> &B) {
  1967. if (isFortifiedCallFoldable(CI, 3, 2, false)) {
  1968. B.CreateMemMove(CI->getArgOperand(0), CI->getArgOperand(1),
  1969. CI->getArgOperand(2), 1);
  1970. return CI->getArgOperand(0);
  1971. }
  1972. return nullptr;
  1973. }
  1974. Value *FortifiedLibCallSimplifier::optimizeMemSetChk(CallInst *CI,
  1975. IRBuilder<> &B) {
  1976. // TODO: Try foldMallocMemset() here.
  1977. if (isFortifiedCallFoldable(CI, 3, 2, false)) {
  1978. Value *Val = B.CreateIntCast(CI->getArgOperand(1), B.getInt8Ty(), false);
  1979. B.CreateMemSet(CI->getArgOperand(0), Val, CI->getArgOperand(2), 1);
  1980. return CI->getArgOperand(0);
  1981. }
  1982. return nullptr;
  1983. }
/// Simplify __strcpy_chk / __stpcpy_chk (\p Func distinguishes the two).
/// Tries, in order: a self-copy fold for __stpcpy_chk, lowering to plain
/// st[rp]cpy when the size check is provably redundant, and finally lowering
/// to __memcpy_chk when the source string length is a known constant.
/// Returns the replacement value, or nullptr to keep the original call.
Value *FortifiedLibCallSimplifier::optimizeStrpCpyChk(CallInst *CI,
                                                      IRBuilder<> &B,
                                                      LibFunc Func) {
  Function *Callee = CI->getCalledFunction();
  StringRef Name = Callee->getName();
  const DataLayout &DL = CI->getModule()->getDataLayout();
  Value *Dst = CI->getArgOperand(0), *Src = CI->getArgOperand(1),
        *ObjSize = CI->getArgOperand(2);
  // __stpcpy_chk(x,x,...) -> x+strlen(x)
  if (Func == LibFunc_stpcpy_chk && !OnlyLowerUnknownSize && Dst == Src) {
    Value *StrLen = emitStrLen(Src, B, DL, TLI);
    return StrLen ? B.CreateInBoundsGEP(B.getInt8Ty(), Dst, StrLen) : nullptr;
  }
  // If a) we don't have any length information, or b) we know this will
  // fit then just lower to a plain st[rp]cpy. Otherwise we'll keep our
  // st[rp]cpy_chk call which may fail at runtime if the size is too long.
  // TODO: It might be nice to get a maximum length out of the possible
  // string lengths for varying.
  if (isFortifiedCallFoldable(CI, 2, 1, true))
    // Name.substr(2, 6) strips the "__" prefix and "_chk" suffix, yielding
    // "strcpy" or "stpcpy".
    return emitStrCpy(Dst, Src, B, TLI, Name.substr(2, 6));
  if (OnlyLowerUnknownSize)
    return nullptr;
  // Maybe we can still fold __st[rp]cpy_chk to __memcpy_chk.
  uint64_t Len = GetStringLength(Src);
  // A length of 0 means GetStringLength couldn't compute it.
  if (Len == 0)
    return nullptr;
  Type *SizeTTy = DL.getIntPtrType(CI->getContext());
  // Len includes the nul terminator, so this copies the terminator too.
  Value *LenV = ConstantInt::get(SizeTTy, Len);
  Value *Ret = emitMemCpyChk(Dst, Src, LenV, ObjSize, B, DL, TLI);
  // If the function was an __stpcpy_chk, and we were able to fold it into
  // a __memcpy_chk, we still need to return the correct end pointer.
  if (Ret && Func == LibFunc_stpcpy_chk)
    // Dst + (Len - 1) points at the copied nul terminator, which is
    // stpcpy's documented return value.
    return B.CreateGEP(B.getInt8Ty(), Dst, ConstantInt::get(SizeTTy, Len - 1));
  return Ret;
}
  2019. Value *FortifiedLibCallSimplifier::optimizeStrpNCpyChk(CallInst *CI,
  2020. IRBuilder<> &B,
  2021. LibFunc Func) {
  2022. Function *Callee = CI->getCalledFunction();
  2023. StringRef Name = Callee->getName();
  2024. if (isFortifiedCallFoldable(CI, 3, 2, false)) {
  2025. Value *Ret = emitStrNCpy(CI->getArgOperand(0), CI->getArgOperand(1),
  2026. CI->getArgOperand(2), B, TLI, Name.substr(2, 7));
  2027. return Ret;
  2028. }
  2029. return nullptr;
  2030. }
  2031. Value *FortifiedLibCallSimplifier::optimizeCall(CallInst *CI) {
  2032. // FIXME: We shouldn't be changing "nobuiltin" or TLI unavailable calls here.
  2033. // Some clang users checked for _chk libcall availability using:
  2034. // __has_builtin(__builtin___memcpy_chk)
  2035. // When compiling with -fno-builtin, this is always true.
  2036. // When passing -ffreestanding/-mkernel, which both imply -fno-builtin, we
  2037. // end up with fortified libcalls, which isn't acceptable in a freestanding
  2038. // environment which only provides their non-fortified counterparts.
  2039. //
  2040. // Until we change clang and/or teach external users to check for availability
  2041. // differently, disregard the "nobuiltin" attribute and TLI::has.
  2042. //
  2043. // PR23093.
  2044. LibFunc Func;
  2045. Function *Callee = CI->getCalledFunction();
  2046. SmallVector<OperandBundleDef, 2> OpBundles;
  2047. CI->getOperandBundlesAsDefs(OpBundles);
  2048. IRBuilder<> Builder(CI, /*FPMathTag=*/nullptr, OpBundles);
  2049. bool isCallingConvC = isCallingConvCCompatible(CI);
  2050. // First, check that this is a known library functions and that the prototype
  2051. // is correct.
  2052. if (!TLI->getLibFunc(*Callee, Func))
  2053. return nullptr;
  2054. // We never change the calling convention.
  2055. if (!ignoreCallingConv(Func) && !isCallingConvC)
  2056. return nullptr;
  2057. switch (Func) {
  2058. case LibFunc_memcpy_chk:
  2059. return optimizeMemCpyChk(CI, Builder);
  2060. case LibFunc_memmove_chk:
  2061. return optimizeMemMoveChk(CI, Builder);
  2062. case LibFunc_memset_chk:
  2063. return optimizeMemSetChk(CI, Builder);
  2064. case LibFunc_stpcpy_chk:
  2065. case LibFunc_strcpy_chk:
  2066. return optimizeStrpCpyChk(CI, Builder, Func);
  2067. case LibFunc_stpncpy_chk:
  2068. case LibFunc_strncpy_chk:
  2069. return optimizeStrpNCpyChk(CI, Builder, Func);
  2070. default:
  2071. break;
  2072. }
  2073. return nullptr;
  2074. }
/// Construct a fortified-libcall simplifier.
///
/// \param TLI Target library info used to recognize the _chk libcalls.
/// \param OnlyLowerUnknownSize When true, only calls whose object size is -1
///        (unknown) are lowered; calls with a known constant object size are
///        left intact (see isFortifiedCallFoldable).
FortifiedLibCallSimplifier::FortifiedLibCallSimplifier(
    const TargetLibraryInfo *TLI, bool OnlyLowerUnknownSize)
    : TLI(TLI), OnlyLowerUnknownSize(OnlyLowerUnknownSize) {}