  1. //===- InstCombineSelect.cpp ----------------------------------------------===//
  2. //
  3. // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
  4. // See https://llvm.org/LICENSE.txt for license information.
  5. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
  6. //
  7. //===----------------------------------------------------------------------===//
  8. //
  9. // This file implements the visitSelect function.
  10. //
  11. //===----------------------------------------------------------------------===//
  12. #include "InstCombineInternal.h"
  13. #include "llvm/ADT/APInt.h"
  14. #include "llvm/ADT/Optional.h"
  15. #include "llvm/ADT/STLExtras.h"
  16. #include "llvm/ADT/SmallVector.h"
  17. #include "llvm/Analysis/AssumptionCache.h"
  18. #include "llvm/Analysis/CmpInstAnalysis.h"
  19. #include "llvm/Analysis/InstructionSimplify.h"
  20. #include "llvm/Analysis/ValueTracking.h"
  21. #include "llvm/IR/BasicBlock.h"
  22. #include "llvm/IR/Constant.h"
  23. #include "llvm/IR/Constants.h"
  24. #include "llvm/IR/DerivedTypes.h"
  25. #include "llvm/IR/IRBuilder.h"
  26. #include "llvm/IR/InstrTypes.h"
  27. #include "llvm/IR/Instruction.h"
  28. #include "llvm/IR/Instructions.h"
  29. #include "llvm/IR/IntrinsicInst.h"
  30. #include "llvm/IR/Intrinsics.h"
  31. #include "llvm/IR/Operator.h"
  32. #include "llvm/IR/PatternMatch.h"
  33. #include "llvm/IR/Type.h"
  34. #include "llvm/IR/User.h"
  35. #include "llvm/IR/Value.h"
  36. #include "llvm/Support/Casting.h"
  37. #include "llvm/Support/ErrorHandling.h"
  38. #include "llvm/Support/KnownBits.h"
  39. #include "llvm/Transforms/InstCombine/InstCombineWorklist.h"
  40. #include <cassert>
  41. #include <utility>
  42. using namespace llvm;
  43. using namespace PatternMatch;
  44. #define DEBUG_TYPE "instcombine"
  45. static Value *createMinMax(InstCombiner::BuilderTy &Builder,
  46. SelectPatternFlavor SPF, Value *A, Value *B) {
  47. CmpInst::Predicate Pred = getMinMaxPred(SPF);
  48. assert(CmpInst::isIntPredicate(Pred) && "Expected integer predicate");
  49. return Builder.CreateSelect(Builder.CreateICmp(Pred, A, B), A, B);
  50. }
  51. /// Replace a select operand based on an equality comparison with the identity
  52. /// constant of a binop.
  53. static Instruction *foldSelectBinOpIdentity(SelectInst &Sel,
  54. const TargetLibraryInfo &TLI) {
  55. // The select condition must be an equality compare with a constant operand.
  56. Value *X;
  57. Constant *C;
  58. CmpInst::Predicate Pred;
  59. if (!match(Sel.getCondition(), m_Cmp(Pred, m_Value(X), m_Constant(C))))
  60. return nullptr;
  61. bool IsEq;
  62. if (ICmpInst::isEquality(Pred))
  63. IsEq = Pred == ICmpInst::ICMP_EQ;
  64. else if (Pred == FCmpInst::FCMP_OEQ)
  65. IsEq = true;
  66. else if (Pred == FCmpInst::FCMP_UNE)
  67. IsEq = false;
  68. else
  69. return nullptr;
  70. // A select operand must be a binop.
  71. BinaryOperator *BO;
  72. if (!match(Sel.getOperand(IsEq ? 1 : 2), m_BinOp(BO)))
  73. return nullptr;
  74. // The compare constant must be the identity constant for that binop.
  75. // If this a floating-point compare with 0.0, any zero constant will do.
  76. Type *Ty = BO->getType();
  77. Constant *IdC = ConstantExpr::getBinOpIdentity(BO->getOpcode(), Ty, true);
  78. if (IdC != C) {
  79. if (!IdC || !CmpInst::isFPPredicate(Pred))
  80. return nullptr;
  81. if (!match(IdC, m_AnyZeroFP()) || !match(C, m_AnyZeroFP()))
  82. return nullptr;
  83. }
  84. // Last, match the compare variable operand with a binop operand.
  85. Value *Y;
  86. if (!BO->isCommutative() && !match(BO, m_BinOp(m_Value(Y), m_Specific(X))))
  87. return nullptr;
  88. if (!match(BO, m_c_BinOp(m_Value(Y), m_Specific(X))))
  89. return nullptr;
  90. // +0.0 compares equal to -0.0, and so it does not behave as required for this
91. // transform. Bail out if we cannot exclude that possibility.
  92. if (isa<FPMathOperator>(BO))
  93. if (!BO->hasNoSignedZeros() && !CannotBeNegativeZero(Y, &TLI))
  94. return nullptr;
  95. // BO = binop Y, X
  96. // S = { select (cmp eq X, C), BO, ? } or { select (cmp ne X, C), ?, BO }
  97. // =>
  98. // S = { select (cmp eq X, C), Y, ? } or { select (cmp ne X, C), ?, Y }
  99. Sel.setOperand(IsEq ? 1 : 2, Y);
  100. return &Sel;
  101. }
  102. /// This folds:
  103. /// select (icmp eq (and X, C1)), TC, FC
104. /// iff C1 is a power of 2 and the difference between TC and FC is a power of 2.
  105. /// To something like:
  106. /// (shr (and (X, C1)), (log2(C1) - log2(TC-FC))) + FC
  107. /// Or:
  108. /// (shl (and (X, C1)), (log2(TC-FC) - log2(C1))) + FC
109. /// With some variations depending on whether FC is larger than TC, or the shift
  110. /// isn't needed, or the bit widths don't match.
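/// For illustration, one concrete instance of the shr form (C1 = 8, TC = 0,
/// FC = 4), written as IR:
///   %a = and i32 %x, 8
///   %c = icmp eq i32 %a, 0
///   %r = select i1 %c, i32 0, i32 4
/// -->
///   %a = and i32 %x, 8
///   %r = lshr i32 %a, 1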
  111. static Value *foldSelectICmpAnd(SelectInst &Sel, ICmpInst *Cmp,
  112. InstCombiner::BuilderTy &Builder) {
  113. const APInt *SelTC, *SelFC;
  114. if (!match(Sel.getTrueValue(), m_APInt(SelTC)) ||
  115. !match(Sel.getFalseValue(), m_APInt(SelFC)))
  116. return nullptr;
  117. // If this is a vector select, we need a vector compare.
  118. Type *SelType = Sel.getType();
  119. if (SelType->isVectorTy() != Cmp->getType()->isVectorTy())
  120. return nullptr;
  121. Value *V;
  122. APInt AndMask;
  123. bool CreateAnd = false;
  124. ICmpInst::Predicate Pred = Cmp->getPredicate();
  125. if (ICmpInst::isEquality(Pred)) {
  126. if (!match(Cmp->getOperand(1), m_Zero()))
  127. return nullptr;
  128. V = Cmp->getOperand(0);
  129. const APInt *AndRHS;
  130. if (!match(V, m_And(m_Value(), m_Power2(AndRHS))))
  131. return nullptr;
  132. AndMask = *AndRHS;
  133. } else if (decomposeBitTestICmp(Cmp->getOperand(0), Cmp->getOperand(1),
  134. Pred, V, AndMask)) {
  135. assert(ICmpInst::isEquality(Pred) && "Not equality test?");
  136. if (!AndMask.isPowerOf2())
  137. return nullptr;
  138. CreateAnd = true;
  139. } else {
  140. return nullptr;
  141. }
  142. // In general, when both constants are non-zero, we would need an offset to
  143. // replace the select. This would require more instructions than we started
144. // with. But there's one special case that we handle here because it can
  145. // simplify/reduce the instructions.
  146. APInt TC = *SelTC;
  147. APInt FC = *SelFC;
  148. if (!TC.isNullValue() && !FC.isNullValue()) {
  149. // If the select constants differ by exactly one bit and that's the same
  150. // bit that is masked and checked by the select condition, the select can
  151. // be replaced by bitwise logic to set/clear one bit of the constant result.
  152. if (TC.getBitWidth() != AndMask.getBitWidth() || (TC ^ FC) != AndMask)
  153. return nullptr;
  154. if (CreateAnd) {
  155. // If we have to create an 'and', then we must kill the cmp to not
  156. // increase the instruction count.
  157. if (!Cmp->hasOneUse())
  158. return nullptr;
  159. V = Builder.CreateAnd(V, ConstantInt::get(SelType, AndMask));
  160. }
  161. bool ExtraBitInTC = TC.ugt(FC);
  162. if (Pred == ICmpInst::ICMP_EQ) {
  163. // If the masked bit in V is clear, clear or set the bit in the result:
  164. // (V & AndMaskC) == 0 ? TC : FC --> (V & AndMaskC) ^ TC
  165. // (V & AndMaskC) == 0 ? TC : FC --> (V & AndMaskC) | TC
  166. Constant *C = ConstantInt::get(SelType, TC);
  167. return ExtraBitInTC ? Builder.CreateXor(V, C) : Builder.CreateOr(V, C);
  168. }
  169. if (Pred == ICmpInst::ICMP_NE) {
  170. // If the masked bit in V is set, set or clear the bit in the result:
  171. // (V & AndMaskC) != 0 ? TC : FC --> (V & AndMaskC) | FC
  172. // (V & AndMaskC) != 0 ? TC : FC --> (V & AndMaskC) ^ FC
  173. Constant *C = ConstantInt::get(SelType, FC);
  174. return ExtraBitInTC ? Builder.CreateOr(V, C) : Builder.CreateXor(V, C);
  175. }
  176. llvm_unreachable("Only expecting equality predicates");
  177. }
  178. // Make sure one of the select arms is a power-of-2.
  179. if (!TC.isPowerOf2() && !FC.isPowerOf2())
  180. return nullptr;
  181. // Determine which shift is needed to transform result of the 'and' into the
  182. // desired result.
  183. const APInt &ValC = !TC.isNullValue() ? TC : FC;
  184. unsigned ValZeros = ValC.logBase2();
  185. unsigned AndZeros = AndMask.logBase2();
  186. // Insert the 'and' instruction on the input to the truncate.
  187. if (CreateAnd)
  188. V = Builder.CreateAnd(V, ConstantInt::get(V->getType(), AndMask));
  189. // If types don't match, we can still convert the select by introducing a zext
  190. // or a trunc of the 'and'.
  191. if (ValZeros > AndZeros) {
  192. V = Builder.CreateZExtOrTrunc(V, SelType);
  193. V = Builder.CreateShl(V, ValZeros - AndZeros);
  194. } else if (ValZeros < AndZeros) {
  195. V = Builder.CreateLShr(V, AndZeros - ValZeros);
  196. V = Builder.CreateZExtOrTrunc(V, SelType);
  197. } else {
  198. V = Builder.CreateZExtOrTrunc(V, SelType);
  199. }
200. // Okay, now we know that everything is set up; we just don't know whether we
201. // have an icmp_ne or icmp_eq and whether the true or false val is the zero.
  202. bool ShouldNotVal = !TC.isNullValue();
  203. ShouldNotVal ^= Pred == ICmpInst::ICMP_NE;
  204. if (ShouldNotVal)
  205. V = Builder.CreateXor(V, ValC);
  206. return V;
  207. }
  208. /// We want to turn code that looks like this:
  209. /// %C = or %A, %B
  210. /// %D = select %cond, %C, %A
  211. /// into:
  212. /// %C = select %cond, %B, 0
  213. /// %D = or %A, %C
  214. ///
  215. /// Assuming that the specified instruction is an operand to the select, return
  216. /// a bitmask indicating which operands of this instruction are foldable if they
  217. /// equal the other incoming value of the select.
  218. static unsigned getSelectFoldableOperands(BinaryOperator *I) {
  219. switch (I->getOpcode()) {
  220. case Instruction::Add:
  221. case Instruction::Mul:
  222. case Instruction::And:
  223. case Instruction::Or:
  224. case Instruction::Xor:
  225. return 3; // Can fold through either operand.
  226. case Instruction::Sub: // Can only fold on the amount subtracted.
  227. case Instruction::Shl: // Can only fold on the shift amount.
  228. case Instruction::LShr:
  229. case Instruction::AShr:
  230. return 1;
  231. default:
  232. return 0; // Cannot fold
  233. }
  234. }
  235. /// For the same transformation as the previous function, return the identity
  236. /// constant that goes into the select.
  237. static APInt getSelectFoldableConstant(BinaryOperator *I) {
  238. switch (I->getOpcode()) {
  239. default: llvm_unreachable("This cannot happen!");
  240. case Instruction::Add:
  241. case Instruction::Sub:
  242. case Instruction::Or:
  243. case Instruction::Xor:
  244. case Instruction::Shl:
  245. case Instruction::LShr:
  246. case Instruction::AShr:
  247. return APInt::getNullValue(I->getType()->getScalarSizeInBits());
  248. case Instruction::And:
  249. return APInt::getAllOnesValue(I->getType()->getScalarSizeInBits());
  250. case Instruction::Mul:
  251. return APInt(I->getType()->getScalarSizeInBits(), 1);
  252. }
  253. }
  254. /// We have (select c, TI, FI), and we know that TI and FI have the same opcode.
  255. Instruction *InstCombiner::foldSelectOpOp(SelectInst &SI, Instruction *TI,
  256. Instruction *FI) {
  257. // Don't break up min/max patterns. The hasOneUse checks below prevent that
  258. // for most cases, but vector min/max with bitcasts can be transformed. If the
  259. // one-use restrictions are eased for other patterns, we still don't want to
  260. // obfuscate min/max.
  261. if ((match(&SI, m_SMin(m_Value(), m_Value())) ||
  262. match(&SI, m_SMax(m_Value(), m_Value())) ||
  263. match(&SI, m_UMin(m_Value(), m_Value())) ||
  264. match(&SI, m_UMax(m_Value(), m_Value()))))
  265. return nullptr;
  266. // If this is a cast from the same type, merge.
  267. Value *Cond = SI.getCondition();
  268. Type *CondTy = Cond->getType();
  269. if (TI->getNumOperands() == 1 && TI->isCast()) {
  270. Type *FIOpndTy = FI->getOperand(0)->getType();
  271. if (TI->getOperand(0)->getType() != FIOpndTy)
  272. return nullptr;
  273. // The select condition may be a vector. We may only change the operand
  274. // type if the vector width remains the same (and matches the condition).
  275. if (CondTy->isVectorTy()) {
  276. if (!FIOpndTy->isVectorTy())
  277. return nullptr;
  278. if (CondTy->getVectorNumElements() != FIOpndTy->getVectorNumElements())
  279. return nullptr;
  280. // TODO: If the backend knew how to deal with casts better, we could
  281. // remove this limitation. For now, there's too much potential to create
  282. // worse codegen by promoting the select ahead of size-altering casts
  283. // (PR28160).
  284. //
  285. // Note that ValueTracking's matchSelectPattern() looks through casts
  286. // without checking 'hasOneUse' when it matches min/max patterns, so this
  287. // transform may end up happening anyway.
  288. if (TI->getOpcode() != Instruction::BitCast &&
  289. (!TI->hasOneUse() || !FI->hasOneUse()))
  290. return nullptr;
  291. } else if (!TI->hasOneUse() || !FI->hasOneUse()) {
  292. // TODO: The one-use restrictions for a scalar select could be eased if
  293. // the fold of a select in visitLoadInst() was enhanced to match a pattern
  294. // that includes a cast.
  295. return nullptr;
  296. }
  297. // Fold this by inserting a select from the input values.
  298. Value *NewSI =
  299. Builder.CreateSelect(Cond, TI->getOperand(0), FI->getOperand(0),
  300. SI.getName() + ".v", &SI);
  301. return CastInst::Create(Instruction::CastOps(TI->getOpcode()), NewSI,
  302. TI->getType());
  303. }
  304. // Cond ? -X : -Y --> -(Cond ? X : Y)
  305. Value *X, *Y;
  306. if (match(TI, m_FNeg(m_Value(X))) && match(FI, m_FNeg(m_Value(Y))) &&
  307. (TI->hasOneUse() || FI->hasOneUse())) {
  308. Value *NewSel = Builder.CreateSelect(Cond, X, Y, SI.getName() + ".v", &SI);
  309. // TODO: Remove the hack for the binop form when the unary op is optimized
  310. // properly with all IR passes.
  311. if (TI->getOpcode() != Instruction::FNeg)
  312. return BinaryOperator::CreateFNegFMF(NewSel, cast<BinaryOperator>(TI));
  313. return UnaryOperator::CreateFNeg(NewSel);
  314. }
  315. // Only handle binary operators (including two-operand getelementptr) with
  316. // one-use here. As with the cast case above, it may be possible to relax the
  317. // one-use constraint, but that needs be examined carefully since it may not
  318. // reduce the total number of instructions.
  319. if (TI->getNumOperands() != 2 || FI->getNumOperands() != 2 ||
  320. (!isa<BinaryOperator>(TI) && !isa<GetElementPtrInst>(TI)) ||
  321. !TI->hasOneUse() || !FI->hasOneUse())
  322. return nullptr;
  323. // Figure out if the operations have any operands in common.
  324. Value *MatchOp, *OtherOpT, *OtherOpF;
  325. bool MatchIsOpZero;
  326. if (TI->getOperand(0) == FI->getOperand(0)) {
  327. MatchOp = TI->getOperand(0);
  328. OtherOpT = TI->getOperand(1);
  329. OtherOpF = FI->getOperand(1);
  330. MatchIsOpZero = true;
  331. } else if (TI->getOperand(1) == FI->getOperand(1)) {
  332. MatchOp = TI->getOperand(1);
  333. OtherOpT = TI->getOperand(0);
  334. OtherOpF = FI->getOperand(0);
  335. MatchIsOpZero = false;
  336. } else if (!TI->isCommutative()) {
  337. return nullptr;
  338. } else if (TI->getOperand(0) == FI->getOperand(1)) {
  339. MatchOp = TI->getOperand(0);
  340. OtherOpT = TI->getOperand(1);
  341. OtherOpF = FI->getOperand(0);
  342. MatchIsOpZero = true;
  343. } else if (TI->getOperand(1) == FI->getOperand(0)) {
  344. MatchOp = TI->getOperand(1);
  345. OtherOpT = TI->getOperand(0);
  346. OtherOpF = FI->getOperand(1);
  347. MatchIsOpZero = true;
  348. } else {
  349. return nullptr;
  350. }
  351. // If the select condition is a vector, the operands of the original select's
  352. // operands also must be vectors. This may not be the case for getelementptr
  353. // for example.
  354. if (CondTy->isVectorTy() && (!OtherOpT->getType()->isVectorTy() ||
  355. !OtherOpF->getType()->isVectorTy()))
  356. return nullptr;
  357. // If we reach here, they do have operations in common.
  358. Value *NewSI = Builder.CreateSelect(Cond, OtherOpT, OtherOpF,
  359. SI.getName() + ".v", &SI);
  360. Value *Op0 = MatchIsOpZero ? MatchOp : NewSI;
  361. Value *Op1 = MatchIsOpZero ? NewSI : MatchOp;
  362. if (auto *BO = dyn_cast<BinaryOperator>(TI)) {
  363. BinaryOperator *NewBO = BinaryOperator::Create(BO->getOpcode(), Op0, Op1);
  364. NewBO->copyIRFlags(TI);
  365. NewBO->andIRFlags(FI);
  366. return NewBO;
  367. }
  368. if (auto *TGEP = dyn_cast<GetElementPtrInst>(TI)) {
  369. auto *FGEP = cast<GetElementPtrInst>(FI);
  370. Type *ElementType = TGEP->getResultElementType();
  371. return TGEP->isInBounds() && FGEP->isInBounds()
  372. ? GetElementPtrInst::CreateInBounds(ElementType, Op0, {Op1})
  373. : GetElementPtrInst::Create(ElementType, Op0, {Op1});
  374. }
  375. llvm_unreachable("Expected BinaryOperator or GEP");
  376. return nullptr;
  377. }
  378. static bool isSelect01(const APInt &C1I, const APInt &C2I) {
  379. if (!C1I.isNullValue() && !C2I.isNullValue()) // One side must be zero.
  380. return false;
  381. return C1I.isOneValue() || C1I.isAllOnesValue() ||
  382. C2I.isOneValue() || C2I.isAllOnesValue();
  383. }
  384. /// Try to fold the select into one of the operands to allow further
  385. /// optimization.
  386. Instruction *InstCombiner::foldSelectIntoOp(SelectInst &SI, Value *TrueVal,
  387. Value *FalseVal) {
388. // See the comment above getSelectFoldableOperands for a description of the
  389. // transformation we are doing here.
  390. if (auto *TVI = dyn_cast<BinaryOperator>(TrueVal)) {
  391. if (TVI->hasOneUse() && !isa<Constant>(FalseVal)) {
  392. if (unsigned SFO = getSelectFoldableOperands(TVI)) {
  393. unsigned OpToFold = 0;
  394. if ((SFO & 1) && FalseVal == TVI->getOperand(0)) {
  395. OpToFold = 1;
  396. } else if ((SFO & 2) && FalseVal == TVI->getOperand(1)) {
  397. OpToFold = 2;
  398. }
  399. if (OpToFold) {
  400. APInt CI = getSelectFoldableConstant(TVI);
  401. Value *OOp = TVI->getOperand(2-OpToFold);
  402. // Avoid creating select between 2 constants unless it's selecting
  403. // between 0, 1 and -1.
  404. const APInt *OOpC;
  405. bool OOpIsAPInt = match(OOp, m_APInt(OOpC));
  406. if (!isa<Constant>(OOp) || (OOpIsAPInt && isSelect01(CI, *OOpC))) {
  407. Value *C = ConstantInt::get(OOp->getType(), CI);
  408. Value *NewSel = Builder.CreateSelect(SI.getCondition(), OOp, C);
  409. NewSel->takeName(TVI);
  410. BinaryOperator *BO = BinaryOperator::Create(TVI->getOpcode(),
  411. FalseVal, NewSel);
  412. BO->copyIRFlags(TVI);
  413. return BO;
  414. }
  415. }
  416. }
  417. }
  418. }
  419. if (auto *FVI = dyn_cast<BinaryOperator>(FalseVal)) {
  420. if (FVI->hasOneUse() && !isa<Constant>(TrueVal)) {
  421. if (unsigned SFO = getSelectFoldableOperands(FVI)) {
  422. unsigned OpToFold = 0;
  423. if ((SFO & 1) && TrueVal == FVI->getOperand(0)) {
  424. OpToFold = 1;
  425. } else if ((SFO & 2) && TrueVal == FVI->getOperand(1)) {
  426. OpToFold = 2;
  427. }
  428. if (OpToFold) {
  429. APInt CI = getSelectFoldableConstant(FVI);
  430. Value *OOp = FVI->getOperand(2-OpToFold);
  431. // Avoid creating select between 2 constants unless it's selecting
  432. // between 0, 1 and -1.
  433. const APInt *OOpC;
  434. bool OOpIsAPInt = match(OOp, m_APInt(OOpC));
  435. if (!isa<Constant>(OOp) || (OOpIsAPInt && isSelect01(CI, *OOpC))) {
  436. Value *C = ConstantInt::get(OOp->getType(), CI);
  437. Value *NewSel = Builder.CreateSelect(SI.getCondition(), C, OOp);
  438. NewSel->takeName(FVI);
  439. BinaryOperator *BO = BinaryOperator::Create(FVI->getOpcode(),
  440. TrueVal, NewSel);
  441. BO->copyIRFlags(FVI);
  442. return BO;
  443. }
  444. }
  445. }
  446. }
  447. }
  448. return nullptr;
  449. }
  450. /// We want to turn:
  451. /// (select (icmp eq (and X, Y), 0), (and (lshr X, Z), 1), 1)
  452. /// into:
  453. /// zext (icmp ne i32 (and X, (or Y, (shl 1, Z))), 0)
  454. /// Note:
  455. /// Z may be 0 if lshr is missing.
  456. /// Worst-case scenario is that we will replace 5 instructions with 5 different
457. /// instructions, but we get rid of the select.
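/// Why this is valid: if (X & Y) != 0, both forms produce 1; if (X & Y) == 0,
/// then X & (Y | (1 << Z)) reduces to X & (1 << Z), which is nonzero exactly
/// when bit Z of X is set, i.e. exactly when ((X >> Z) & 1) is 1.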
  458. static Instruction *foldSelectICmpAndAnd(Type *SelType, const ICmpInst *Cmp,
  459. Value *TVal, Value *FVal,
  460. InstCombiner::BuilderTy &Builder) {
  461. if (!(Cmp->hasOneUse() && Cmp->getOperand(0)->hasOneUse() &&
  462. Cmp->getPredicate() == ICmpInst::ICMP_EQ &&
  463. match(Cmp->getOperand(1), m_Zero()) && match(FVal, m_One())))
  464. return nullptr;
  465. // The TrueVal has general form of: and %B, 1
  466. Value *B;
  467. if (!match(TVal, m_OneUse(m_And(m_Value(B), m_One()))))
  468. return nullptr;
  469. // Where %B may be optionally shifted: lshr %X, %Z.
  470. Value *X, *Z;
  471. const bool HasShift = match(B, m_OneUse(m_LShr(m_Value(X), m_Value(Z))));
  472. if (!HasShift)
  473. X = B;
  474. Value *Y;
  475. if (!match(Cmp->getOperand(0), m_c_And(m_Specific(X), m_Value(Y))))
  476. return nullptr;
  477. // ((X & Y) == 0) ? ((X >> Z) & 1) : 1 --> (X & (Y | (1 << Z))) != 0
  478. // ((X & Y) == 0) ? (X & 1) : 1 --> (X & (Y | 1)) != 0
  479. Constant *One = ConstantInt::get(SelType, 1);
  480. Value *MaskB = HasShift ? Builder.CreateShl(One, Z) : One;
  481. Value *FullMask = Builder.CreateOr(Y, MaskB);
  482. Value *MaskedX = Builder.CreateAnd(X, FullMask);
  483. Value *ICmpNeZero = Builder.CreateIsNotNull(MaskedX);
  484. return new ZExtInst(ICmpNeZero, SelType);
  485. }
  486. /// We want to turn:
  487. /// (select (icmp sgt x, C), lshr (X, Y), ashr (X, Y)); iff C s>= -1
  488. /// (select (icmp slt x, C), ashr (X, Y), lshr (X, Y)); iff C s>= 0
  489. /// into:
  490. /// ashr (X, Y)
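/// Rationale: the lshr arm is only selected while X is known non-negative
/// (either X s> C with C s>= -1, or the failure of X s< C with C s>= 0), and
/// for non-negative X the logical and arithmetic right shifts agree; the
/// other arm is already ashr (X, Y), so the whole select is just ashr (X, Y).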
  491. static Value *foldSelectICmpLshrAshr(const ICmpInst *IC, Value *TrueVal,
  492. Value *FalseVal,
  493. InstCombiner::BuilderTy &Builder) {
  494. ICmpInst::Predicate Pred = IC->getPredicate();
  495. Value *CmpLHS = IC->getOperand(0);
  496. Value *CmpRHS = IC->getOperand(1);
  497. if (!CmpRHS->getType()->isIntOrIntVectorTy())
  498. return nullptr;
  499. Value *X, *Y;
  500. unsigned Bitwidth = CmpRHS->getType()->getScalarSizeInBits();
  501. if ((Pred != ICmpInst::ICMP_SGT ||
  502. !match(CmpRHS,
  503. m_SpecificInt_ICMP(ICmpInst::ICMP_SGE, APInt(Bitwidth, -1)))) &&
  504. (Pred != ICmpInst::ICMP_SLT ||
  505. !match(CmpRHS,
  506. m_SpecificInt_ICMP(ICmpInst::ICMP_SGE, APInt(Bitwidth, 0)))))
  507. return nullptr;
  508. // Canonicalize so that ashr is in FalseVal.
  509. if (Pred == ICmpInst::ICMP_SLT)
  510. std::swap(TrueVal, FalseVal);
  511. if (match(TrueVal, m_LShr(m_Value(X), m_Value(Y))) &&
  512. match(FalseVal, m_AShr(m_Specific(X), m_Specific(Y))) &&
  513. match(CmpLHS, m_Specific(X))) {
  514. const auto *Ashr = cast<Instruction>(FalseVal);
515. // If lshr is not exact and ashr is, this new ashr must not be exact.
  516. bool IsExact = Ashr->isExact() && cast<Instruction>(TrueVal)->isExact();
  517. return Builder.CreateAShr(X, Y, IC->getName(), IsExact);
  518. }
  519. return nullptr;
  520. }
  521. /// We want to turn:
  522. /// (select (icmp eq (and X, C1), 0), Y, (or Y, C2))
  523. /// into:
  524. /// (or (shl (and X, C1), C3), Y)
  525. /// iff:
  526. /// C1 and C2 are both powers of 2
  527. /// where:
  528. /// C3 = Log(C2) - Log(C1)
  529. ///
  530. /// This transform handles cases where:
  531. /// 1. The icmp predicate is inverted
  532. /// 2. The select operands are reversed
  533. /// 3. The magnitude of C2 and C1 are flipped
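/// For illustration, the base case with C1 = 1 and C2 = 4 (so C3 = 2):
///   %a = and i32 %x, 1
///   %c = icmp eq i32 %a, 0
///   %o = or i32 %y, 4
///   %r = select i1 %c, i32 %y, i32 %o
/// -->
///   %a = and i32 %x, 1
///   %s = shl i32 %a, 2
///   %r = or i32 %s, %y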
  534. static Value *foldSelectICmpAndOr(const ICmpInst *IC, Value *TrueVal,
  535. Value *FalseVal,
  536. InstCombiner::BuilderTy &Builder) {
  537. // Only handle integer compares. Also, if this is a vector select, we need a
  538. // vector compare.
  539. if (!TrueVal->getType()->isIntOrIntVectorTy() ||
  540. TrueVal->getType()->isVectorTy() != IC->getType()->isVectorTy())
  541. return nullptr;
  542. Value *CmpLHS = IC->getOperand(0);
  543. Value *CmpRHS = IC->getOperand(1);
  544. Value *V;
  545. unsigned C1Log;
  546. bool IsEqualZero;
  547. bool NeedAnd = false;
  548. if (IC->isEquality()) {
  549. if (!match(CmpRHS, m_Zero()))
  550. return nullptr;
  551. const APInt *C1;
  552. if (!match(CmpLHS, m_And(m_Value(), m_Power2(C1))))
  553. return nullptr;
  554. V = CmpLHS;
  555. C1Log = C1->logBase2();
  556. IsEqualZero = IC->getPredicate() == ICmpInst::ICMP_EQ;
  557. } else if (IC->getPredicate() == ICmpInst::ICMP_SLT ||
  558. IC->getPredicate() == ICmpInst::ICMP_SGT) {
  559. // We also need to recognize (icmp slt (trunc (X)), 0) and
  560. // (icmp sgt (trunc (X)), -1).
  561. IsEqualZero = IC->getPredicate() == ICmpInst::ICMP_SGT;
  562. if ((IsEqualZero && !match(CmpRHS, m_AllOnes())) ||
  563. (!IsEqualZero && !match(CmpRHS, m_Zero())))
  564. return nullptr;
  565. if (!match(CmpLHS, m_OneUse(m_Trunc(m_Value(V)))))
  566. return nullptr;
  567. C1Log = CmpLHS->getType()->getScalarSizeInBits() - 1;
  568. NeedAnd = true;
  569. } else {
  570. return nullptr;
  571. }
  572. const APInt *C2;
  573. bool OrOnTrueVal = false;
  574. bool OrOnFalseVal = match(FalseVal, m_Or(m_Specific(TrueVal), m_Power2(C2)));
  575. if (!OrOnFalseVal)
  576. OrOnTrueVal = match(TrueVal, m_Or(m_Specific(FalseVal), m_Power2(C2)));
  577. if (!OrOnFalseVal && !OrOnTrueVal)
  578. return nullptr;
  579. Value *Y = OrOnFalseVal ? TrueVal : FalseVal;
  580. unsigned C2Log = C2->logBase2();
  581. bool NeedXor = (!IsEqualZero && OrOnFalseVal) || (IsEqualZero && OrOnTrueVal);
  582. bool NeedShift = C1Log != C2Log;
  583. bool NeedZExtTrunc = Y->getType()->getScalarSizeInBits() !=
  584. V->getType()->getScalarSizeInBits();
  585. // Make sure we don't create more instructions than we save.
  586. Value *Or = OrOnFalseVal ? FalseVal : TrueVal;
  587. if ((NeedShift + NeedXor + NeedZExtTrunc) >
  588. (IC->hasOneUse() + Or->hasOneUse()))
  589. return nullptr;
  590. if (NeedAnd) {
  591. // Insert the AND instruction on the input to the truncate.
  592. APInt C1 = APInt::getOneBitSet(V->getType()->getScalarSizeInBits(), C1Log);
  593. V = Builder.CreateAnd(V, ConstantInt::get(V->getType(), C1));
  594. }
  595. if (C2Log > C1Log) {
  596. V = Builder.CreateZExtOrTrunc(V, Y->getType());
  597. V = Builder.CreateShl(V, C2Log - C1Log);
  598. } else if (C1Log > C2Log) {
  599. V = Builder.CreateLShr(V, C1Log - C2Log);
  600. V = Builder.CreateZExtOrTrunc(V, Y->getType());
  601. } else
  602. V = Builder.CreateZExtOrTrunc(V, Y->getType());
  603. if (NeedXor)
  604. V = Builder.CreateXor(V, *C2);
  605. return Builder.CreateOr(V, Y);
  606. }
  607. /// Transform patterns such as (a > b) ? a - b : 0 into usub.sat(a, b).
  608. /// There are 8 commuted/swapped variants of this pattern.
  609. /// TODO: Also support a - UMIN(a,b) patterns.
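/// For illustration, the simplest variant as IR:
///   %c = icmp ugt i32 %a, %b
///   %s = sub i32 %a, %b
///   %r = select i1 %c, i32 %s, i32 0
/// -->
///   %r = call i32 @llvm.usub.sat.i32(i32 %a, i32 %b)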
  610. static Value *canonicalizeSaturatedSubtract(const ICmpInst *ICI,
  611. const Value *TrueVal,
  612. const Value *FalseVal,
  613. InstCombiner::BuilderTy &Builder) {
  614. ICmpInst::Predicate Pred = ICI->getPredicate();
  615. if (!ICmpInst::isUnsigned(Pred))
  616. return nullptr;
  617. // (b > a) ? 0 : a - b -> (b <= a) ? a - b : 0
  618. if (match(TrueVal, m_Zero())) {
  619. Pred = ICmpInst::getInversePredicate(Pred);
  620. std::swap(TrueVal, FalseVal);
  621. }
  622. if (!match(FalseVal, m_Zero()))
  623. return nullptr;
  624. Value *A = ICI->getOperand(0);
  625. Value *B = ICI->getOperand(1);
  626. if (Pred == ICmpInst::ICMP_ULE || Pred == ICmpInst::ICMP_ULT) {
  627. // (b < a) ? a - b : 0 -> (a > b) ? a - b : 0
  628. std::swap(A, B);
  629. Pred = ICmpInst::getSwappedPredicate(Pred);
  630. }
  631. assert((Pred == ICmpInst::ICMP_UGE || Pred == ICmpInst::ICMP_UGT) &&
  632. "Unexpected isUnsigned predicate!");
  633. // Account for swapped form of subtraction: ((a > b) ? b - a : 0).
  634. bool IsNegative = false;
  635. if (match(TrueVal, m_Sub(m_Specific(B), m_Specific(A))))
  636. IsNegative = true;
  637. else if (!match(TrueVal, m_Sub(m_Specific(A), m_Specific(B))))
  638. return nullptr;
  639. // If sub is used anywhere else, we wouldn't be able to eliminate it
  640. // afterwards.
  641. if (!TrueVal->hasOneUse())
  642. return nullptr;
  643. // (a > b) ? a - b : 0 -> usub.sat(a, b)
  644. // (a > b) ? b - a : 0 -> -usub.sat(a, b)
  645. Value *Result = Builder.CreateBinaryIntrinsic(Intrinsic::usub_sat, A, B);
  646. if (IsNegative)
  647. Result = Builder.CreateNeg(Result);
  648. return Result;
  649. }
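/// Fold icmp+select idioms for unsigned saturating addition into a call to
/// the uadd.sat intrinsic, e.g.:
///   (X u< ~C) ? (X + C) : -1  --> uadd.sat(X, C)
///   (~X u< Y) ? -1 : (X + Y)  --> uadd.sat(X, Y)
/// Several commuted variants are handled in the body below.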
  650. static Value *canonicalizeSaturatedAdd(ICmpInst *Cmp, Value *TVal, Value *FVal,
  651. InstCombiner::BuilderTy &Builder) {
  652. if (!Cmp->hasOneUse())
  653. return nullptr;
  654. // Match unsigned saturated add with constant.
  655. Value *Cmp0 = Cmp->getOperand(0);
  656. Value *Cmp1 = Cmp->getOperand(1);
  657. ICmpInst::Predicate Pred = Cmp->getPredicate();
  658. Value *X;
  659. const APInt *C, *CmpC;
  660. if (Pred == ICmpInst::ICMP_ULT &&
  661. match(TVal, m_Add(m_Value(X), m_APInt(C))) && X == Cmp0 &&
  662. match(FVal, m_AllOnes()) && match(Cmp1, m_APInt(CmpC)) && *CmpC == ~*C) {
  663. // (X u< ~C) ? (X + C) : -1 --> uadd.sat(X, C)
  664. return Builder.CreateBinaryIntrinsic(
  665. Intrinsic::uadd_sat, X, ConstantInt::get(X->getType(), *C));
  666. }
  667. // Match unsigned saturated add of 2 variables with an unnecessary 'not'.
  668. // There are 8 commuted variants.
  669. // Canonicalize -1 (saturated result) to true value of the select. Just
  670. // swapping the compare operands is legal, because the selected value is the
  671. // same in case of equality, so we can interchange u< and u<=.
  672. if (match(FVal, m_AllOnes())) {
  673. std::swap(TVal, FVal);
  674. std::swap(Cmp0, Cmp1);
  675. }
  676. if (!match(TVal, m_AllOnes()))
  677. return nullptr;
  678. // Canonicalize predicate to 'ULT'.
  679. if (Pred == ICmpInst::ICMP_UGT) {
  680. Pred = ICmpInst::ICMP_ULT;
  681. std::swap(Cmp0, Cmp1);
  682. }
  683. if (Pred != ICmpInst::ICMP_ULT)
  684. return nullptr;
  685. // Match unsigned saturated add of 2 variables with an unnecessary 'not'.
  686. Value *Y;
  687. if (match(Cmp0, m_Not(m_Value(X))) &&
  688. match(FVal, m_c_Add(m_Specific(X), m_Value(Y))) && Y == Cmp1) {
  689. // (~X u< Y) ? -1 : (X + Y) --> uadd.sat(X, Y)
  690. // (~X u< Y) ? -1 : (Y + X) --> uadd.sat(X, Y)
  691. return Builder.CreateBinaryIntrinsic(Intrinsic::uadd_sat, X, Y);
  692. }
  693. // The 'not' op may be included in the sum but not the compare.
  694. X = Cmp0;
  695. Y = Cmp1;
  696. if (match(FVal, m_c_Add(m_Not(m_Specific(X)), m_Specific(Y)))) {
  697. // (X u< Y) ? -1 : (~X + Y) --> uadd.sat(~X, Y)
  698. // (X u< Y) ? -1 : (Y + ~X) --> uadd.sat(Y, ~X)
  699. BinaryOperator *BO = cast<BinaryOperator>(FVal);
  700. return Builder.CreateBinaryIntrinsic(
  701. Intrinsic::uadd_sat, BO->getOperand(0), BO->getOperand(1));
  702. }
  703. return nullptr;
  704. }
  705. /// Fold the following code sequence:
  706. /// \code
  707. /// int a = ctlz(x & -x);
708. ///   x ? 31 - a : a;
709. /// \endcode
  710. ///
  711. /// into:
  712. /// cttz(x)
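/// Rationale: for nonzero x, (x & -x) isolates the lowest set bit, i.e. it
/// equals 1 << cttz(x), so ctlz(x & -x) == BitWidth - 1 - cttz(x); the
/// "BitWidth - 1 - a" arm of the select is therefore exactly cttz(x).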
  713. static Instruction *foldSelectCtlzToCttz(ICmpInst *ICI, Value *TrueVal,
  714. Value *FalseVal,
  715. InstCombiner::BuilderTy &Builder) {
  716. unsigned BitWidth = TrueVal->getType()->getScalarSizeInBits();
  717. if (!ICI->isEquality() || !match(ICI->getOperand(1), m_Zero()))
  718. return nullptr;
  719. if (ICI->getPredicate() == ICmpInst::ICMP_NE)
  720. std::swap(TrueVal, FalseVal);
  721. if (!match(FalseVal,
  722. m_Xor(m_Deferred(TrueVal), m_SpecificInt(BitWidth - 1))))
  723. return nullptr;
  724. if (!match(TrueVal, m_Intrinsic<Intrinsic::ctlz>()))
  725. return nullptr;
  726. Value *X = ICI->getOperand(0);
  727. auto *II = cast<IntrinsicInst>(TrueVal);
  728. if (!match(II->getOperand(0), m_c_And(m_Specific(X), m_Neg(m_Specific(X)))))
  729. return nullptr;
  730. Function *F = Intrinsic::getDeclaration(II->getModule(), Intrinsic::cttz,
  731. II->getType());
  732. return CallInst::Create(F, {X, II->getArgOperand(1)});
  733. }
  734. /// Attempt to fold a cttz/ctlz followed by a icmp plus select into a single
  735. /// call to cttz/ctlz with flag 'is_zero_undef' cleared.
  736. ///
  737. /// For example, we can fold the following code sequence:
  738. /// \code
  739. /// %0 = tail call i32 @llvm.cttz.i32(i32 %x, i1 true)
  740. /// %1 = icmp ne i32 %x, 0
  741. /// %2 = select i1 %1, i32 %0, i32 32
742. /// \endcode
  743. ///
  744. /// into:
  745. /// %0 = tail call i32 @llvm.cttz.i32(i32 %x, i1 false)
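/// Rationale: with the flag cleared, cttz/ctlz are defined to return the bit
/// width for a zero input, which is exactly the value the select supplies on
/// that path, so the icmp and the select become redundant.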
  746. static Value *foldSelectCttzCtlz(ICmpInst *ICI, Value *TrueVal, Value *FalseVal,
  747. InstCombiner::BuilderTy &Builder) {
  748. ICmpInst::Predicate Pred = ICI->getPredicate();
  749. Value *CmpLHS = ICI->getOperand(0);
  750. Value *CmpRHS = ICI->getOperand(1);
  751. // Check if the condition value compares a value for equality against zero.
  752. if (!ICI->isEquality() || !match(CmpRHS, m_Zero()))
  753. return nullptr;
  754. Value *Count = FalseVal;
  755. Value *ValueOnZero = TrueVal;
  756. if (Pred == ICmpInst::ICMP_NE)
  757. std::swap(Count, ValueOnZero);
  758. // Skip zero extend/truncate.
  759. Value *V = nullptr;
  760. if (match(Count, m_ZExt(m_Value(V))) ||
  761. match(Count, m_Trunc(m_Value(V))))
  762. Count = V;
  763. // Check that 'Count' is a call to intrinsic cttz/ctlz. Also check that the
  764. // input to the cttz/ctlz is used as LHS for the compare instruction.
  765. if (!match(Count, m_Intrinsic<Intrinsic::cttz>(m_Specific(CmpLHS))) &&
  766. !match(Count, m_Intrinsic<Intrinsic::ctlz>(m_Specific(CmpLHS))))
  767. return nullptr;
  768. IntrinsicInst *II = cast<IntrinsicInst>(Count);
  769. // Check if the value propagated on zero is a constant number equal to the
770. // size in bits of 'Count'.
  771. unsigned SizeOfInBits = Count->getType()->getScalarSizeInBits();
  772. if (match(ValueOnZero, m_SpecificInt(SizeOfInBits))) {
773. // Explicitly clear the 'is_zero_undef' flag.
  774. IntrinsicInst *NewI = cast<IntrinsicInst>(II->clone());
  775. NewI->setArgOperand(1, ConstantInt::getFalse(NewI->getContext()));
  776. Builder.Insert(NewI);
  777. return Builder.CreateZExtOrTrunc(NewI, ValueOnZero->getType());
  778. }
  779. // If the ValueOnZero is not the bitwidth, we can at least make use of the
  780. // fact that the cttz/ctlz result will not be used if the input is zero, so
  781. // it's okay to relax it to undef for that case.
  782. if (II->hasOneUse() && !match(II->getArgOperand(1), m_One()))
  783. II->setArgOperand(1, ConstantInt::getTrue(II->getContext()));
  784. return nullptr;
  785. }
  786. /// Return true if we find and adjust an icmp+select pattern where the compare
  787. /// is with a constant that can be incremented or decremented to match the
  788. /// minimum or maximum idiom.
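/// For illustration: (X s> 3) ? X : 4 is smax(X, 4); rewriting it as
/// (X s< 4) ? 4 : X makes the compare constant equal to the constant select
/// arm, so the generic min/max matching can recognize it.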
  789. static bool adjustMinMax(SelectInst &Sel, ICmpInst &Cmp) {
  790. ICmpInst::Predicate Pred = Cmp.getPredicate();
  791. Value *CmpLHS = Cmp.getOperand(0);
  792. Value *CmpRHS = Cmp.getOperand(1);
  793. Value *TrueVal = Sel.getTrueValue();
  794. Value *FalseVal = Sel.getFalseValue();
  795. // We may move or edit the compare, so make sure the select is the only user.
  796. const APInt *CmpC;
  797. if (!Cmp.hasOneUse() || !match(CmpRHS, m_APInt(CmpC)))
  798. return false;
  799. // These transforms only work for selects of integers or vector selects of
  800. // integer vectors.
  801. Type *SelTy = Sel.getType();
  802. auto *SelEltTy = dyn_cast<IntegerType>(SelTy->getScalarType());
  803. if (!SelEltTy || SelTy->isVectorTy() != Cmp.getType()->isVectorTy())
  804. return false;
  805. Constant *AdjustedRHS;
  806. if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_SGT)
  807. AdjustedRHS = ConstantInt::get(CmpRHS->getType(), *CmpC + 1);
  808. else if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_SLT)
  809. AdjustedRHS = ConstantInt::get(CmpRHS->getType(), *CmpC - 1);
  810. else
  811. return false;
  812. // X > C ? X : C+1 --> X < C+1 ? C+1 : X
  813. // X < C ? X : C-1 --> X > C-1 ? C-1 : X
  814. if ((CmpLHS == TrueVal && AdjustedRHS == FalseVal) ||
  815. (CmpLHS == FalseVal && AdjustedRHS == TrueVal)) {
  816. ; // Nothing to do here. Values match without any sign/zero extension.
  817. }
  818. // Types do not match. Instead of calculating this with mixed types, promote
  819. // all to the larger type. This enables scalar evolution to analyze this
  820. // expression.
  821. else if (CmpRHS->getType()->getScalarSizeInBits() < SelEltTy->getBitWidth()) {
  822. Constant *SextRHS = ConstantExpr::getSExt(AdjustedRHS, SelTy);
  823. // X = sext x; x >s c ? X : C+1 --> X = sext x; X <s C+1 ? C+1 : X
  824. // X = sext x; x <s c ? X : C-1 --> X = sext x; X >s C-1 ? C-1 : X
  825. // X = sext x; x >u c ? X : C+1 --> X = sext x; X <u C+1 ? C+1 : X
  826. // X = sext x; x <u c ? X : C-1 --> X = sext x; X >u C-1 ? C-1 : X
  827. if (match(TrueVal, m_SExt(m_Specific(CmpLHS))) && SextRHS == FalseVal) {
  828. CmpLHS = TrueVal;
  829. AdjustedRHS = SextRHS;
  830. } else if (match(FalseVal, m_SExt(m_Specific(CmpLHS))) &&
  831. SextRHS == TrueVal) {
  832. CmpLHS = FalseVal;
  833. AdjustedRHS = SextRHS;
  834. } else if (Cmp.isUnsigned()) {
  835. Constant *ZextRHS = ConstantExpr::getZExt(AdjustedRHS, SelTy);
  836. // X = zext x; x >u c ? X : C+1 --> X = zext x; X <u C+1 ? C+1 : X
  837. // X = zext x; x <u c ? X : C-1 --> X = zext x; X >u C-1 ? C-1 : X
  838. // zext + signed compare cannot be changed:
  839. // 0xff <s 0x00, but 0x00ff >s 0x0000
  840. if (match(TrueVal, m_ZExt(m_Specific(CmpLHS))) && ZextRHS == FalseVal) {
  841. CmpLHS = TrueVal;
  842. AdjustedRHS = ZextRHS;
  843. } else if (match(FalseVal, m_ZExt(m_Specific(CmpLHS))) &&
  844. ZextRHS == TrueVal) {
  845. CmpLHS = FalseVal;
  846. AdjustedRHS = ZextRHS;
  847. } else {
  848. return false;
  849. }
  850. } else {
  851. return false;
  852. }
  853. } else {
  854. return false;
  855. }
  856. Pred = ICmpInst::getSwappedPredicate(Pred);
  857. CmpRHS = AdjustedRHS;
  858. std::swap(FalseVal, TrueVal);
  859. Cmp.setPredicate(Pred);
  860. Cmp.setOperand(0, CmpLHS);
  861. Cmp.setOperand(1, CmpRHS);
  862. Sel.setOperand(1, TrueVal);
  863. Sel.setOperand(2, FalseVal);
  864. Sel.swapProfMetadata();
  865. // Move the compare instruction right before the select instruction. Otherwise
866. // the compare could end up using the sext/zext value before it is defined.
  867. Cmp.moveBefore(&Sel);
  868. return true;
  869. }
  870. /// If this is an integer min/max (icmp + select) with a constant operand,
  871. /// create the canonical icmp for the min/max operation and canonicalize the
  872. /// constant to the 'false' operand of the select:
  873. /// select (icmp Pred X, C1), C2, X --> select (icmp Pred' X, C2), X, C2
  874. /// Note: if C1 != C2, this will change the icmp constant to the existing
  875. /// constant operand of the select.
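/// For example, smax(X, 0) written with the constant in the 'true' position:
///   select (icmp slt X, 0), 0, X --> select (icmp sgt X, 0), X, 0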
  876. static Instruction *
  877. canonicalizeMinMaxWithConstant(SelectInst &Sel, ICmpInst &Cmp,
  878. InstCombiner::BuilderTy &Builder) {
  879. if (!Cmp.hasOneUse() || !isa<Constant>(Cmp.getOperand(1)))
  880. return nullptr;
  881. // Canonicalize the compare predicate based on whether we have min or max.
  882. Value *LHS, *RHS;
  883. SelectPatternResult SPR = matchSelectPattern(&Sel, LHS, RHS);
  884. if (!SelectPatternResult::isMinOrMax(SPR.Flavor))
  885. return nullptr;
  886. // Is this already canonical?
  887. ICmpInst::Predicate CanonicalPred = getMinMaxPred(SPR.Flavor);
  888. if (Cmp.getOperand(0) == LHS && Cmp.getOperand(1) == RHS &&
  889. Cmp.getPredicate() == CanonicalPred)
  890. return nullptr;
  891. // Create the canonical compare and plug it into the select.
  892. Sel.setCondition(Builder.CreateICmp(CanonicalPred, LHS, RHS));
  893. // If the select operands did not change, we're done.
  894. if (Sel.getTrueValue() == LHS && Sel.getFalseValue() == RHS)
  895. return &Sel;
  896. // If we are swapping the select operands, swap the metadata too.
  897. assert(Sel.getTrueValue() == RHS && Sel.getFalseValue() == LHS &&
  898. "Unexpected results from matchSelectPattern");
  899. Sel.swapValues();
  900. Sel.swapProfMetadata();
  901. return &Sel;
  902. }
  903. /// There are many select variants for each of ABS/NABS.
  904. /// In matchSelectPattern(), there are different compare constants, compare
  905. /// predicates/operands and select operands.
  906. /// In isKnownNegation(), there are different formats of negated operands.
  907. /// Canonicalize all these variants to 1 pattern.
  908. /// This makes CSE more likely.
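/// For illustration, one abs() variant with a non-canonical compare:
///   %neg = sub i32 0, %x
///   %cmp = icmp sgt i32 %x, -1
///   %abs = select i1 %cmp, i32 %x, i32 %neg
/// becomes the canonical sign-bit-test form:
///   %cmp = icmp slt i32 %x, 0
///   %abs = select i1 %cmp, i32 %neg, i32 %x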
  909. static Instruction *canonicalizeAbsNabs(SelectInst &Sel, ICmpInst &Cmp,
  910. InstCombiner::BuilderTy &Builder) {
  911. if (!Cmp.hasOneUse() || !isa<Constant>(Cmp.getOperand(1)))
  912. return nullptr;
  913. // Choose a sign-bit check for the compare (likely simpler for codegen).
  914. // ABS: (X <s 0) ? -X : X
  915. // NABS: (X <s 0) ? X : -X
  916. Value *LHS, *RHS;
  917. SelectPatternFlavor SPF = matchSelectPattern(&Sel, LHS, RHS).Flavor;
  918. if (SPF != SelectPatternFlavor::SPF_ABS &&
  919. SPF != SelectPatternFlavor::SPF_NABS)
  920. return nullptr;
  921. Value *TVal = Sel.getTrueValue();
  922. Value *FVal = Sel.getFalseValue();
  923. assert(isKnownNegation(TVal, FVal) &&
  924. "Unexpected result from matchSelectPattern");
  925. // The compare may use the negated abs()/nabs() operand, or it may use
  926. // negation in non-canonical form such as: sub A, B.
  927. bool CmpUsesNegatedOp = match(Cmp.getOperand(0), m_Neg(m_Specific(TVal))) ||
  928. match(Cmp.getOperand(0), m_Neg(m_Specific(FVal)));
  929. bool CmpCanonicalized = !CmpUsesNegatedOp &&
  930. match(Cmp.getOperand(1), m_ZeroInt()) &&
  931. Cmp.getPredicate() == ICmpInst::ICMP_SLT;
  932. bool RHSCanonicalized = match(RHS, m_Neg(m_Specific(LHS)));
  933. // Is this already canonical?
  934. if (CmpCanonicalized && RHSCanonicalized)
  935. return nullptr;
936. // If RHS is used by instructions other than the compare and select, don't
937. // canonicalize it, so as not to increase the instruction count.
  938. if (!(RHS->hasOneUse() || (RHS->hasNUses(2) && CmpUsesNegatedOp)))
  939. return nullptr;
  940. // Create the canonical compare: icmp slt LHS 0.
  941. if (!CmpCanonicalized) {
  942. Cmp.setPredicate(ICmpInst::ICMP_SLT);
  943. Cmp.setOperand(1, ConstantInt::getNullValue(Cmp.getOperand(0)->getType()));
  944. if (CmpUsesNegatedOp)
  945. Cmp.setOperand(0, LHS);
  946. }
  947. // Create the canonical RHS: RHS = sub (0, LHS).
  948. if (!RHSCanonicalized) {
  949. assert(RHS->hasOneUse() && "RHS use number is not right");
  950. RHS = Builder.CreateNeg(LHS);
  951. if (TVal == LHS) {
  952. Sel.setFalseValue(RHS);
  953. FVal = RHS;
  954. } else {
  955. Sel.setTrueValue(RHS);
  956. TVal = RHS;
  957. }
  958. }
  959. // If the select operands do not change, we're done.
  960. if (SPF == SelectPatternFlavor::SPF_NABS) {
  961. if (TVal == LHS)
  962. return &Sel;
  963. assert(FVal == LHS && "Unexpected results from matchSelectPattern");
  964. } else {
  965. if (FVal == LHS)
  966. return &Sel;
  967. assert(TVal == LHS && "Unexpected results from matchSelectPattern");
  968. }
  969. // We are swapping the select operands, so swap the metadata too.
  970. Sel.swapValues();
  971. Sel.swapProfMetadata();
  972. return &Sel;
  973. }
  974. static Value *simplifyWithOpReplaced(Value *V, Value *Op, Value *ReplaceOp,
  975. const SimplifyQuery &Q) {
  976. // If this is a binary operator, try to simplify it with the replaced op
977. // because we know Op and ReplaceOp are equivalent.
  978. // For example: V = X + 1, Op = X, ReplaceOp = 42
  979. // Simplifies as: add(42, 1) --> 43
  980. if (auto *BO = dyn_cast<BinaryOperator>(V)) {
  981. if (BO->getOperand(0) == Op)
  982. return SimplifyBinOp(BO->getOpcode(), ReplaceOp, BO->getOperand(1), Q);
  983. if (BO->getOperand(1) == Op)
  984. return SimplifyBinOp(BO->getOpcode(), BO->getOperand(0), ReplaceOp, Q);
  985. }
  986. return nullptr;
  987. }
  988. /// If we have a select with an equality comparison, then we know the value in
  989. /// one of the arms of the select. See if substituting this value into an arm
  990. /// and simplifying the result yields the same value as the other arm.
  991. ///
  992. /// To make this transform safe, we must drop poison-generating flags
  993. /// (nsw, etc) if we simplified to a binop because the select may be guarding
  994. /// that poison from propagating. If the existing binop already had no
  995. /// poison-generating flags, then this transform can be done by instsimplify.
  996. ///
  997. /// Consider:
  998. /// %cmp = icmp eq i32 %x, 2147483647
  999. /// %add = add nsw i32 %x, 1
  1000. /// %sel = select i1 %cmp, i32 -2147483648, i32 %add
  1001. ///
  1002. /// We can't replace %sel with %add unless we strip away the flags.
  1003. /// TODO: Wrapping flags could be preserved in some cases with better analysis.
  1004. static Value *foldSelectValueEquivalence(SelectInst &Sel, ICmpInst &Cmp,
  1005. const SimplifyQuery &Q) {
  1006. if (!Cmp.isEquality())
  1007. return nullptr;
  1008. // Canonicalize the pattern to ICMP_EQ by swapping the select operands.
  1009. Value *TrueVal = Sel.getTrueValue(), *FalseVal = Sel.getFalseValue();
  1010. if (Cmp.getPredicate() == ICmpInst::ICMP_NE)
  1011. std::swap(TrueVal, FalseVal);
  1012. // Try each equivalence substitution possibility.
  1013. // We have an 'EQ' comparison, so the select's false value will propagate.
  1014. // Example:
  1015. // (X == 42) ? 43 : (X + 1) --> (X == 42) ? (X + 1) : (X + 1) --> X + 1
  1016. // (X == 42) ? (X + 1) : 43 --> (X == 42) ? (42 + 1) : 43 --> 43
  1017. Value *CmpLHS = Cmp.getOperand(0), *CmpRHS = Cmp.getOperand(1);
  1018. if (simplifyWithOpReplaced(FalseVal, CmpLHS, CmpRHS, Q) == TrueVal ||
  1019. simplifyWithOpReplaced(FalseVal, CmpRHS, CmpLHS, Q) == TrueVal ||
  1020. simplifyWithOpReplaced(TrueVal, CmpLHS, CmpRHS, Q) == FalseVal ||
  1021. simplifyWithOpReplaced(TrueVal, CmpRHS, CmpLHS, Q) == FalseVal) {
  1022. if (auto *FalseInst = dyn_cast<Instruction>(FalseVal))
  1023. FalseInst->dropPoisonGeneratingFlags();
  1024. return FalseVal;
  1025. }
  1026. return nullptr;
  1027. }
  1028. // See if this is a pattern like:
  1029. // %old_cmp1 = icmp slt i32 %x, C2
  1030. // %old_replacement = select i1 %old_cmp1, i32 %target_low, i32 %target_high
  1031. // %old_x_offseted = add i32 %x, C1
  1032. // %old_cmp0 = icmp ult i32 %old_x_offseted, C0
  1033. // %r = select i1 %old_cmp0, i32 %x, i32 %old_replacement
1034. // This can be rewritten as a more canonical pattern:
  1035. // %new_cmp1 = icmp slt i32 %x, -C1
  1036. // %new_cmp2 = icmp sge i32 %x, C0-C1
  1037. // %new_clamped_low = select i1 %new_cmp1, i32 %target_low, i32 %x
  1038. // %r = select i1 %new_cmp2, i32 %target_high, i32 %new_clamped_low
  1039. // Iff -C1 s<= C2 s<= C0-C1
1040. // Also, the ULT predicate can be UGT iff C0 != -1 (+invert result),
1041. // and the SLT predicate can be SGT iff C2 != INT_MAX (+invert result).
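// For illustration, with C1 = 128, C0 = 256, C2 = -128, %target_low = -128
// and %target_high = 127, %r is %x clamped to the signed i8 range
// [-128, 127]; the rewrite expresses that clamp with two explicit signed
// compares.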
  1042. static Instruction *canonicalizeClampLike(SelectInst &Sel0, ICmpInst &Cmp0,
  1043. InstCombiner::BuilderTy &Builder) {
  1044. Value *X = Sel0.getTrueValue();
  1045. Value *Sel1 = Sel0.getFalseValue();
  1046. // First match the condition of the outermost select.
  1047. // Said condition must be one-use.
  1048. if (!Cmp0.hasOneUse())
  1049. return nullptr;
  1050. Value *Cmp00 = Cmp0.getOperand(0);
  1051. Constant *C0;
  1052. if (!match(Cmp0.getOperand(1),
  1053. m_CombineAnd(m_AnyIntegralConstant(), m_Constant(C0))))
  1054. return nullptr;
  1055. // Canonicalize Cmp0 into the form we expect.
  1056. // FIXME: we shouldn't care about lanes that are 'undef' in the end?
  1057. switch (Cmp0.getPredicate()) {
  1058. case ICmpInst::Predicate::ICMP_ULT:
  1059. break; // Great!
  1060. case ICmpInst::Predicate::ICMP_ULE:
  1061. // We'd have to increment C0 by one, and for that it must not have all-ones
  1062. // element, but then it would have been canonicalized to 'ult' before
  1063. // we get here. So we can't do anything useful with 'ule'.
  1064. return nullptr;
  1065. case ICmpInst::Predicate::ICMP_UGT:
  1066. // We want to canonicalize it to 'ult', so we'll need to increment C0,
  1067. // which again means it must not have any all-ones elements.
  1068. if (!match(C0,
  1069. m_SpecificInt_ICMP(ICmpInst::Predicate::ICMP_NE,
  1070. APInt::getAllOnesValue(
  1071. C0->getType()->getScalarSizeInBits()))))
  1072. return nullptr; // Can't do, have all-ones element[s].
  1073. C0 = AddOne(C0);
  1074. std::swap(X, Sel1);
  1075. break;
  1076. case ICmpInst::Predicate::ICMP_UGE:
1077. // The only way we'd get this predicate is if this `icmp` has extra uses,
  1078. // but then we won't be able to do this fold.
  1079. return nullptr;
  1080. default:
  1081. return nullptr; // Unknown predicate.
  1082. }
  1083. // Now that we've canonicalized the ICmp, we know the X we expect;
1084. // the select, on the other hand, must be one-use.
  1085. if (!Sel1->hasOneUse())
  1086. return nullptr;
1087. // We can now finish matching the condition of the outermost select:
1088. // it should be either X itself or an addition of some constant to X.
  1089. Constant *C1;
  1090. if (Cmp00 == X)
  1091. C1 = ConstantInt::getNullValue(Sel0.getType());
  1092. else if (!match(Cmp00,
  1093. m_Add(m_Specific(X),
  1094. m_CombineAnd(m_AnyIntegralConstant(), m_Constant(C1)))))
  1095. return nullptr;
  1096. Value *Cmp1;
  1097. ICmpInst::Predicate Pred1;
  1098. Constant *C2;
  1099. Value *ReplacementLow, *ReplacementHigh;
  1100. if (!match(Sel1, m_Select(m_Value(Cmp1), m_Value(ReplacementLow),
  1101. m_Value(ReplacementHigh))) ||
  1102. !match(Cmp1,
  1103. m_ICmp(Pred1, m_Specific(X),
  1104. m_CombineAnd(m_AnyIntegralConstant(), m_Constant(C2)))))
  1105. return nullptr;
  1106. if (!Cmp1->hasOneUse() && (Cmp00 == X || !Cmp00->hasOneUse()))
  1107. return nullptr; // Not enough one-use instructions for the fold.
  1108. // FIXME: this restriction could be relaxed if Cmp1 can be reused as one of
1109. // the two comparisons we'll need to build.
  1110. // Canonicalize Cmp1 into the form we expect.
  1111. // FIXME: we shouldn't care about lanes that are 'undef' in the end?
  1112. switch (Pred1) {
  1113. case ICmpInst::Predicate::ICMP_SLT:
  1114. break;
  1115. case ICmpInst::Predicate::ICMP_SLE:
  1116. // We'd have to increment C2 by one, and for that it must not have signed
  1117. // max element, but then it would have been canonicalized to 'slt' before
  1118. // we get here. So we can't do anything useful with 'sle'.
  1119. return nullptr;
  1120. case ICmpInst::Predicate::ICMP_SGT:
  1121. // We want to canonicalize it to 'slt', so we'll need to increment C2,
  1122. // which again means it must not have any signed max elements.
  1123. if (!match(C2,
  1124. m_SpecificInt_ICMP(ICmpInst::Predicate::ICMP_NE,
  1125. APInt::getSignedMaxValue(
  1126. C2->getType()->getScalarSizeInBits()))))
  1127. return nullptr; // Can't do, have signed max element[s].
  1128. C2 = AddOne(C2);
  1129. LLVM_FALLTHROUGH;
  1130. case ICmpInst::Predicate::ICMP_SGE:
  1131. // Also non-canonical, but here we don't need to change C2,
1132. // so there are no restrictions on C2 and we can just handle it.
  1133. std::swap(ReplacementLow, ReplacementHigh);
  1134. break;
  1135. default:
  1136. return nullptr; // Unknown predicate.
  1137. }
  1138. // The thresholds of this clamp-like pattern.
  1139. auto *ThresholdLowIncl = ConstantExpr::getNeg(C1);
  1140. auto *ThresholdHighExcl = ConstantExpr::getSub(C0, C1);
1141. // The fold has precondition 1: C2 s>= ThresholdLow
  1142. auto *Precond1 = ConstantExpr::getICmp(ICmpInst::Predicate::ICMP_SGE, C2,
  1143. ThresholdLowIncl);
  1144. if (!match(Precond1, m_One()))
  1145. return nullptr;
1146. // The fold has precondition 2: C2 s<= ThresholdHigh
  1147. auto *Precond2 = ConstantExpr::getICmp(ICmpInst::Predicate::ICMP_SLE, C2,
  1148. ThresholdHighExcl);
  1149. if (!match(Precond2, m_One()))
  1150. return nullptr;
  1151. // All good, finally emit the new pattern.
  1152. Value *ShouldReplaceLow = Builder.CreateICmpSLT(X, ThresholdLowIncl);
  1153. Value *ShouldReplaceHigh = Builder.CreateICmpSGE(X, ThresholdHighExcl);
  1154. Value *MaybeReplacedLow =
  1155. Builder.CreateSelect(ShouldReplaceLow, ReplacementLow, X);
  1156. Instruction *MaybeReplacedHigh =
  1157. SelectInst::Create(ShouldReplaceHigh, ReplacementHigh, MaybeReplacedLow);
  1158. return MaybeReplacedHigh;
  1159. }
  1160. // If we have
  1161. // %cmp = icmp [canonical predicate] i32 %x, C0
  1162. // %r = select i1 %cmp, i32 %y, i32 C1
  1163. // Where C0 != C1 and %x may be different from %y, see if the constant that we
  1164. // will have if we flip the strictness of the predicate (i.e. without changing
1165. // the result) is identical to C1 in the select. If it matches, we can change
1166. // the original comparison to one with the swapped predicate, reuse the constant,
1167. // and swap the arms of the select.
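// Illustrative sketch (hypothetical values, not taken from a test case): given
//   %cmp = icmp ult i32 %x, 8
//   %r   = select i1 %cmp, i32 %y, i32 7
// the flipped-strictness form of the compare is (icmp ule i32 %x, 7), whose
// constant matches the select's false value, so this can become
//   %cmp.inv = icmp ugt i32 %x, 7
//   %r       = select i1 %cmp.inv, i32 7, i32 %y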
  1168. static Instruction *
  1169. tryToReuseConstantFromSelectInComparison(SelectInst &Sel, ICmpInst &Cmp,
  1170. InstCombiner::BuilderTy &Builder) {
  1171. ICmpInst::Predicate Pred;
  1172. Value *X;
  1173. Constant *C0;
  1174. if (!match(&Cmp, m_OneUse(m_ICmp(
  1175. Pred, m_Value(X),
  1176. m_CombineAnd(m_AnyIntegralConstant(), m_Constant(C0))))))
  1177. return nullptr;
1178. // If the comparison predicate is non-relational (eq/ne), we won't be able to do anything.
  1179. if (ICmpInst::isEquality(Pred))
  1180. return nullptr;
1181. // If the comparison predicate is non-canonical, then we certainly won't be able
  1182. // to make it canonical; canonicalizeCmpWithConstant() already tried.
  1183. if (!isCanonicalPredicate(Pred))
  1184. return nullptr;
1185. // If the [input] type of the comparison and the select type differ, let's abort
  1186. // for now. We could try to compare constants with trunc/[zs]ext though.
  1187. if (C0->getType() != Sel.getType())
  1188. return nullptr;
  1189. // FIXME: are there any magic icmp predicate+constant pairs we must not touch?
  1190. Value *SelVal0, *SelVal1; // We do not care which one is from where.
  1191. match(&Sel, m_Select(m_Value(), m_Value(SelVal0), m_Value(SelVal1)));
1192. // At least one of the values we are selecting between must be a constant,
1193. // or else we'll never succeed.
  1194. if (!match(SelVal0, m_AnyIntegralConstant()) &&
  1195. !match(SelVal1, m_AnyIntegralConstant()))
  1196. return nullptr;
  1197. // Does this constant C match any of the `select` values?
  1198. auto MatchesSelectValue = [SelVal0, SelVal1](Constant *C) {
  1199. return C->isElementWiseEqual(SelVal0) || C->isElementWiseEqual(SelVal1);
  1200. };
  1201. // If C0 *already* matches true/false value of select, we are done.
  1202. if (MatchesSelectValue(C0))
  1203. return nullptr;
  1204. // Check the constant we'd have with flipped-strictness predicate.
  1205. auto FlippedStrictness = getFlippedStrictnessPredicateAndConstant(Pred, C0);
  1206. if (!FlippedStrictness)
  1207. return nullptr;
1208. // If said constant doesn't match either, then there is no hope.
  1209. if (!MatchesSelectValue(FlippedStrictness->second))
  1210. return nullptr;
1211. // It matched! Let's insert the new comparison just before the select.
  1212. InstCombiner::BuilderTy::InsertPointGuard Guard(Builder);
  1213. Builder.SetInsertPoint(&Sel);
  1214. Pred = ICmpInst::getSwappedPredicate(Pred); // Yes, swapped.
  1215. Value *NewCmp = Builder.CreateICmp(Pred, X, FlippedStrictness->second,
  1216. Cmp.getName() + ".inv");
  1217. Sel.setCondition(NewCmp);
  1218. Sel.swapValues();
  1219. Sel.swapProfMetadata();
  1220. return &Sel;
  1221. }
  1222. /// Visit a SelectInst that has an ICmpInst as its first operand.
  1223. Instruction *InstCombiner::foldSelectInstWithICmp(SelectInst &SI,
  1224. ICmpInst *ICI) {
  1225. if (Value *V = foldSelectValueEquivalence(SI, *ICI, SQ))
  1226. return replaceInstUsesWith(SI, V);
  1227. if (Instruction *NewSel = canonicalizeMinMaxWithConstant(SI, *ICI, Builder))
  1228. return NewSel;
  1229. if (Instruction *NewAbs = canonicalizeAbsNabs(SI, *ICI, Builder))
  1230. return NewAbs;
  1231. if (Instruction *NewAbs = canonicalizeClampLike(SI, *ICI, Builder))
  1232. return NewAbs;
  1233. if (Instruction *NewSel =
  1234. tryToReuseConstantFromSelectInComparison(SI, *ICI, Builder))
  1235. return NewSel;
  1236. bool Changed = adjustMinMax(SI, *ICI);
  1237. if (Value *V = foldSelectICmpAnd(SI, ICI, Builder))
  1238. return replaceInstUsesWith(SI, V);
  1239. // NOTE: if we wanted to, this is where to detect integer MIN/MAX
  1240. Value *TrueVal = SI.getTrueValue();
  1241. Value *FalseVal = SI.getFalseValue();
  1242. ICmpInst::Predicate Pred = ICI->getPredicate();
  1243. Value *CmpLHS = ICI->getOperand(0);
  1244. Value *CmpRHS = ICI->getOperand(1);
  1245. if (CmpRHS != CmpLHS && isa<Constant>(CmpRHS)) {
  1246. if (CmpLHS == TrueVal && Pred == ICmpInst::ICMP_EQ) {
  1247. // Transform (X == C) ? X : Y -> (X == C) ? C : Y
  1248. SI.setOperand(1, CmpRHS);
  1249. Changed = true;
  1250. } else if (CmpLHS == FalseVal && Pred == ICmpInst::ICMP_NE) {
  1251. // Transform (X != C) ? Y : X -> (X != C) ? Y : C
  1252. SI.setOperand(2, CmpRHS);
  1253. Changed = true;
  1254. }
  1255. }
  1256. // FIXME: This code is nearly duplicated in InstSimplify. Using/refactoring
  1257. // decomposeBitTestICmp() might help.
  1258. {
  1259. unsigned BitWidth =
  1260. DL.getTypeSizeInBits(TrueVal->getType()->getScalarType());
  1261. APInt MinSignedValue = APInt::getSignedMinValue(BitWidth);
  1262. Value *X;
  1263. const APInt *Y, *C;
  1264. bool TrueWhenUnset;
  1265. bool IsBitTest = false;
  1266. if (ICmpInst::isEquality(Pred) &&
  1267. match(CmpLHS, m_And(m_Value(X), m_Power2(Y))) &&
  1268. match(CmpRHS, m_Zero())) {
  1269. IsBitTest = true;
  1270. TrueWhenUnset = Pred == ICmpInst::ICMP_EQ;
  1271. } else if (Pred == ICmpInst::ICMP_SLT && match(CmpRHS, m_Zero())) {
  1272. X = CmpLHS;
  1273. Y = &MinSignedValue;
  1274. IsBitTest = true;
  1275. TrueWhenUnset = false;
  1276. } else if (Pred == ICmpInst::ICMP_SGT && match(CmpRHS, m_AllOnes())) {
  1277. X = CmpLHS;
  1278. Y = &MinSignedValue;
  1279. IsBitTest = true;
  1280. TrueWhenUnset = true;
  1281. }
  1282. if (IsBitTest) {
  1283. Value *V = nullptr;
  1284. // (X & Y) == 0 ? X : X ^ Y --> X & ~Y
  1285. if (TrueWhenUnset && TrueVal == X &&
  1286. match(FalseVal, m_Xor(m_Specific(X), m_APInt(C))) && *Y == *C)
  1287. V = Builder.CreateAnd(X, ~(*Y));
  1288. // (X & Y) != 0 ? X ^ Y : X --> X & ~Y
  1289. else if (!TrueWhenUnset && FalseVal == X &&
  1290. match(TrueVal, m_Xor(m_Specific(X), m_APInt(C))) && *Y == *C)
  1291. V = Builder.CreateAnd(X, ~(*Y));
  1292. // (X & Y) == 0 ? X ^ Y : X --> X | Y
  1293. else if (TrueWhenUnset && FalseVal == X &&
  1294. match(TrueVal, m_Xor(m_Specific(X), m_APInt(C))) && *Y == *C)
  1295. V = Builder.CreateOr(X, *Y);
  1296. // (X & Y) != 0 ? X : X ^ Y --> X | Y
  1297. else if (!TrueWhenUnset && TrueVal == X &&
  1298. match(FalseVal, m_Xor(m_Specific(X), m_APInt(C))) && *Y == *C)
  1299. V = Builder.CreateOr(X, *Y);
  1300. if (V)
  1301. return replaceInstUsesWith(SI, V);
  1302. }
  1303. }
  1304. if (Instruction *V =
  1305. foldSelectICmpAndAnd(SI.getType(), ICI, TrueVal, FalseVal, Builder))
  1306. return V;
  1307. if (Instruction *V = foldSelectCtlzToCttz(ICI, TrueVal, FalseVal, Builder))
  1308. return V;
  1309. if (Value *V = foldSelectICmpAndOr(ICI, TrueVal, FalseVal, Builder))
  1310. return replaceInstUsesWith(SI, V);
  1311. if (Value *V = foldSelectICmpLshrAshr(ICI, TrueVal, FalseVal, Builder))
  1312. return replaceInstUsesWith(SI, V);
  1313. if (Value *V = foldSelectCttzCtlz(ICI, TrueVal, FalseVal, Builder))
  1314. return replaceInstUsesWith(SI, V);
  1315. if (Value *V = canonicalizeSaturatedSubtract(ICI, TrueVal, FalseVal, Builder))
  1316. return replaceInstUsesWith(SI, V);
  1317. if (Value *V = canonicalizeSaturatedAdd(ICI, TrueVal, FalseVal, Builder))
  1318. return replaceInstUsesWith(SI, V);
  1319. return Changed ? &SI : nullptr;
  1320. }
  1321. /// SI is a select whose condition is a PHI node (but the two may be in
  1322. /// different blocks). See if the true/false values (V) are live in all of the
  1323. /// predecessor blocks of the PHI. For example, cases like this can't be mapped:
  1324. ///
  1325. /// X = phi [ C1, BB1], [C2, BB2]
  1326. /// Y = add
  1327. /// Z = select X, Y, 0
  1328. ///
  1329. /// because Y is not live in BB1/BB2.
  1330. static bool canSelectOperandBeMappingIntoPredBlock(const Value *V,
  1331. const SelectInst &SI) {
  1332. // If the value is a non-instruction value like a constant or argument, it
  1333. // can always be mapped.
  1334. const Instruction *I = dyn_cast<Instruction>(V);
  1335. if (!I) return true;
  1336. // If V is a PHI node defined in the same block as the condition PHI, we can
  1337. // map the arguments.
  1338. const PHINode *CondPHI = cast<PHINode>(SI.getCondition());
  1339. if (const PHINode *VP = dyn_cast<PHINode>(I))
  1340. if (VP->getParent() == CondPHI->getParent())
  1341. return true;
  1342. // Otherwise, if the PHI and select are defined in the same block and if V is
  1343. // defined in a different block, then we can transform it.
  1344. if (SI.getParent() == CondPHI->getParent() &&
  1345. I->getParent() != CondPHI->getParent())
  1346. return true;
  1347. // Otherwise we have a 'hard' case and we can't tell without doing more
  1348. // detailed dominator based analysis, punt.
  1349. return false;
  1350. }
  1351. /// We have an SPF (e.g. a min or max) of an SPF of the form:
  1352. /// SPF2(SPF1(A, B), C)
  1353. Instruction *InstCombiner::foldSPFofSPF(Instruction *Inner,
  1354. SelectPatternFlavor SPF1,
  1355. Value *A, Value *B,
  1356. Instruction &Outer,
  1357. SelectPatternFlavor SPF2, Value *C) {
  1358. if (Outer.getType() != Inner->getType())
  1359. return nullptr;
  1360. if (C == A || C == B) {
  1361. // MAX(MAX(A, B), B) -> MAX(A, B)
  1362. // MIN(MIN(a, b), a) -> MIN(a, b)
  1363. // TODO: This could be done in instsimplify.
  1364. if (SPF1 == SPF2 && SelectPatternResult::isMinOrMax(SPF1))
  1365. return replaceInstUsesWith(Outer, Inner);
  1366. // MAX(MIN(a, b), a) -> a
  1367. // MIN(MAX(a, b), a) -> a
  1368. // TODO: This could be done in instsimplify.
  1369. if ((SPF1 == SPF_SMIN && SPF2 == SPF_SMAX) ||
  1370. (SPF1 == SPF_SMAX && SPF2 == SPF_SMIN) ||
  1371. (SPF1 == SPF_UMIN && SPF2 == SPF_UMAX) ||
  1372. (SPF1 == SPF_UMAX && SPF2 == SPF_UMIN))
  1373. return replaceInstUsesWith(Outer, C);
  1374. }
  1375. if (SPF1 == SPF2) {
  1376. const APInt *CB, *CC;
  1377. if (match(B, m_APInt(CB)) && match(C, m_APInt(CC))) {
  1378. // MIN(MIN(A, 23), 97) -> MIN(A, 23)
  1379. // MAX(MAX(A, 97), 23) -> MAX(A, 97)
  1380. // TODO: This could be done in instsimplify.
  1381. if ((SPF1 == SPF_UMIN && CB->ule(*CC)) ||
  1382. (SPF1 == SPF_SMIN && CB->sle(*CC)) ||
  1383. (SPF1 == SPF_UMAX && CB->uge(*CC)) ||
  1384. (SPF1 == SPF_SMAX && CB->sge(*CC)))
  1385. return replaceInstUsesWith(Outer, Inner);
  1386. // MIN(MIN(A, 97), 23) -> MIN(A, 23)
  1387. // MAX(MAX(A, 23), 97) -> MAX(A, 97)
  1388. if ((SPF1 == SPF_UMIN && CB->ugt(*CC)) ||
  1389. (SPF1 == SPF_SMIN && CB->sgt(*CC)) ||
  1390. (SPF1 == SPF_UMAX && CB->ult(*CC)) ||
  1391. (SPF1 == SPF_SMAX && CB->slt(*CC))) {
  1392. Outer.replaceUsesOfWith(Inner, A);
  1393. return &Outer;
  1394. }
  1395. }
  1396. }
  1397. // max(max(A, B), min(A, B)) --> max(A, B)
  1398. // min(min(A, B), max(A, B)) --> min(A, B)
  1399. // TODO: This could be done in instsimplify.
  1400. if (SPF1 == SPF2 &&
  1401. ((SPF1 == SPF_UMIN && match(C, m_c_UMax(m_Specific(A), m_Specific(B)))) ||
  1402. (SPF1 == SPF_SMIN && match(C, m_c_SMax(m_Specific(A), m_Specific(B)))) ||
  1403. (SPF1 == SPF_UMAX && match(C, m_c_UMin(m_Specific(A), m_Specific(B)))) ||
  1404. (SPF1 == SPF_SMAX && match(C, m_c_SMin(m_Specific(A), m_Specific(B))))))
  1405. return replaceInstUsesWith(Outer, Inner);
  1406. // ABS(ABS(X)) -> ABS(X)
  1407. // NABS(NABS(X)) -> NABS(X)
  1408. // TODO: This could be done in instsimplify.
  1409. if (SPF1 == SPF2 && (SPF1 == SPF_ABS || SPF1 == SPF_NABS)) {
  1410. return replaceInstUsesWith(Outer, Inner);
  1411. }
  1412. // ABS(NABS(X)) -> ABS(X)
  1413. // NABS(ABS(X)) -> NABS(X)
  1414. if ((SPF1 == SPF_ABS && SPF2 == SPF_NABS) ||
  1415. (SPF1 == SPF_NABS && SPF2 == SPF_ABS)) {
  1416. SelectInst *SI = cast<SelectInst>(Inner);
  1417. Value *NewSI =
  1418. Builder.CreateSelect(SI->getCondition(), SI->getFalseValue(),
  1419. SI->getTrueValue(), SI->getName(), SI);
  1420. return replaceInstUsesWith(Outer, NewSI);
  1421. }
  1422. auto IsFreeOrProfitableToInvert =
  1423. [&](Value *V, Value *&NotV, bool &ElidesXor) {
  1424. if (match(V, m_Not(m_Value(NotV)))) {
  1425. // If V has at most 2 uses then we can get rid of the xor operation
  1426. // entirely.
  1427. ElidesXor |= !V->hasNUsesOrMore(3);
  1428. return true;
  1429. }
  1430. if (isFreeToInvert(V, !V->hasNUsesOrMore(3))) {
  1431. NotV = nullptr;
  1432. return true;
  1433. }
  1434. return false;
  1435. };
  1436. Value *NotA, *NotB, *NotC;
  1437. bool ElidesXor = false;
  1438. // MIN(MIN(~A, ~B), ~C) == ~MAX(MAX(A, B), C)
  1439. // MIN(MAX(~A, ~B), ~C) == ~MAX(MIN(A, B), C)
  1440. // MAX(MIN(~A, ~B), ~C) == ~MIN(MAX(A, B), C)
  1441. // MAX(MAX(~A, ~B), ~C) == ~MIN(MIN(A, B), C)
  1442. //
  1443. // This transform is performance neutral if we can elide at least one xor from
  1444. // the set of three operands, since we'll be tacking on an xor at the very
  1445. // end.
  1446. if (SelectPatternResult::isMinOrMax(SPF1) &&
  1447. SelectPatternResult::isMinOrMax(SPF2) &&
  1448. IsFreeOrProfitableToInvert(A, NotA, ElidesXor) &&
  1449. IsFreeOrProfitableToInvert(B, NotB, ElidesXor) &&
  1450. IsFreeOrProfitableToInvert(C, NotC, ElidesXor) && ElidesXor) {
  1451. if (!NotA)
  1452. NotA = Builder.CreateNot(A);
  1453. if (!NotB)
  1454. NotB = Builder.CreateNot(B);
  1455. if (!NotC)
  1456. NotC = Builder.CreateNot(C);
  1457. Value *NewInner = createMinMax(Builder, getInverseMinMaxFlavor(SPF1), NotA,
  1458. NotB);
  1459. Value *NewOuter = Builder.CreateNot(
  1460. createMinMax(Builder, getInverseMinMaxFlavor(SPF2), NewInner, NotC));
  1461. return replaceInstUsesWith(Outer, NewOuter);
  1462. }
  1463. return nullptr;
  1464. }
  1465. /// Turn select C, (X + Y), (X - Y) --> (X + (select C, Y, (-Y))).
  1466. /// This is even legal for FP.
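/// For example, a sketch of the integer case (illustrative, hypothetical values):
///   %add = add i32 %x, %y
///   %sub = sub i32 %x, %y
///   %r   = select i1 %c, i32 %add, i32 %sub
/// becomes
///   %neg = sub i32 0, %y
///   %sel = select i1 %c, i32 %y, i32 %neg
///   %r   = add i32 %x, %sel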
  1467. static Instruction *foldAddSubSelect(SelectInst &SI,
  1468. InstCombiner::BuilderTy &Builder) {
  1469. Value *CondVal = SI.getCondition();
  1470. Value *TrueVal = SI.getTrueValue();
  1471. Value *FalseVal = SI.getFalseValue();
  1472. auto *TI = dyn_cast<Instruction>(TrueVal);
  1473. auto *FI = dyn_cast<Instruction>(FalseVal);
  1474. if (!TI || !FI || !TI->hasOneUse() || !FI->hasOneUse())
  1475. return nullptr;
  1476. Instruction *AddOp = nullptr, *SubOp = nullptr;
  1477. if ((TI->getOpcode() == Instruction::Sub &&
  1478. FI->getOpcode() == Instruction::Add) ||
  1479. (TI->getOpcode() == Instruction::FSub &&
  1480. FI->getOpcode() == Instruction::FAdd)) {
  1481. AddOp = FI;
  1482. SubOp = TI;
  1483. } else if ((FI->getOpcode() == Instruction::Sub &&
  1484. TI->getOpcode() == Instruction::Add) ||
  1485. (FI->getOpcode() == Instruction::FSub &&
  1486. TI->getOpcode() == Instruction::FAdd)) {
  1487. AddOp = TI;
  1488. SubOp = FI;
  1489. }
  1490. if (AddOp) {
  1491. Value *OtherAddOp = nullptr;
  1492. if (SubOp->getOperand(0) == AddOp->getOperand(0)) {
  1493. OtherAddOp = AddOp->getOperand(1);
  1494. } else if (SubOp->getOperand(0) == AddOp->getOperand(1)) {
  1495. OtherAddOp = AddOp->getOperand(0);
  1496. }
  1497. if (OtherAddOp) {
  1498. // So at this point we know we have (Y -> OtherAddOp):
  1499. // select C, (add X, Y), (sub X, Z)
  1500. Value *NegVal; // Compute -Z
  1501. if (SI.getType()->isFPOrFPVectorTy()) {
  1502. NegVal = Builder.CreateFNeg(SubOp->getOperand(1));
  1503. if (Instruction *NegInst = dyn_cast<Instruction>(NegVal)) {
  1504. FastMathFlags Flags = AddOp->getFastMathFlags();
  1505. Flags &= SubOp->getFastMathFlags();
  1506. NegInst->setFastMathFlags(Flags);
  1507. }
  1508. } else {
  1509. NegVal = Builder.CreateNeg(SubOp->getOperand(1));
  1510. }
  1511. Value *NewTrueOp = OtherAddOp;
  1512. Value *NewFalseOp = NegVal;
  1513. if (AddOp != TI)
  1514. std::swap(NewTrueOp, NewFalseOp);
  1515. Value *NewSel = Builder.CreateSelect(CondVal, NewTrueOp, NewFalseOp,
  1516. SI.getName() + ".p", &SI);
  1517. if (SI.getType()->isFPOrFPVectorTy()) {
  1518. Instruction *RI =
  1519. BinaryOperator::CreateFAdd(SubOp->getOperand(0), NewSel);
  1520. FastMathFlags Flags = AddOp->getFastMathFlags();
  1521. Flags &= SubOp->getFastMathFlags();
  1522. RI->setFastMathFlags(Flags);
  1523. return RI;
  1524. } else
  1525. return BinaryOperator::CreateAdd(SubOp->getOperand(0), NewSel);
  1526. }
  1527. }
  1528. return nullptr;
  1529. }
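/// Try to narrow a select of a zero/sign-extended value and a constant when
/// the constant survives truncation to the narrow type. A sketch of the idea
/// (illustrative, hypothetical values):
///   %c   = icmp eq i8 %a, %b
///   %ext = zext i8 %x to i32
///   %r   = select i1 %c, i32 %ext, i32 42
/// can become
///   %narrow = select i1 %c, i8 %x, i8 42
///   %r      = zext i8 %narrow to i32
/// The code below also handles the case where the value being extended is the
/// select's own condition.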
  1530. Instruction *InstCombiner::foldSelectExtConst(SelectInst &Sel) {
  1531. Constant *C;
  1532. if (!match(Sel.getTrueValue(), m_Constant(C)) &&
  1533. !match(Sel.getFalseValue(), m_Constant(C)))
  1534. return nullptr;
  1535. Instruction *ExtInst;
  1536. if (!match(Sel.getTrueValue(), m_Instruction(ExtInst)) &&
  1537. !match(Sel.getFalseValue(), m_Instruction(ExtInst)))
  1538. return nullptr;
  1539. auto ExtOpcode = ExtInst->getOpcode();
  1540. if (ExtOpcode != Instruction::ZExt && ExtOpcode != Instruction::SExt)
  1541. return nullptr;
  1542. // If we are extending from a boolean type or if we can create a select that
  1543. // has the same size operands as its condition, try to narrow the select.
  1544. Value *X = ExtInst->getOperand(0);
  1545. Type *SmallType = X->getType();
  1546. Value *Cond = Sel.getCondition();
  1547. auto *Cmp = dyn_cast<CmpInst>(Cond);
  1548. if (!SmallType->isIntOrIntVectorTy(1) &&
  1549. (!Cmp || Cmp->getOperand(0)->getType() != SmallType))
  1550. return nullptr;
  1551. // If the constant is the same after truncation to the smaller type and
  1552. // extension to the original type, we can narrow the select.
  1553. Type *SelType = Sel.getType();
  1554. Constant *TruncC = ConstantExpr::getTrunc(C, SmallType);
  1555. Constant *ExtC = ConstantExpr::getCast(ExtOpcode, TruncC, SelType);
  1556. if (ExtC == C) {
  1557. Value *TruncCVal = cast<Value>(TruncC);
  1558. if (ExtInst == Sel.getFalseValue())
  1559. std::swap(X, TruncCVal);
  1560. // select Cond, (ext X), C --> ext(select Cond, X, C')
  1561. // select Cond, C, (ext X) --> ext(select Cond, C', X)
  1562. Value *NewSel = Builder.CreateSelect(Cond, X, TruncCVal, "narrow", &Sel);
  1563. return CastInst::Create(Instruction::CastOps(ExtOpcode), NewSel, SelType);
  1564. }
  1565. // If one arm of the select is the extend of the condition, replace that arm
  1566. // with the extension of the appropriate known bool value.
  1567. if (Cond == X) {
  1568. if (ExtInst == Sel.getTrueValue()) {
  1569. // select X, (sext X), C --> select X, -1, C
  1570. // select X, (zext X), C --> select X, 1, C
  1571. Constant *One = ConstantInt::getTrue(SmallType);
  1572. Constant *AllOnesOrOne = ConstantExpr::getCast(ExtOpcode, One, SelType);
  1573. return SelectInst::Create(Cond, AllOnesOrOne, C, "", nullptr, &Sel);
  1574. } else {
  1575. // select X, C, (sext X) --> select X, C, 0
  1576. // select X, C, (zext X) --> select X, C, 0
  1577. Constant *Zero = ConstantInt::getNullValue(SelType);
  1578. return SelectInst::Create(Cond, C, Zero, "", nullptr, &Sel);
  1579. }
  1580. }
  1581. return nullptr;
  1582. }
  1583. /// Try to transform a vector select with a constant condition vector into a
  1584. /// shuffle for easier combining with other shuffles and insert/extract.
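/// For example (illustrative, hypothetical values):
///   select <2 x i1> <i1 true, i1 false>, <2 x i8> %a, <2 x i8> %b
/// becomes
///   shufflevector <2 x i8> %a, <2 x i8> %b, <2 x i32> <i32 0, i32 3>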
  1585. static Instruction *canonicalizeSelectToShuffle(SelectInst &SI) {
  1586. Value *CondVal = SI.getCondition();
  1587. Constant *CondC;
  1588. if (!CondVal->getType()->isVectorTy() || !match(CondVal, m_Constant(CondC)))
  1589. return nullptr;
  1590. unsigned NumElts = CondVal->getType()->getVectorNumElements();
  1591. SmallVector<Constant *, 16> Mask;
  1592. Mask.reserve(NumElts);
  1593. Type *Int32Ty = Type::getInt32Ty(CondVal->getContext());
  1594. for (unsigned i = 0; i != NumElts; ++i) {
  1595. Constant *Elt = CondC->getAggregateElement(i);
  1596. if (!Elt)
  1597. return nullptr;
  1598. if (Elt->isOneValue()) {
  1599. // If the select condition element is true, choose from the 1st vector.
  1600. Mask.push_back(ConstantInt::get(Int32Ty, i));
  1601. } else if (Elt->isNullValue()) {
  1602. // If the select condition element is false, choose from the 2nd vector.
  1603. Mask.push_back(ConstantInt::get(Int32Ty, i + NumElts));
  1604. } else if (isa<UndefValue>(Elt)) {
  1605. // Undef in a select condition (choose one of the operands) does not mean
  1606. // the same thing as undef in a shuffle mask (any value is acceptable), so
  1607. // give up.
  1608. return nullptr;
  1609. } else {
  1610. // Bail out on a constant expression.
  1611. return nullptr;
  1612. }
  1613. }
  1614. return new ShuffleVectorInst(SI.getTrueValue(), SI.getFalseValue(),
  1615. ConstantVector::get(Mask));
  1616. }
  1617. /// If we have a select of vectors with a scalar condition, try to convert that
  1618. /// to a vector select by splatting the condition. A splat may get folded with
  1619. /// other operations in IR and having all operands of a select be vector types
  1620. /// is likely better for vector codegen.
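/// A sketch of the intended rewrite (illustrative, hypothetical values):
///   %c = extractelement <4 x i1> %v, i32 0
///   %r = select i1 %c, <4 x i32> %t, <4 x i32> %f
/// becomes a vector select on a splat of the extracted bit:
///   %c.splat = <4 x i1 splat of %c>
///   %r = select <4 x i1> %c.splat, <4 x i32> %t, <4 x i32> %f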
  1621. static Instruction *canonicalizeScalarSelectOfVecs(
  1622. SelectInst &Sel, InstCombiner::BuilderTy &Builder) {
  1623. Type *Ty = Sel.getType();
  1624. if (!Ty->isVectorTy())
  1625. return nullptr;
  1626. // We can replace a single-use extract with constant index.
  1627. Value *Cond = Sel.getCondition();
  1628. if (!match(Cond, m_OneUse(m_ExtractElement(m_Value(), m_ConstantInt()))))
  1629. return nullptr;
  1630. // select (extelt V, Index), T, F --> select (splat V, Index), T, F
  1631. // Splatting the extracted condition reduces code (we could directly create a
  1632. // splat shuffle of the source vector to eliminate the intermediate step).
  1633. unsigned NumElts = Ty->getVectorNumElements();
  1634. Value *SplatCond = Builder.CreateVectorSplat(NumElts, Cond);
  1635. Sel.setCondition(SplatCond);
  1636. return &Sel;
  1637. }
  1638. /// Reuse bitcasted operands between a compare and select:
  1639. /// select (cmp (bitcast C), (bitcast D)), (bitcast' C), (bitcast' D) -->
  1640. /// bitcast (select (cmp (bitcast C), (bitcast D)), (bitcast C), (bitcast D))
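/// A concrete sketch (illustrative, hypothetical values):
///   %a    = bitcast <4 x i8> %c to i32
///   %b    = bitcast <4 x i8> %d to i32
///   %cond = icmp ult i32 %a, %b
///   %ct   = bitcast <4 x i8> %c to float
///   %dt   = bitcast <4 x i8> %d to float
///   %sel  = select i1 %cond, float %ct, float %dt
/// becomes
///   %sel.i = select i1 %cond, i32 %a, i32 %b
///   %sel   = bitcast i32 %sel.i to float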
  1641. static Instruction *foldSelectCmpBitcasts(SelectInst &Sel,
  1642. InstCombiner::BuilderTy &Builder) {
  1643. Value *Cond = Sel.getCondition();
  1644. Value *TVal = Sel.getTrueValue();
  1645. Value *FVal = Sel.getFalseValue();
  1646. CmpInst::Predicate Pred;
  1647. Value *A, *B;
  1648. if (!match(Cond, m_Cmp(Pred, m_Value(A), m_Value(B))))
  1649. return nullptr;
  1650. // The select condition is a compare instruction. If the select's true/false
  1651. // values are already the same as the compare operands, there's nothing to do.
  1652. if (TVal == A || TVal == B || FVal == A || FVal == B)
  1653. return nullptr;
  1654. Value *C, *D;
  1655. if (!match(A, m_BitCast(m_Value(C))) || !match(B, m_BitCast(m_Value(D))))
  1656. return nullptr;
  1657. // select (cmp (bitcast C), (bitcast D)), (bitcast TSrc), (bitcast FSrc)
  1658. Value *TSrc, *FSrc;
  1659. if (!match(TVal, m_BitCast(m_Value(TSrc))) ||
  1660. !match(FVal, m_BitCast(m_Value(FSrc))))
  1661. return nullptr;
  1662. // If the select true/false values are *different bitcasts* of the same source
  1663. // operands, make the select operands the same as the compare operands and
  1664. // cast the result. This is the canonical select form for min/max.
  1665. Value *NewSel;
  1666. if (TSrc == C && FSrc == D) {
  1667. // select (cmp (bitcast C), (bitcast D)), (bitcast' C), (bitcast' D) -->
  1668. // bitcast (select (cmp A, B), A, B)
  1669. NewSel = Builder.CreateSelect(Cond, A, B, "", &Sel);
  1670. } else if (TSrc == D && FSrc == C) {
  1671. // select (cmp (bitcast C), (bitcast D)), (bitcast' D), (bitcast' C) -->
  1672. // bitcast (select (cmp A, B), B, A)
  1673. NewSel = Builder.CreateSelect(Cond, B, A, "", &Sel);
  1674. } else {
  1675. return nullptr;
  1676. }
  1677. return CastInst::CreateBitOrPointerCast(NewSel, Sel.getType());
  1678. }
  1679. /// Try to eliminate select instructions that test the returned flag of cmpxchg
  1680. /// instructions.
  1681. ///
  1682. /// If a select instruction tests the returned flag of a cmpxchg instruction and
1683. /// selects between the returned value of the cmpxchg instruction and its compare
  1684. /// operand, the result of the select will always be equal to its false value.
  1685. /// For example:
  1686. ///
  1687. /// %0 = cmpxchg i64* %ptr, i64 %compare, i64 %new_value seq_cst seq_cst
  1688. /// %1 = extractvalue { i64, i1 } %0, 1
  1689. /// %2 = extractvalue { i64, i1 } %0, 0
  1690. /// %3 = select i1 %1, i64 %compare, i64 %2
  1691. /// ret i64 %3
  1692. ///
  1693. /// The returned value of the cmpxchg instruction (%2) is the original value
  1694. /// located at %ptr prior to any update. If the cmpxchg operation succeeds, %2
  1695. /// must have been equal to %compare. Thus, the result of the select is always
  1696. /// equal to %2, and the code can be simplified to:
  1697. ///
  1698. /// %0 = cmpxchg i64* %ptr, i64 %compare, i64 %new_value seq_cst seq_cst
  1699. /// %1 = extractvalue { i64, i1 } %0, 0
  1700. /// ret i64 %1
  1701. ///
  1702. static Instruction *foldSelectCmpXchg(SelectInst &SI) {
  1703. // A helper that determines if V is an extractvalue instruction whose
  1704. // aggregate operand is a cmpxchg instruction and whose single index is equal
  1705. // to I. If such conditions are true, the helper returns the cmpxchg
  1706. // instruction; otherwise, a nullptr is returned.
  1707. auto isExtractFromCmpXchg = [](Value *V, unsigned I) -> AtomicCmpXchgInst * {
  1708. auto *Extract = dyn_cast<ExtractValueInst>(V);
  1709. if (!Extract)
  1710. return nullptr;
  1711. if (Extract->getIndices()[0] != I)
  1712. return nullptr;
  1713. return dyn_cast<AtomicCmpXchgInst>(Extract->getAggregateOperand());
  1714. };
  1715. // If the select has a single user, and this user is a select instruction that
  1716. // we can simplify, skip the cmpxchg simplification for now.
  1717. if (SI.hasOneUse())
  1718. if (auto *Select = dyn_cast<SelectInst>(SI.user_back()))
  1719. if (Select->getCondition() == SI.getCondition())
  1720. if (Select->getFalseValue() == SI.getTrueValue() ||
  1721. Select->getTrueValue() == SI.getFalseValue())
  1722. return nullptr;
  1723. // Ensure the select condition is the returned flag of a cmpxchg instruction.
  1724. auto *CmpXchg = isExtractFromCmpXchg(SI.getCondition(), 1);
  1725. if (!CmpXchg)
  1726. return nullptr;
  1727. // Check the true value case: The true value of the select is the returned
  1728. // value of the same cmpxchg used by the condition, and the false value is the
  1729. // cmpxchg instruction's compare operand.
  1730. if (auto *X = isExtractFromCmpXchg(SI.getTrueValue(), 0))
  1731. if (X == CmpXchg && X->getCompareOperand() == SI.getFalseValue()) {
  1732. SI.setTrueValue(SI.getFalseValue());
  1733. return &SI;
  1734. }
  1735. // Check the false value case: The false value of the select is the returned
  1736. // value of the same cmpxchg used by the condition, and the true value is the
  1737. // cmpxchg instruction's compare operand.
  1738. if (auto *X = isExtractFromCmpXchg(SI.getFalseValue(), 0))
  1739. if (X == CmpXchg && X->getCompareOperand() == SI.getTrueValue()) {
  1740. SI.setTrueValue(SI.getFalseValue());
  1741. return &SI;
  1742. }
  1743. return nullptr;
  1744. }
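/// Try to move an add-with-constant after a min/max, adjusting the min/max
/// bound to compensate; e.g. (illustrative):
///   umin (add nuw %a, C1), C2 --> add nuw (umin %a, C2 - C1), C1
/// when C2 u>= C1, with an analogous nsw form when C2 - C1 does not overflow.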
  1745. static Instruction *moveAddAfterMinMax(SelectPatternFlavor SPF, Value *X,
  1746. Value *Y,
  1747. InstCombiner::BuilderTy &Builder) {
  1748. assert(SelectPatternResult::isMinOrMax(SPF) && "Expected min/max pattern");
  1749. bool IsUnsigned = SPF == SelectPatternFlavor::SPF_UMIN ||
  1750. SPF == SelectPatternFlavor::SPF_UMAX;
  1751. // TODO: If InstSimplify could fold all cases where C2 <= C1, we could change
  1752. // the constant value check to an assert.
  1753. Value *A;
  1754. const APInt *C1, *C2;
  1755. if (IsUnsigned && match(X, m_NUWAdd(m_Value(A), m_APInt(C1))) &&
  1756. match(Y, m_APInt(C2)) && C2->uge(*C1) && X->hasNUses(2)) {
  1757. // umin (add nuw A, C1), C2 --> add nuw (umin A, C2 - C1), C1
  1758. // umax (add nuw A, C1), C2 --> add nuw (umax A, C2 - C1), C1
  1759. Value *NewMinMax = createMinMax(Builder, SPF, A,
  1760. ConstantInt::get(X->getType(), *C2 - *C1));
  1761. return BinaryOperator::CreateNUW(BinaryOperator::Add, NewMinMax,
  1762. ConstantInt::get(X->getType(), *C1));
  1763. }
  1764. if (!IsUnsigned && match(X, m_NSWAdd(m_Value(A), m_APInt(C1))) &&
  1765. match(Y, m_APInt(C2)) && X->hasNUses(2)) {
  1766. bool Overflow;
  1767. APInt Diff = C2->ssub_ov(*C1, Overflow);
  1768. if (!Overflow) {
  1769. // smin (add nsw A, C1), C2 --> add nsw (smin A, C2 - C1), C1
  1770. // smax (add nsw A, C1), C2 --> add nsw (smax A, C2 - C1), C1
  1771. Value *NewMinMax = createMinMax(Builder, SPF, A,
  1772. ConstantInt::get(X->getType(), Diff));
  1773. return BinaryOperator::CreateNSW(BinaryOperator::Add, NewMinMax,
  1774. ConstantInt::get(X->getType(), *C1));
  1775. }
  1776. }
  1777. return nullptr;
  1778. }
  1779. /// Match a sadd_sat or ssub_sat which is using min/max to clamp the value.
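/// A sketch of the shape being matched (illustrative, hypothetical values):
/// i32 arithmetic on sign-extended i8 inputs, clamped to [-128, 127] with
/// smin/smax compare+select pairs, such as
///   %add = add i32 (sext i8 %a), (sext i8 %b)
///   %min = smin(%add, 127)    ; compare + select
///   %res = smax(%min, -128)   ; compare + select
/// can become
///   %sat = call i8 @llvm.sadd.sat.i8(i8 %a, i8 %b)
///   %res = sext i8 %sat to i32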
  1780. Instruction *InstCombiner::matchSAddSubSat(SelectInst &MinMax1) {
  1781. Type *Ty = MinMax1.getType();
  1782. // We are looking for a tree of:
  1783. // max(INT_MIN, min(INT_MAX, add(sext(A), sext(B))))
  1784. // Where the min and max could be reversed
  1785. Instruction *MinMax2;
  1786. BinaryOperator *AddSub;
  1787. const APInt *MinValue, *MaxValue;
  1788. if (match(&MinMax1, m_SMin(m_Instruction(MinMax2), m_APInt(MaxValue)))) {
  1789. if (!match(MinMax2, m_SMax(m_BinOp(AddSub), m_APInt(MinValue))))
  1790. return nullptr;
  1791. } else if (match(&MinMax1,
  1792. m_SMax(m_Instruction(MinMax2), m_APInt(MinValue)))) {
  1793. if (!match(MinMax2, m_SMin(m_BinOp(AddSub), m_APInt(MaxValue))))
  1794. return nullptr;
  1795. } else
  1796. return nullptr;
1797. // Check that the constants form a saturating clamp, and that the new type would be
  1798. // sensible to convert to.
  1799. if (!(*MaxValue + 1).isPowerOf2() || -*MinValue != *MaxValue + 1)
  1800. return nullptr;
1801. // In what bitwidth can this be treated as saturating arithmetic?
  1802. unsigned NewBitWidth = (*MaxValue + 1).logBase2() + 1;
  1803. // FIXME: This isn't quite right for vectors, but using the scalar type is a
  1804. // good first approximation for what should be done there.
  1805. if (!shouldChangeType(Ty->getScalarType()->getIntegerBitWidth(), NewBitWidth))
  1806. return nullptr;
1807. // Also make sure that the number of uses is as expected. The "3"s are for
  1808. // the two items of min/max (the compare and the select).
  1809. if (MinMax2->hasNUsesOrMore(3) || AddSub->hasNUsesOrMore(3))
  1810. return nullptr;
  1811. // Create the new type (which can be a vector type)
  1812. Type *NewTy = Ty->getWithNewBitWidth(NewBitWidth);
  1813. // Match the two extends from the add/sub
  1814. Value *A, *B;
1815. if (!match(AddSub, m_BinOp(m_SExt(m_Value(A)), m_SExt(m_Value(B)))))
  1816. return nullptr;
  1817. // And check the incoming values are of a type smaller than or equal to the
  1818. // size of the saturation. Otherwise the higher bits can cause different
  1819. // results.
  1820. if (A->getType()->getScalarSizeInBits() > NewBitWidth ||
  1821. B->getType()->getScalarSizeInBits() > NewBitWidth)
  1822. return nullptr;
  1823. Intrinsic::ID IntrinsicID;
  1824. if (AddSub->getOpcode() == Instruction::Add)
  1825. IntrinsicID = Intrinsic::sadd_sat;
  1826. else if (AddSub->getOpcode() == Instruction::Sub)
  1827. IntrinsicID = Intrinsic::ssub_sat;
  1828. else
  1829. return nullptr;
  1830. // Finally create and return the sat intrinsic, truncated to the new type
  1831. Function *F = Intrinsic::getDeclaration(MinMax1.getModule(), IntrinsicID, NewTy);
  1832. Value *AT = Builder.CreateSExt(A, NewTy);
  1833. Value *BT = Builder.CreateSExt(B, NewTy);
  1834. Value *Sat = Builder.CreateCall(F, {AT, BT});
  1835. return CastInst::Create(Instruction::SExt, Sat, Ty);
  1836. }
  1837. /// Reduce a sequence of min/max with a common operand.
  1838. static Instruction *factorizeMinMaxTree(SelectPatternFlavor SPF, Value *LHS,
  1839. Value *RHS,
  1840. InstCombiner::BuilderTy &Builder) {
  1841. assert(SelectPatternResult::isMinOrMax(SPF) && "Expected a min/max");
  1842. // TODO: Allow FP min/max with nnan/nsz.
  1843. if (!LHS->getType()->isIntOrIntVectorTy())
  1844. return nullptr;
  1845. // Match 3 of the same min/max ops. Example: umin(umin(), umin()).
  1846. Value *A, *B, *C, *D;
  1847. SelectPatternResult L = matchSelectPattern(LHS, A, B);
  1848. SelectPatternResult R = matchSelectPattern(RHS, C, D);
  1849. if (SPF != L.Flavor || L.Flavor != R.Flavor)
  1850. return nullptr;
  1851. // Look for a common operand. The use checks are different than usual because
  1852. // a min/max pattern typically has 2 uses of each op: 1 by the cmp and 1 by
  1853. // the select.
  1854. Value *MinMaxOp = nullptr;
  1855. Value *ThirdOp = nullptr;
  1856. if (!LHS->hasNUsesOrMore(3) && RHS->hasNUsesOrMore(3)) {
  1857. // If the LHS is only used in this chain and the RHS is used outside of it,
  1858. // reuse the RHS min/max because that will eliminate the LHS.
  1859. if (D == A || C == A) {
  1860. // min(min(a, b), min(c, a)) --> min(min(c, a), b)
  1861. // min(min(a, b), min(a, d)) --> min(min(a, d), b)
  1862. MinMaxOp = RHS;
  1863. ThirdOp = B;
  1864. } else if (D == B || C == B) {
  1865. // min(min(a, b), min(c, b)) --> min(min(c, b), a)
  1866. // min(min(a, b), min(b, d)) --> min(min(b, d), a)
  1867. MinMaxOp = RHS;
  1868. ThirdOp = A;
  1869. }
  1870. } else if (!RHS->hasNUsesOrMore(3)) {
  1871. // Reuse the LHS. This will eliminate the RHS.
  1872. if (D == A || D == B) {
  1873. // min(min(a, b), min(c, a)) --> min(min(a, b), c)
  1874. // min(min(a, b), min(c, b)) --> min(min(a, b), c)
  1875. MinMaxOp = LHS;
  1876. ThirdOp = C;
  1877. } else if (C == A || C == B) {
  1878. // min(min(a, b), min(b, d)) --> min(min(a, b), d)
1879. // min(min(a, b), min(a, d)) --> min(min(a, b), d)
  1880. MinMaxOp = LHS;
  1881. ThirdOp = D;
  1882. }
  1883. }
  1884. if (!MinMaxOp || !ThirdOp)
  1885. return nullptr;
  1886. CmpInst::Predicate P = getMinMaxPred(SPF);
  1887. Value *CmpABC = Builder.CreateICmp(P, MinMaxOp, ThirdOp);
  1888. return SelectInst::Create(CmpABC, MinMaxOp, ThirdOp);
  1889. }
  1890. /// Try to reduce a rotate pattern that includes a compare and select into a
  1891. /// funnel shift intrinsic. Example:
  1892. /// rotl32(a, b) --> (b == 0 ? a : ((a >> (32 - b)) | (a << b)))
  1893. /// --> call llvm.fshl.i32(a, a, b)
  1894. static Instruction *foldSelectRotate(SelectInst &Sel) {
  1895. // The false value of the select must be a rotate of the true value.
  1896. Value *Or0, *Or1;
  1897. if (!match(Sel.getFalseValue(), m_OneUse(m_Or(m_Value(Or0), m_Value(Or1)))))
  1898. return nullptr;
  1899. Value *TVal = Sel.getTrueValue();
  1900. Value *SA0, *SA1;
  1901. if (!match(Or0, m_OneUse(m_LogicalShift(m_Specific(TVal), m_Value(SA0)))) ||
  1902. !match(Or1, m_OneUse(m_LogicalShift(m_Specific(TVal), m_Value(SA1)))))
  1903. return nullptr;
  1904. auto ShiftOpcode0 = cast<BinaryOperator>(Or0)->getOpcode();
  1905. auto ShiftOpcode1 = cast<BinaryOperator>(Or1)->getOpcode();
  1906. if (ShiftOpcode0 == ShiftOpcode1)
  1907. return nullptr;
  1908. // We have one of these patterns so far:
  1909. // select ?, TVal, (or (lshr TVal, SA0), (shl TVal, SA1))
  1910. // select ?, TVal, (or (shl TVal, SA0), (lshr TVal, SA1))
  1911. // This must be a power-of-2 rotate for a bitmasking transform to be valid.
  1912. unsigned Width = Sel.getType()->getScalarSizeInBits();
  1913. if (!isPowerOf2_32(Width))
  1914. return nullptr;
  1915. // Check the shift amounts to see if they are an opposite pair.
  1916. Value *ShAmt;
  1917. if (match(SA1, m_OneUse(m_Sub(m_SpecificInt(Width), m_Specific(SA0)))))
  1918. ShAmt = SA0;
  1919. else if (match(SA0, m_OneUse(m_Sub(m_SpecificInt(Width), m_Specific(SA1)))))
  1920. ShAmt = SA1;
  1921. else
  1922. return nullptr;
  1923. // Finally, see if the select is filtering out a shift-by-zero.
  1924. Value *Cond = Sel.getCondition();
  1925. ICmpInst::Predicate Pred;
  1926. if (!match(Cond, m_OneUse(m_ICmp(Pred, m_Specific(ShAmt), m_ZeroInt()))) ||
  1927. Pred != ICmpInst::ICMP_EQ)
  1928. return nullptr;
  1929. // This is a rotate that avoids shift-by-bitwidth UB in a suboptimal way.
  1930. // Convert to funnel shift intrinsic.
  1931. bool IsFshl = (ShAmt == SA0 && ShiftOpcode0 == BinaryOperator::Shl) ||
  1932. (ShAmt == SA1 && ShiftOpcode1 == BinaryOperator::Shl);
  1933. Intrinsic::ID IID = IsFshl ? Intrinsic::fshl : Intrinsic::fshr;
  1934. Function *F = Intrinsic::getDeclaration(Sel.getModule(), IID, Sel.getType());
  1935. return IntrinsicInst::Create(F, { TVal, TVal, ShAmt });
  1936. }
  1937. Instruction *InstCombiner::visitSelectInst(SelectInst &SI) {
  1938. Value *CondVal = SI.getCondition();
  1939. Value *TrueVal = SI.getTrueValue();
  1940. Value *FalseVal = SI.getFalseValue();
  1941. Type *SelType = SI.getType();
  1942. // FIXME: Remove this workaround when freeze related patches are done.
1943. // For a select with an undef operand that feeds into an equality comparison,
1944. // don't simplify it, so loop unswitch can know the equality comparison
1945. // may have an undef operand. This is a workaround for PR31652, caused by a
1946. // discrepancy about branching on undef between LoopUnswitch and GVN.
  1947. if (isa<UndefValue>(TrueVal) || isa<UndefValue>(FalseVal)) {
  1948. if (llvm::any_of(SI.users(), [&](User *U) {
  1949. ICmpInst *CI = dyn_cast<ICmpInst>(U);
  1950. if (CI && CI->isEquality())
  1951. return true;
  1952. return false;
  1953. })) {
  1954. return nullptr;
  1955. }
  1956. }
  1957. if (Value *V = SimplifySelectInst(CondVal, TrueVal, FalseVal,
  1958. SQ.getWithInstruction(&SI)))
  1959. return replaceInstUsesWith(SI, V);
  1960. if (Instruction *I = canonicalizeSelectToShuffle(SI))
  1961. return I;
  1962. if (Instruction *I = canonicalizeScalarSelectOfVecs(SI, Builder))
  1963. return I;
  1964. // Canonicalize a one-use integer compare with a non-canonical predicate by
  1965. // inverting the predicate and swapping the select operands. This matches a
  1966. // compare canonicalization for conditional branches.
  1967. // TODO: Should we do the same for FP compares?
  1968. CmpInst::Predicate Pred;
  1969. if (match(CondVal, m_OneUse(m_ICmp(Pred, m_Value(), m_Value()))) &&
  1970. !isCanonicalPredicate(Pred)) {
  1971. // Swap true/false values and condition.
  1972. CmpInst *Cond = cast<CmpInst>(CondVal);
  1973. Cond->setPredicate(CmpInst::getInversePredicate(Pred));
  1974. SI.setOperand(1, FalseVal);
  1975. SI.setOperand(2, TrueVal);
  1976. SI.swapProfMetadata();
  1977. Worklist.Add(Cond);
  1978. return &SI;
  1979. }
  1980. if (SelType->isIntOrIntVectorTy(1) &&
  1981. TrueVal->getType() == CondVal->getType()) {
  1982. if (match(TrueVal, m_One())) {
  1983. // Change: A = select B, true, C --> A = or B, C
  1984. return BinaryOperator::CreateOr(CondVal, FalseVal);
  1985. }
  1986. if (match(TrueVal, m_Zero())) {
  1987. // Change: A = select B, false, C --> A = and !B, C
  1988. Value *NotCond = Builder.CreateNot(CondVal, "not." + CondVal->getName());
  1989. return BinaryOperator::CreateAnd(NotCond, FalseVal);
  1990. }
  1991. if (match(FalseVal, m_Zero())) {
  1992. // Change: A = select B, C, false --> A = and B, C
  1993. return BinaryOperator::CreateAnd(CondVal, TrueVal);
  1994. }
  1995. if (match(FalseVal, m_One())) {
  1996. // Change: A = select B, C, true --> A = or !B, C
  1997. Value *NotCond = Builder.CreateNot(CondVal, "not." + CondVal->getName());
  1998. return BinaryOperator::CreateOr(NotCond, TrueVal);
  1999. }
  2000. // select a, a, b -> a | b
  2001. // select a, b, a -> a & b
  2002. if (CondVal == TrueVal)
  2003. return BinaryOperator::CreateOr(CondVal, FalseVal);
  2004. if (CondVal == FalseVal)
  2005. return BinaryOperator::CreateAnd(CondVal, TrueVal);
  2006. // select a, ~a, b -> (~a) & b
  2007. // select a, b, ~a -> (~a) | b
  2008. if (match(TrueVal, m_Not(m_Specific(CondVal))))
  2009. return BinaryOperator::CreateAnd(TrueVal, FalseVal);
  2010. if (match(FalseVal, m_Not(m_Specific(CondVal))))
  2011. return BinaryOperator::CreateOr(TrueVal, FalseVal);
  2012. }
  2013. // Selecting between two integer or vector splat integer constants?
  2014. //
  2015. // Note that we don't handle a scalar select of vectors:
  2016. // select i1 %c, <2 x i8> <1, 1>, <2 x i8> <0, 0>
  2017. // because that may need 3 instructions to splat the condition value:
  2018. // extend, insertelement, shufflevector.
  2019. if (SelType->isIntOrIntVectorTy() &&
  2020. CondVal->getType()->isVectorTy() == SelType->isVectorTy()) {
  2021. // select C, 1, 0 -> zext C to int
  2022. if (match(TrueVal, m_One()) && match(FalseVal, m_Zero()))
  2023. return new ZExtInst(CondVal, SelType);
  2024. // select C, -1, 0 -> sext C to int
  2025. if (match(TrueVal, m_AllOnes()) && match(FalseVal, m_Zero()))
  2026. return new SExtInst(CondVal, SelType);
  2027. // select C, 0, 1 -> zext !C to int
  2028. if (match(TrueVal, m_Zero()) && match(FalseVal, m_One())) {
  2029. Value *NotCond = Builder.CreateNot(CondVal, "not." + CondVal->getName());
  2030. return new ZExtInst(NotCond, SelType);
  2031. }
  2032. // select C, 0, -1 -> sext !C to int
  2033. if (match(TrueVal, m_Zero()) && match(FalseVal, m_AllOnes())) {
  2034. Value *NotCond = Builder.CreateNot(CondVal, "not." + CondVal->getName());
  2035. return new SExtInst(NotCond, SelType);
  2036. }
  2037. }
  2038. // See if we are selecting two values based on a comparison of the two values.
  2039. if (FCmpInst *FCI = dyn_cast<FCmpInst>(CondVal)) {
  2040. if (FCI->getOperand(0) == TrueVal && FCI->getOperand(1) == FalseVal) {
  2041. // Canonicalize to use ordered comparisons by swapping the select
  2042. // operands.
  2043. //
  2044. // e.g.
  2045. // (X ugt Y) ? X : Y -> (X ole Y) ? Y : X
  2046. if (FCI->hasOneUse() && FCmpInst::isUnordered(FCI->getPredicate())) {
  2047. FCmpInst::Predicate InvPred = FCI->getInversePredicate();
  2048. IRBuilder<>::FastMathFlagGuard FMFG(Builder);
  2049. Builder.setFastMathFlags(FCI->getFastMathFlags());
  2050. Value *NewCond = Builder.CreateFCmp(InvPred, TrueVal, FalseVal,
  2051. FCI->getName() + ".inv");
  2052. return SelectInst::Create(NewCond, FalseVal, TrueVal,
  2053. SI.getName() + ".p");
  2054. }
  2055. // NOTE: if we wanted to, this is where to detect MIN/MAX
  2056. } else if (FCI->getOperand(0) == FalseVal && FCI->getOperand(1) == TrueVal){
  2057. // Canonicalize to use ordered comparisons by swapping the select
  2058. // operands.
  2059. //
  2060. // e.g.
2061. // (X ugt Y) ? Y : X -> (X ole Y) ? X : Y
  2062. if (FCI->hasOneUse() && FCmpInst::isUnordered(FCI->getPredicate())) {
  2063. FCmpInst::Predicate InvPred = FCI->getInversePredicate();
  2064. IRBuilder<>::FastMathFlagGuard FMFG(Builder);
  2065. Builder.setFastMathFlags(FCI->getFastMathFlags());
  2066. Value *NewCond = Builder.CreateFCmp(InvPred, FalseVal, TrueVal,
  2067. FCI->getName() + ".inv");
  2068. return SelectInst::Create(NewCond, FalseVal, TrueVal,
  2069. SI.getName() + ".p");
  2070. }
  2071. // NOTE: if we wanted to, this is where to detect MIN/MAX
  2072. }
  2073. }
  2074. // Canonicalize select with fcmp to fabs(). -0.0 makes this tricky. We need
  2075. // fast-math-flags (nsz) or fsub with +0.0 (not fneg) for this to work. We
  2076. // also require nnan because we do not want to unintentionally change the
  2077. // sign of a NaN value.
  2078. // FIXME: These folds should test/propagate FMF from the select, not the
  2079. // fsub or fneg.
  2080. // (X <= +/-0.0) ? (0.0 - X) : X --> fabs(X)
  2081. Instruction *FSub;
  2082. if (match(CondVal, m_FCmp(Pred, m_Specific(FalseVal), m_AnyZeroFP())) &&
  2083. match(TrueVal, m_FSub(m_PosZeroFP(), m_Specific(FalseVal))) &&
  2084. match(TrueVal, m_Instruction(FSub)) && FSub->hasNoNaNs() &&
  2085. (Pred == FCmpInst::FCMP_OLE || Pred == FCmpInst::FCMP_ULE)) {
  2086. Value *Fabs = Builder.CreateUnaryIntrinsic(Intrinsic::fabs, FalseVal, FSub);
  2087. return replaceInstUsesWith(SI, Fabs);
  2088. }
  2089. // (X > +/-0.0) ? X : (0.0 - X) --> fabs(X)
  2090. if (match(CondVal, m_FCmp(Pred, m_Specific(TrueVal), m_AnyZeroFP())) &&
  2091. match(FalseVal, m_FSub(m_PosZeroFP(), m_Specific(TrueVal))) &&
  2092. match(FalseVal, m_Instruction(FSub)) && FSub->hasNoNaNs() &&
  2093. (Pred == FCmpInst::FCMP_OGT || Pred == FCmpInst::FCMP_UGT)) {
  2094. Value *Fabs = Builder.CreateUnaryIntrinsic(Intrinsic::fabs, TrueVal, FSub);
  2095. return replaceInstUsesWith(SI, Fabs);
  2096. }
  2097. // With nnan and nsz:
  2098. // (X < +/-0.0) ? -X : X --> fabs(X)
  2099. // (X <= +/-0.0) ? -X : X --> fabs(X)
  2100. Instruction *FNeg;
  2101. if (match(CondVal, m_FCmp(Pred, m_Specific(FalseVal), m_AnyZeroFP())) &&
  2102. match(TrueVal, m_FNeg(m_Specific(FalseVal))) &&
  2103. match(TrueVal, m_Instruction(FNeg)) &&
  2104. FNeg->hasNoNaNs() && FNeg->hasNoSignedZeros() &&
  2105. (Pred == FCmpInst::FCMP_OLT || Pred == FCmpInst::FCMP_OLE ||
  2106. Pred == FCmpInst::FCMP_ULT || Pred == FCmpInst::FCMP_ULE)) {
  2107. Value *Fabs = Builder.CreateUnaryIntrinsic(Intrinsic::fabs, FalseVal, FNeg);
  2108. return replaceInstUsesWith(SI, Fabs);
  2109. }
  2110. // With nnan and nsz:
  2111. // (X > +/-0.0) ? X : -X --> fabs(X)
  2112. // (X >= +/-0.0) ? X : -X --> fabs(X)
  2113. if (match(CondVal, m_FCmp(Pred, m_Specific(TrueVal), m_AnyZeroFP())) &&
  2114. match(FalseVal, m_FNeg(m_Specific(TrueVal))) &&
  2115. match(FalseVal, m_Instruction(FNeg)) &&
  2116. FNeg->hasNoNaNs() && FNeg->hasNoSignedZeros() &&
  2117. (Pred == FCmpInst::FCMP_OGT || Pred == FCmpInst::FCMP_OGE ||
  2118. Pred == FCmpInst::FCMP_UGT || Pred == FCmpInst::FCMP_UGE)) {
  2119. Value *Fabs = Builder.CreateUnaryIntrinsic(Intrinsic::fabs, TrueVal, FNeg);
  2120. return replaceInstUsesWith(SI, Fabs);
  2121. }
  2122. // See if we are selecting two values based on a comparison of the two values.
  2123. if (ICmpInst *ICI = dyn_cast<ICmpInst>(CondVal))
  2124. if (Instruction *Result = foldSelectInstWithICmp(SI, ICI))
  2125. return Result;
  2126. if (Instruction *Add = foldAddSubSelect(SI, Builder))
  2127. return Add;
  2128. // Turn (select C, (op X, Y), (op X, Z)) -> (op X, (select C, Y, Z))
  2129. auto *TI = dyn_cast<Instruction>(TrueVal);
  2130. auto *FI = dyn_cast<Instruction>(FalseVal);
  2131. if (TI && FI && TI->getOpcode() == FI->getOpcode())
  2132. if (Instruction *IV = foldSelectOpOp(SI, TI, FI))
  2133. return IV;
  2134. if (Instruction *I = foldSelectExtConst(SI))
  2135. return I;
  2136. // See if we can fold the select into one of our operands.
  2137. if (SelType->isIntOrIntVectorTy() || SelType->isFPOrFPVectorTy()) {
  2138. if (Instruction *FoldI = foldSelectIntoOp(SI, TrueVal, FalseVal))
  2139. return FoldI;
  2140. Value *LHS, *RHS;
  2141. Instruction::CastOps CastOp;
  2142. SelectPatternResult SPR = matchSelectPattern(&SI, LHS, RHS, &CastOp);
  2143. auto SPF = SPR.Flavor;
  2144. if (SPF) {
  2145. Value *LHS2, *RHS2;
  2146. if (SelectPatternFlavor SPF2 = matchSelectPattern(LHS, LHS2, RHS2).Flavor)
  2147. if (Instruction *R = foldSPFofSPF(cast<Instruction>(LHS), SPF2, LHS2,
  2148. RHS2, SI, SPF, RHS))
  2149. return R;
  2150. if (SelectPatternFlavor SPF2 = matchSelectPattern(RHS, LHS2, RHS2).Flavor)
  2151. if (Instruction *R = foldSPFofSPF(cast<Instruction>(RHS), SPF2, LHS2,
  2152. RHS2, SI, SPF, LHS))
  2153. return R;
  2154. // TODO.
  2155. // ABS(-X) -> ABS(X)
  2156. }
  2157. if (SelectPatternResult::isMinOrMax(SPF)) {
  2158. // Canonicalize so that
  2159. // - type casts are outside select patterns.
  2160. // - float clamp is transformed to min/max pattern
  2161. bool IsCastNeeded = LHS->getType() != SelType;
  2162. Value *CmpLHS = cast<CmpInst>(CondVal)->getOperand(0);
  2163. Value *CmpRHS = cast<CmpInst>(CondVal)->getOperand(1);
  2164. if (IsCastNeeded ||
  2165. (LHS->getType()->isFPOrFPVectorTy() &&
  2166. ((CmpLHS != LHS && CmpLHS != RHS) ||
  2167. (CmpRHS != LHS && CmpRHS != RHS)))) {
  2168. CmpInst::Predicate MinMaxPred = getMinMaxPred(SPF, SPR.Ordered);
  2169. Value *Cmp;
  2170. if (CmpInst::isIntPredicate(MinMaxPred)) {
  2171. Cmp = Builder.CreateICmp(MinMaxPred, LHS, RHS);
  2172. } else {
  2173. IRBuilder<>::FastMathFlagGuard FMFG(Builder);
  2174. auto FMF =
  2175. cast<FPMathOperator>(SI.getCondition())->getFastMathFlags();
  2176. Builder.setFastMathFlags(FMF);
  2177. Cmp = Builder.CreateFCmp(MinMaxPred, LHS, RHS);
  2178. }
  2179. Value *NewSI = Builder.CreateSelect(Cmp, LHS, RHS, SI.getName(), &SI);
  2180. if (!IsCastNeeded)
  2181. return replaceInstUsesWith(SI, NewSI);
  2182. Value *NewCast = Builder.CreateCast(CastOp, NewSI, SelType);
  2183. return replaceInstUsesWith(SI, NewCast);
  2184. }
  2185. // MAX(~a, ~b) -> ~MIN(a, b)
  2186. // MAX(~a, C) -> ~MIN(a, ~C)
  2187. // MIN(~a, ~b) -> ~MAX(a, b)
  2188. // MIN(~a, C) -> ~MAX(a, ~C)
  2189. auto moveNotAfterMinMax = [&](Value *X, Value *Y) -> Instruction * {
  2190. Value *A;
  2191. if (match(X, m_Not(m_Value(A))) && !X->hasNUsesOrMore(3) &&
  2192. !isFreeToInvert(A, A->hasOneUse()) &&
  2193. // Passing false to only consider m_Not and constants.
  2194. isFreeToInvert(Y, false)) {
  2195. Value *B = Builder.CreateNot(Y);
  2196. Value *NewMinMax = createMinMax(Builder, getInverseMinMaxFlavor(SPF),
  2197. A, B);
  2198. // Copy the profile metadata.
  2199. if (MDNode *MD = SI.getMetadata(LLVMContext::MD_prof)) {
  2200. cast<SelectInst>(NewMinMax)->setMetadata(LLVMContext::MD_prof, MD);
  2201. // Swap the metadata if the operands are swapped.
  2202. if (X == SI.getFalseValue() && Y == SI.getTrueValue())
  2203. cast<SelectInst>(NewMinMax)->swapProfMetadata();
  2204. }
  2205. return BinaryOperator::CreateNot(NewMinMax);
  2206. }
  2207. return nullptr;
  2208. };
  2209. if (Instruction *I = moveNotAfterMinMax(LHS, RHS))
  2210. return I;
  2211. if (Instruction *I = moveNotAfterMinMax(RHS, LHS))
  2212. return I;
  2213. if (Instruction *I = moveAddAfterMinMax(SPF, LHS, RHS, Builder))
  2214. return I;
  2215. if (Instruction *I = factorizeMinMaxTree(SPF, LHS, RHS, Builder))
  2216. return I;
  2217. if (Instruction *I = matchSAddSubSat(SI))
  2218. return I;
  2219. }
  2220. }
  2221. // Canonicalize select of FP values where NaN and -0.0 are not valid as
  2222. // minnum/maxnum intrinsics.
  if (isa<FPMathOperator>(SI) && SI.hasNoNaNs() && SI.hasNoSignedZeros()) {
    Value *X, *Y;
    if (match(&SI, m_OrdFMax(m_Value(X), m_Value(Y))))
      return replaceInstUsesWith(
          SI, Builder.CreateBinaryIntrinsic(Intrinsic::maxnum, X, Y, &SI));

    if (match(&SI, m_OrdFMin(m_Value(X), m_Value(Y))))
      return replaceInstUsesWith(
          SI, Builder.CreateBinaryIntrinsic(Intrinsic::minnum, X, Y, &SI));
  }
  // See if we can fold the select into a phi node if the condition is a phi
  // node.
  if (auto *PN = dyn_cast<PHINode>(SI.getCondition()))
    // The true/false values have to be live in the PHI predecessor's blocks.
    if (canSelectOperandBeMappingIntoPredBlock(TrueVal, SI) &&
        canSelectOperandBeMappingIntoPredBlock(FalseVal, SI))
      if (Instruction *NV = foldOpIntoPhi(SI, PN))
        return NV;
  if (SelectInst *TrueSI = dyn_cast<SelectInst>(TrueVal)) {
    if (TrueSI->getCondition()->getType() == CondVal->getType()) {
      // select(C, select(C, a, b), c) -> select(C, a, c)
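      // (If the inner select's true value is the inner select itself -- a
      // degenerate self-reference that can only appear in unreachable code --
      // bail out rather than report a change that never changes anything.)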
      if (TrueSI->getCondition() == CondVal) {
        if (SI.getTrueValue() == TrueSI->getTrueValue())
          return nullptr;
        SI.setOperand(1, TrueSI->getTrueValue());
        return &SI;
      }

      // select(C0, select(C1, a, b), b) -> select(C0&C1, a, b)
      // We choose this as normal form to enable folding on the And and
      // shortening paths for the values (this helps GetUnderlyingObjects() for
      // example).
      if (TrueSI->getFalseValue() == FalseVal && TrueSI->hasOneUse()) {
        Value *And = Builder.CreateAnd(CondVal, TrueSI->getCondition());
        SI.setOperand(0, And);
        SI.setOperand(1, TrueSI->getTrueValue());
        return &SI;
      }
    }
  }
  if (SelectInst *FalseSI = dyn_cast<SelectInst>(FalseVal)) {
    if (FalseSI->getCondition()->getType() == CondVal->getType()) {
      // select(C, a, select(C, b, c)) -> select(C, a, c)
      if (FalseSI->getCondition() == CondVal) {
        if (SI.getFalseValue() == FalseSI->getFalseValue())
          return nullptr;
        SI.setOperand(2, FalseSI->getFalseValue());
        return &SI;
      }

      // select(C0, a, select(C1, a, b)) -> select(C0|C1, a, b)
      if (FalseSI->getTrueValue() == TrueVal && FalseSI->hasOneUse()) {
        Value *Or = Builder.CreateOr(CondVal, FalseSI->getCondition());
        SI.setOperand(0, Or);
        SI.setOperand(2, FalseSI->getFalseValue());
        return &SI;
      }
    }
  }

  auto canMergeSelectThroughBinop = [](BinaryOperator *BO) {
    // The select might be preventing a division by 0.
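    // e.g. (illustrative): in select(C, udiv(W, select(C, X, 1)), Z) the inner
    // select may exist only to guarantee a nonzero divisor when C is false;
    // rewriting that divisor to X could introduce a division by zero.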
    switch (BO->getOpcode()) {
    default:
      return true;
    case Instruction::SRem:
    case Instruction::URem:
    case Instruction::SDiv:
    case Instruction::UDiv:
      return false;
    }
  };

  // Try to simplify a binop sandwiched between 2 selects with the same
  // condition.
  // select(C, binop(select(C, X, Y), W), Z) -> select(C, binop(X, W), Z)
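  // This is sound because when C is true the inner select already yields X,
  // and when C is false the binop's result is discarded by the outer select;
  // the div/rem opcodes excluded above are the cases where merely executing
  // the rewritten binop could be undefined.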
  BinaryOperator *TrueBO;
  if (match(TrueVal, m_OneUse(m_BinOp(TrueBO))) &&
      canMergeSelectThroughBinop(TrueBO)) {
    if (auto *TrueBOSI = dyn_cast<SelectInst>(TrueBO->getOperand(0))) {
      if (TrueBOSI->getCondition() == CondVal) {
        TrueBO->setOperand(0, TrueBOSI->getTrueValue());
        Worklist.Add(TrueBO);
        return &SI;
      }
    }
    if (auto *TrueBOSI = dyn_cast<SelectInst>(TrueBO->getOperand(1))) {
      if (TrueBOSI->getCondition() == CondVal) {
        TrueBO->setOperand(1, TrueBOSI->getTrueValue());
        Worklist.Add(TrueBO);
        return &SI;
      }
    }
  }

  // select(C, Z, binop(select(C, X, Y), W)) -> select(C, Z, binop(Y, W))
  BinaryOperator *FalseBO;
  if (match(FalseVal, m_OneUse(m_BinOp(FalseBO))) &&
      canMergeSelectThroughBinop(FalseBO)) {
    if (auto *FalseBOSI = dyn_cast<SelectInst>(FalseBO->getOperand(0))) {
      if (FalseBOSI->getCondition() == CondVal) {
        FalseBO->setOperand(0, FalseBOSI->getFalseValue());
        Worklist.Add(FalseBO);
        return &SI;
      }
    }
    if (auto *FalseBOSI = dyn_cast<SelectInst>(FalseBO->getOperand(1))) {
      if (FalseBOSI->getCondition() == CondVal) {
        FalseBO->setOperand(1, FalseBOSI->getFalseValue());
        Worklist.Add(FalseBO);
        return &SI;
      }
    }
  }
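  // select(!C, X, Y) -> select(C, Y, X)
  // Hoist the 'not' out of the condition by swapping the select arms (and the
  // profile branch weights along with them).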
  Value *NotCond;
  if (match(CondVal, m_Not(m_Value(NotCond)))) {
    SI.setOperand(0, NotCond);
    SI.setOperand(1, FalseVal);
    SI.setOperand(2, TrueVal);
    SI.swapProfMetadata();
    return &SI;
  }
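  // For vector selects, see if demanded-elements analysis can simplify the
  // operands or the select itself.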
  if (VectorType *VecTy = dyn_cast<VectorType>(SelType)) {
    unsigned VWidth = VecTy->getNumElements();
    APInt UndefElts(VWidth, 0);
    APInt AllOnesEltMask(APInt::getAllOnesValue(VWidth));
    if (Value *V = SimplifyDemandedVectorElts(&SI, AllOnesEltMask, UndefElts)) {
      if (V != &SI)
        return replaceInstUsesWith(SI, V);
      return &SI;
    }
  }

  // If we can compute the condition, there's no need for a select.
  // Like the above fold, we are attempting to reduce compile-time cost by
  // putting this fold here with limitations rather than in InstSimplify.
  // The motivation for this call into value tracking is to take advantage of
  // the assumption cache, so make sure that is populated.
  if (!CondVal->getType()->isVectorTy() && !AC.assumptions().empty()) {
    KnownBits Known(1);
    computeKnownBits(CondVal, Known, 0, &SI);
    if (Known.One.isOneValue())
      return replaceInstUsesWith(SI, TrueVal);
    if (Known.Zero.isOneValue())
      return replaceInstUsesWith(SI, FalseVal);
  }

  if (Instruction *BitCastSel = foldSelectCmpBitcasts(SI, Builder))
    return BitCastSel;

  // Simplify selects that test the returned flag of cmpxchg instructions.
  if (Instruction *Select = foldSelectCmpXchg(SI))
    return Select;

  if (Instruction *Select = foldSelectBinOpIdentity(SI, TLI))
    return Select;
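  // Try to recognize a rotate idiom guarded by a select and replace it with a
  // funnel-shift intrinsic (see foldSelectRotate).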
  if (Instruction *Rot = foldSelectRotate(SI))
    return Rot;

  return nullptr;
}