InstructionCombining.cpp 126 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277
72778277927802781278227832784278527862787278827892790279127922793279427952796279727982799280028012802280328042805280628072808280928102811281228132814281528162817281828192820282128222823282428252826282728282829283028312832283328342835283628372838283928402841284228432844284528462847284828492850285128522853285428552856285728582859286028612862286328642865286628672868286928702871287228732874287528762877287828792880288128822883288428852886288728882889289028912892289328942895289628972898289929002901290229032904290529062907290829092910291129122913291429152916291729182919292029212922292329242925292629272928292929302931293229332934293529362937293829392940294129422943294429452946294729482949295029512952295329542955295629572958295929602961296229632964296529662967296829692970297129722973297429752976297729782979298029812982298329842985298629872988298929902991299229932994299529962997299829993000300130023003300430053006300730083009301030113012301330143015301630173018301930203021302230233024302530263027302830293030303130323033303430353036303730383039304030413042304330443045304630473048304930503051305230533054305530563057305830593060306130623063306430653066306730683069307030713072307330743075307630773078307930803081308230833084308530863087308830893090309130923093309430953096309730983099310031013102310331043105310631073108310931103111311231133114311531163117311831193120312131223123312431253126312731283129313031313132313331343135313631373138313931403141314231433144314531463147314831493150315131523153315431553156315731583159316031613162316331643165316631673168316931703171317231733174317531763177317831793180318131823183318431853186318731883189319031913192319331943195319631973198319932003201320232033204320532063207320832093210321132123213321432153216321732183219322032213222322332243225322632273228322932303231323232333234323532363237323832393240324132423243324432453246
  1. //===- InstructionCombining.cpp - Combine multiple instructions -----------===//
  2. //
  3. // The LLVM Compiler Infrastructure
  4. //
  5. // This file is distributed under the University of Illinois Open Source
  6. // License. See LICENSE.TXT for details.
  7. //
  8. //===----------------------------------------------------------------------===//
  9. //
  10. // InstructionCombining - Combine instructions to form fewer, simple
  11. // instructions. This pass does not modify the CFG. This pass is where
  12. // algebraic simplification happens.
  13. //
  14. // This pass combines things like:
  15. // %Y = add i32 %X, 1
  16. // %Z = add i32 %Y, 1
  17. // into:
  18. // %Z = add i32 %X, 2
  19. //
  20. // This is a simple worklist driven algorithm.
  21. //
  22. // This pass guarantees that the following canonicalizations are performed on
  23. // the program:
  24. // 1. If a binary operator has a constant operand, it is moved to the RHS
  25. // 2. Bitwise operators with constant operands are always grouped so that
  26. // shifts are performed first, then or's, then and's, then xor's.
  27. // 3. Compare instructions are converted from <,>,<=,>= to ==,!= if possible
  28. // 4. All cmp instructions on boolean values are replaced with logical ops
  29. // 5. add X, X is represented as (X*2) => (X << 1)
  30. // 6. Multiplies with a power-of-two constant argument are transformed into
  31. // shifts.
  32. // ... etc.
  33. //
  34. //===----------------------------------------------------------------------===//
  35. #include "llvm/Transforms/InstCombine/InstCombine.h"
  36. #include "InstCombineInternal.h"
  37. #include "llvm-c/Initialization.h"
  38. #include "llvm/ADT/SmallPtrSet.h"
  39. #include "llvm/ADT/Statistic.h"
  40. #include "llvm/ADT/StringSwitch.h"
  41. #include "llvm/Analysis/AliasAnalysis.h"
  42. #include "llvm/Analysis/AssumptionCache.h"
  43. #include "llvm/Analysis/BasicAliasAnalysis.h"
  44. #include "llvm/Analysis/CFG.h"
  45. #include "llvm/Analysis/ConstantFolding.h"
  46. #include "llvm/Analysis/EHPersonalities.h"
  47. #include "llvm/Analysis/GlobalsModRef.h"
  48. #include "llvm/Analysis/InstructionSimplify.h"
  49. #include "llvm/Analysis/LoopInfo.h"
  50. #include "llvm/Analysis/MemoryBuiltins.h"
  51. #include "llvm/Analysis/TargetLibraryInfo.h"
  52. #include "llvm/Analysis/ValueTracking.h"
  53. #include "llvm/IR/CFG.h"
  54. #include "llvm/IR/DataLayout.h"
  55. #include "llvm/IR/Dominators.h"
  56. #include "llvm/IR/GetElementPtrTypeIterator.h"
  57. #include "llvm/IR/IntrinsicInst.h"
  58. #include "llvm/IR/PatternMatch.h"
  59. #include "llvm/IR/ValueHandle.h"
  60. #include "llvm/Support/CommandLine.h"
  61. #include "llvm/Support/Debug.h"
  62. #include "llvm/Support/raw_ostream.h"
  63. #include "llvm/Transforms/Scalar.h"
  64. #include "llvm/Transforms/Utils/Local.h"
  65. #include <algorithm>
  66. #include <climits>
  67. using namespace llvm;
  68. using namespace llvm::PatternMatch;
// Debug identifier for this pass, used by DEBUG_TYPE-aware macros below
// (STATISTIC, debug output).
  69. #define DEBUG_TYPE "instcombine"
// Pass statistics (reported with -stats). Each counter is bumped by the
// transformation it names; NumReassoc, NumFactor, etc. are referenced by the
// functions later in this file.
  70. STATISTIC(NumCombined , "Number of insts combined");
  71. STATISTIC(NumConstProp, "Number of constant folds");
  72. STATISTIC(NumDeadInst , "Number of dead inst eliminated");
  73. STATISTIC(NumSunkInst , "Number of instructions sunk");
  74. STATISTIC(NumExpand, "Number of expansions");
  75. STATISTIC(NumFactor , "Number of factorizations");
  76. STATISTIC(NumReassoc , "Number of reassociations");
// Command-line knobs for this pass.
// -expensive-combines: gate for combines that are costly to attempt.
  77. static cl::opt<bool>
  78. EnableExpensiveCombines("expensive-combines",
  79. cl::desc("Enable expensive instruction combines"));
// -instcombine-maxarray-size: upper bound on array sizes this pass is willing
// to reason about (default 1024).
  80. static cl::opt<unsigned>
  81. MaxArraySize("instcombine-maxarray-size", cl::init(1024),
  82. cl::desc("Maximum array size considered when doing a combine"));
  83. Value *InstCombiner::EmitGEPOffset(User *GEP) {
  84. return llvm::EmitGEPOffset(Builder, DL, GEP);
  85. }
  86. /// Return true if it is desirable to convert an integer computation from a
  87. /// given bit width to a new bit width.
  88. /// We don't want to convert from a legal to an illegal type or from a smaller
  89. /// to a larger illegal type. A width of '1' is always treated as a legal type
  90. /// because i1 is a fundamental type in IR, and there are many specialized
  91. /// optimizations for i1 types.
  92. bool InstCombiner::shouldChangeType(unsigned FromWidth,
  93. unsigned ToWidth) const {
  94. bool FromLegal = FromWidth == 1 || DL.isLegalInteger(FromWidth);
  95. bool ToLegal = ToWidth == 1 || DL.isLegalInteger(ToWidth);
  96. // If this is a legal integer from type, and the result would be an illegal
  97. // type, don't do the transformation.
  98. if (FromLegal && !ToLegal)
  99. return false;
  100. // Otherwise, if both are illegal, do not increase the size of the result. We
  101. // do allow things like i160 -> i64, but not i64 -> i160.
  102. if (!FromLegal && !ToLegal && ToWidth > FromWidth)
  103. return false;
  104. return true;
  105. }
  106. /// Return true if it is desirable to convert a computation from 'From' to 'To'.
  107. /// We don't want to convert from a legal to an illegal type or from a smaller
  108. /// to a larger illegal type. i1 is always treated as a legal type because it is
  109. /// a fundamental type in IR, and there are many specialized optimizations for
  110. /// i1 types.
  111. bool InstCombiner::shouldChangeType(Type *From, Type *To) const {
  112. assert(From->isIntegerTy() && To->isIntegerTy());
  113. unsigned FromWidth = From->getPrimitiveSizeInBits();
  114. unsigned ToWidth = To->getPrimitiveSizeInBits();
  115. return shouldChangeType(FromWidth, ToWidth);
  116. }
  117. // Return true, if No Signed Wrap should be maintained for I.
  118. // The No Signed Wrap flag can be kept if the operation "B (I.getOpcode) C",
  119. // where both B and C should be ConstantInts, results in a constant that does
  120. // not overflow. This function only handles the Add and Sub opcodes. For
  121. // all other opcodes, the function conservatively returns false.
  122. static bool MaintainNoSignedWrap(BinaryOperator &I, Value *B, Value *C) {
  123. OverflowingBinaryOperator *OBO = dyn_cast<OverflowingBinaryOperator>(&I);
  124. if (!OBO || !OBO->hasNoSignedWrap())
  125. return false;
  126. // We reason about Add and Sub Only.
  127. Instruction::BinaryOps Opcode = I.getOpcode();
  128. if (Opcode != Instruction::Add && Opcode != Instruction::Sub)
  129. return false;
  130. const APInt *BVal, *CVal;
  131. if (!match(B, m_APInt(BVal)) || !match(C, m_APInt(CVal)))
  132. return false;
  133. bool Overflow = false;
  134. if (Opcode == Instruction::Add)
  135. BVal->sadd_ov(*CVal, Overflow);
  136. else
  137. BVal->ssub_ov(*CVal, Overflow);
  138. return !Overflow;
  139. }
  140. /// Conservatively clears subclassOptionalData after a reassociation or
  141. /// commutation. We preserve fast-math flags when applicable as they can be
  142. /// preserved.
  143. static void ClearSubclassDataAfterReassociation(BinaryOperator &I) {
  144. FPMathOperator *FPMO = dyn_cast<FPMathOperator>(&I);
  145. if (!FPMO) {
  146. I.clearSubclassOptionalData();
  147. return;
  148. }
  149. FastMathFlags FMF = I.getFastMathFlags();
  150. I.clearSubclassOptionalData();
  151. I.setFastMathFlags(FMF);
  152. }
  153. /// Combine constant operands of associative operations either before or after a
  154. /// cast to eliminate one of the associative operations:
  155. /// (op (cast (op X, C2)), C1) --> (cast (op X, op (C1, C2)))
  156. /// (op (cast (op X, C2)), C1) --> (op (cast X), op (C1, C2))
  157. static bool simplifyAssocCastAssoc(BinaryOperator *BinOp1) {
  158. auto *Cast = dyn_cast<CastInst>(BinOp1->getOperand(0));
  159. if (!Cast || !Cast->hasOneUse())
  160. return false;
  161. // TODO: Enhance logic for other casts and remove this check.
  162. auto CastOpcode = Cast->getOpcode();
  163. if (CastOpcode != Instruction::ZExt)
  164. return false;
  165. // TODO: Enhance logic for other BinOps and remove this check.
  166. if (!BinOp1->isBitwiseLogicOp())
  167. return false;
  168. auto AssocOpcode = BinOp1->getOpcode();
  169. auto *BinOp2 = dyn_cast<BinaryOperator>(Cast->getOperand(0));
  170. if (!BinOp2 || !BinOp2->hasOneUse() || BinOp2->getOpcode() != AssocOpcode)
  171. return false;
  172. Constant *C1, *C2;
  173. if (!match(BinOp1->getOperand(1), m_Constant(C1)) ||
  174. !match(BinOp2->getOperand(1), m_Constant(C2)))
  175. return false;
  176. // TODO: This assumes a zext cast.
  177. // Eg, if it was a trunc, we'd cast C1 to the source type because casting C2
  178. // to the destination type might lose bits.
  179. // Fold the constants together in the destination type:
  180. // (op (cast (op X, C2)), C1) --> (op (cast X), FoldedC)
  181. Type *DestTy = C1->getType();
  182. Constant *CastC2 = ConstantExpr::getCast(CastOpcode, C2, DestTy);
  183. Constant *FoldedC = ConstantExpr::get(AssocOpcode, C1, CastC2);
  184. Cast->setOperand(0, BinOp2->getOperand(0));
  185. BinOp1->setOperand(1, FoldedC);
  186. return true;
  187. }
  188. /// This performs a few simplifications for operators that are associative or
  189. /// commutative:
  190. ///
  191. /// Commutative operators:
  192. ///
  193. /// 1. Order operands such that they are listed from right (least complex) to
  194. /// left (most complex). This puts constants before unary operators before
  195. /// binary operators.
  196. ///
  197. /// Associative operators:
  198. ///
  199. /// 2. Transform: "(A op B) op C" ==> "A op (B op C)" if "B op C" simplifies.
  200. /// 3. Transform: "A op (B op C)" ==> "(A op B) op C" if "A op B" simplifies.
  201. ///
  202. /// Associative and commutative operators:
  203. ///
  204. /// 4. Transform: "(A op B) op C" ==> "(C op A) op B" if "C op A" simplifies.
  205. /// 5. Transform: "A op (B op C)" ==> "B op (C op A)" if "C op A" simplifies.
  206. /// 6. Transform: "(A op C1) op (B op C2)" ==> "(A op B) op (C1 op C2)"
  207. /// if C1 and C2 are constants.
  208. bool InstCombiner::SimplifyAssociativeOrCommutative(BinaryOperator &I) {
  209. Instruction::BinaryOps Opcode = I.getOpcode();
  210. bool Changed = false;
  211. do {
  212. // Order operands such that they are listed from right (least complex) to
  213. // left (most complex). This puts constants before unary operators before
  214. // binary operators.
  215. if (I.isCommutative() && getComplexity(I.getOperand(0)) <
  216. getComplexity(I.getOperand(1)))
  217. Changed = !I.swapOperands();
  218. BinaryOperator *Op0 = dyn_cast<BinaryOperator>(I.getOperand(0));
  219. BinaryOperator *Op1 = dyn_cast<BinaryOperator>(I.getOperand(1));
  220. if (I.isAssociative()) {
  221. // Transform: "(A op B) op C" ==> "A op (B op C)" if "B op C" simplifies.
  222. if (Op0 && Op0->getOpcode() == Opcode) {
  223. Value *A = Op0->getOperand(0);
  224. Value *B = Op0->getOperand(1);
  225. Value *C = I.getOperand(1);
  226. // Does "B op C" simplify?
  227. if (Value *V = SimplifyBinOp(Opcode, B, C, DL)) {
  228. // It simplifies to V. Form "A op V".
  229. I.setOperand(0, A);
  230. I.setOperand(1, V);
  231. // Conservatively clear the optional flags, since they may not be
  232. // preserved by the reassociation.
  233. if (MaintainNoSignedWrap(I, B, C) &&
  234. (!Op0 || (isa<BinaryOperator>(Op0) && Op0->hasNoSignedWrap()))) {
  235. // Note: this is only valid because SimplifyBinOp doesn't look at
  236. // the operands to Op0.
  237. I.clearSubclassOptionalData();
  238. I.setHasNoSignedWrap(true);
  239. } else {
  240. ClearSubclassDataAfterReassociation(I);
  241. }
  242. Changed = true;
  243. ++NumReassoc;
  244. continue;
  245. }
  246. }
  247. // Transform: "A op (B op C)" ==> "(A op B) op C" if "A op B" simplifies.
  248. if (Op1 && Op1->getOpcode() == Opcode) {
  249. Value *A = I.getOperand(0);
  250. Value *B = Op1->getOperand(0);
  251. Value *C = Op1->getOperand(1);
  252. // Does "A op B" simplify?
  253. if (Value *V = SimplifyBinOp(Opcode, A, B, DL)) {
  254. // It simplifies to V. Form "V op C".
  255. I.setOperand(0, V);
  256. I.setOperand(1, C);
  257. // Conservatively clear the optional flags, since they may not be
  258. // preserved by the reassociation.
  259. ClearSubclassDataAfterReassociation(I);
  260. Changed = true;
  261. ++NumReassoc;
  262. continue;
  263. }
  264. }
  265. }
  266. if (I.isAssociative() && I.isCommutative()) {
  267. if (simplifyAssocCastAssoc(&I)) {
  268. Changed = true;
  269. ++NumReassoc;
  270. continue;
  271. }
  272. // Transform: "(A op B) op C" ==> "(C op A) op B" if "C op A" simplifies.
  273. if (Op0 && Op0->getOpcode() == Opcode) {
  274. Value *A = Op0->getOperand(0);
  275. Value *B = Op0->getOperand(1);
  276. Value *C = I.getOperand(1);
  277. // Does "C op A" simplify?
  278. if (Value *V = SimplifyBinOp(Opcode, C, A, DL)) {
  279. // It simplifies to V. Form "V op B".
  280. I.setOperand(0, V);
  281. I.setOperand(1, B);
  282. // Conservatively clear the optional flags, since they may not be
  283. // preserved by the reassociation.
  284. ClearSubclassDataAfterReassociation(I);
  285. Changed = true;
  286. ++NumReassoc;
  287. continue;
  288. }
  289. }
  290. // Transform: "A op (B op C)" ==> "B op (C op A)" if "C op A" simplifies.
  291. if (Op1 && Op1->getOpcode() == Opcode) {
  292. Value *A = I.getOperand(0);
  293. Value *B = Op1->getOperand(0);
  294. Value *C = Op1->getOperand(1);
  295. // Does "C op A" simplify?
  296. if (Value *V = SimplifyBinOp(Opcode, C, A, DL)) {
  297. // It simplifies to V. Form "B op V".
  298. I.setOperand(0, B);
  299. I.setOperand(1, V);
  300. // Conservatively clear the optional flags, since they may not be
  301. // preserved by the reassociation.
  302. ClearSubclassDataAfterReassociation(I);
  303. Changed = true;
  304. ++NumReassoc;
  305. continue;
  306. }
  307. }
  308. // Transform: "(A op C1) op (B op C2)" ==> "(A op B) op (C1 op C2)"
  309. // if C1 and C2 are constants.
  310. if (Op0 && Op1 &&
  311. Op0->getOpcode() == Opcode && Op1->getOpcode() == Opcode &&
  312. isa<Constant>(Op0->getOperand(1)) &&
  313. isa<Constant>(Op1->getOperand(1)) &&
  314. Op0->hasOneUse() && Op1->hasOneUse()) {
  315. Value *A = Op0->getOperand(0);
  316. Constant *C1 = cast<Constant>(Op0->getOperand(1));
  317. Value *B = Op1->getOperand(0);
  318. Constant *C2 = cast<Constant>(Op1->getOperand(1));
  319. Constant *Folded = ConstantExpr::get(Opcode, C1, C2);
  320. BinaryOperator *New = BinaryOperator::Create(Opcode, A, B);
  321. if (isa<FPMathOperator>(New)) {
  322. FastMathFlags Flags = I.getFastMathFlags();
  323. Flags &= Op0->getFastMathFlags();
  324. Flags &= Op1->getFastMathFlags();
  325. New->setFastMathFlags(Flags);
  326. }
  327. InsertNewInstWith(New, I);
  328. New->takeName(Op1);
  329. I.setOperand(0, New);
  330. I.setOperand(1, Folded);
  331. // Conservatively clear the optional flags, since they may not be
  332. // preserved by the reassociation.
  333. ClearSubclassDataAfterReassociation(I);
  334. Changed = true;
  335. continue;
  336. }
  337. }
  338. // No further simplifications.
  339. return Changed;
  340. } while (1);
  341. }
  342. /// Return whether "X LOp (Y ROp Z)" is always equal to
  343. /// "(X LOp Y) ROp (X LOp Z)".
  344. static bool LeftDistributesOverRight(Instruction::BinaryOps LOp,
  345. Instruction::BinaryOps ROp) {
  346. switch (LOp) {
  347. default:
  348. return false;
  349. case Instruction::And:
  350. // And distributes over Or and Xor.
  351. switch (ROp) {
  352. default:
  353. return false;
  354. case Instruction::Or:
  355. case Instruction::Xor:
  356. return true;
  357. }
  358. case Instruction::Mul:
  359. // Multiplication distributes over addition and subtraction.
  360. switch (ROp) {
  361. default:
  362. return false;
  363. case Instruction::Add:
  364. case Instruction::Sub:
  365. return true;
  366. }
  367. case Instruction::Or:
  368. // Or distributes over And.
  369. switch (ROp) {
  370. default:
  371. return false;
  372. case Instruction::And:
  373. return true;
  374. }
  375. }
  376. }
  377. /// Return whether "(X LOp Y) ROp Z" is always equal to
  378. /// "(X ROp Z) LOp (Y ROp Z)".
  379. static bool RightDistributesOverLeft(Instruction::BinaryOps LOp,
  380. Instruction::BinaryOps ROp) {
  381. if (Instruction::isCommutative(ROp))
  382. return LeftDistributesOverRight(ROp, LOp);
  383. switch (LOp) {
  384. default:
  385. return false;
  386. // (X >> Z) & (Y >> Z) -> (X&Y) >> Z for all shifts.
  387. // (X >> Z) | (Y >> Z) -> (X|Y) >> Z for all shifts.
  388. // (X >> Z) ^ (Y >> Z) -> (X^Y) >> Z for all shifts.
  389. case Instruction::And:
  390. case Instruction::Or:
  391. case Instruction::Xor:
  392. switch (ROp) {
  393. default:
  394. return false;
  395. case Instruction::Shl:
  396. case Instruction::LShr:
  397. case Instruction::AShr:
  398. return true;
  399. }
  400. }
  401. // TODO: It would be nice to handle division, aka "(X + Y)/Z = X/Z + Y/Z",
  402. // but this requires knowing that the addition does not overflow and other
  403. // such subtleties.
  404. return false;
  405. }
  406. /// This function returns identity value for given opcode, which can be used to
  407. /// factor patterns like (X * 2) + X ==> (X * 2) + (X * 1) ==> X * (2 + 1).
  408. static Value *getIdentityValue(Instruction::BinaryOps Opcode, Value *V) {
  409. if (isa<Constant>(V))
  410. return nullptr;
  411. return ConstantExpr::getBinOpIdentity(Opcode, V->getType());
  412. }
  413. /// This function factors binary ops which can be combined using distributive
  414. /// laws. This function tries to transform 'Op' based TopLevelOpcode to enable
  415. /// factorization e.g for ADD(SHL(X , 2), MUL(X, 5)), When this function called
  416. /// with TopLevelOpcode == Instruction::Add and Op = SHL(X, 2), transforms
  417. /// SHL(X, 2) to MUL(X, 4) i.e. returns Instruction::Mul with LHS set to 'X' and
  418. /// RHS to 4.
  419. static Instruction::BinaryOps
  420. getBinOpsForFactorization(Instruction::BinaryOps TopLevelOpcode,
  421. BinaryOperator *Op, Value *&LHS, Value *&RHS) {
  422. if (!Op)
  423. return Instruction::BinaryOpsEnd;
  424. LHS = Op->getOperand(0);
  425. RHS = Op->getOperand(1);
  426. switch (TopLevelOpcode) {
  427. default:
  428. return Op->getOpcode();
  429. case Instruction::Add:
  430. case Instruction::Sub:
  431. if (Op->getOpcode() == Instruction::Shl) {
  432. if (Constant *CST = dyn_cast<Constant>(Op->getOperand(1))) {
  433. // The multiplier is really 1 << CST.
  434. RHS = ConstantExpr::getShl(ConstantInt::get(Op->getType(), 1), CST);
  435. return Instruction::Mul;
  436. }
  437. }
  438. return Op->getOpcode();
  439. }
  440. // TODO: We can add other conversions e.g. shr => div etc.
  441. }
/// This tries to simplify binary operations by factorizing out common terms
/// (e. g. "(A*B)+(A*C)" -> "A*(B+C)").
///
/// \param InnerOpcode The opcode op' of both operand expressions, so that I
///        has the form "(A op' B) op (C op' D)".
/// \param A,B,C,D The four leaf operands; if any is null the caller failed to
///        match both sides and no factorization is attempted.
/// \returns The factorized value, or null if no simplification was possible.
static Value *tryFactorization(InstCombiner::BuilderTy *Builder,
                               const DataLayout &DL, BinaryOperator &I,
                               Instruction::BinaryOps InnerOpcode, Value *A,
                               Value *B, Value *C, Value *D) {
  // If any of A, B, C, D are null, we can not factor I, return early.
  // Checking A and C should be enough.
  if (!A || !C || !B || !D)
    return nullptr;

  Value *V = nullptr;
  Value *SimplifiedInst = nullptr;
  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
  Instruction::BinaryOps TopLevelOpcode = I.getOpcode();

  // Does "X op' Y" always equal "Y op' X"?
  bool InnerCommutative = Instruction::isCommutative(InnerOpcode);

  // Does "X op' (Y op Z)" always equal "(X op' Y) op (X op' Z)"?
  if (LeftDistributesOverRight(InnerOpcode, TopLevelOpcode))
    // Does the instruction have the form "(A op' B) op (A op' D)" or, in the
    // commutative case, "(A op' B) op (C op' A)"?
    if (A == C || (InnerCommutative && A == D)) {
      if (A != C)
        std::swap(C, D);
      // Consider forming "A op' (B op D)".
      // If "B op D" simplifies then it can be formed with no cost.
      V = SimplifyBinOp(TopLevelOpcode, B, D, DL);
      // If "B op D" doesn't simplify then only go on if both of the existing
      // operations "A op' B" and "C op' D" will be zapped as no longer used.
      if (!V && LHS->hasOneUse() && RHS->hasOneUse())
        V = Builder->CreateBinOp(TopLevelOpcode, B, D, RHS->getName());
      if (V) {
        SimplifiedInst = Builder->CreateBinOp(InnerOpcode, A, V);
      }
    }

  // Does "(X op Y) op' Z" always equal "(X op' Z) op (Y op' Z)"?
  if (!SimplifiedInst && RightDistributesOverLeft(TopLevelOpcode, InnerOpcode))
    // Does the instruction have the form "(A op' B) op (C op' B)" or, in the
    // commutative case, "(A op' B) op (B op' D)"?
    if (B == D || (InnerCommutative && B == C)) {
      if (B != D)
        std::swap(C, D);
      // Consider forming "(A op C) op' B".
      // If "A op C" simplifies then it can be formed with no cost.
      V = SimplifyBinOp(TopLevelOpcode, A, C, DL);

      // If "A op C" doesn't simplify then only go on if both of the existing
      // operations "A op' B" and "C op' D" will be zapped as no longer used.
      if (!V && LHS->hasOneUse() && RHS->hasOneUse())
        V = Builder->CreateBinOp(TopLevelOpcode, A, C, LHS->getName());
      if (V) {
        SimplifiedInst = Builder->CreateBinOp(InnerOpcode, V, B);
      }
    }

  if (SimplifiedInst) {
    ++NumFactor;
    SimplifiedInst->takeName(&I);

    // Check if we can add NSW flag to SimplifiedInst. If so, set NSW flag.
    // TODO: Check for NUW.
    if (BinaryOperator *BO = dyn_cast<BinaryOperator>(SimplifiedInst)) {
      if (isa<OverflowingBinaryOperator>(SimplifiedInst)) {
        // NSW is propagated only if I and both original inner ops carried it.
        bool HasNSW = false;
        if (isa<OverflowingBinaryOperator>(&I))
          HasNSW = I.hasNoSignedWrap();

        if (auto *LOBO = dyn_cast<OverflowingBinaryOperator>(LHS))
          HasNSW &= LOBO->hasNoSignedWrap();

        if (auto *ROBO = dyn_cast<OverflowingBinaryOperator>(RHS))
          HasNSW &= ROBO->hasNoSignedWrap();

        // We can propagate 'nsw' if we know that
        //  %Y = mul nsw i16 %X, C
        //  %Z = add nsw i16 %Y, %X
        // =>
        //  %Z = mul nsw i16 %X, C+1
        //
        // iff C+1 isn't INT_MIN
        const APInt *CInt;
        if (TopLevelOpcode == Instruction::Add &&
            InnerOpcode == Instruction::Mul)
          if (match(V, m_APInt(CInt)) && !CInt->isMinSignedValue())
            BO->setHasNoSignedWrap(HasNSW);
      }
    }
  }
  return SimplifiedInst;
}
/// This tries to simplify binary operations which some other binary operation
/// distributes over either by factorizing out common terms
/// (eg "(A*B)+(A*C)" -> "A*(B+C)") or expanding out if this results in
/// simplifications (eg: "A & (B | C) -> (A&B) | (A&C)" if this is a win).
/// Returns the simplified value, or null if it didn't simplify.
Value *InstCombiner::SimplifyUsingDistributiveLaws(BinaryOperator &I) {
  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
  BinaryOperator *Op0 = dyn_cast<BinaryOperator>(LHS);
  BinaryOperator *Op1 = dyn_cast<BinaryOperator>(RHS);

  // Factorization.
  Value *A = nullptr, *B = nullptr, *C = nullptr, *D = nullptr;
  auto TopLevelOpcode = I.getOpcode();
  // If a side is not a matching binop, its opcode is BinaryOpsEnd and its
  // operands stay null; tryFactorization bails out on null operands, so the
  // unmatched cases below are harmless.
  auto LHSOpcode = getBinOpsForFactorization(TopLevelOpcode, Op0, A, B);
  auto RHSOpcode = getBinOpsForFactorization(TopLevelOpcode, Op1, C, D);

  // The instruction has the form "(A op' B) op (C op' D)".  Try to factorize
  // a common term.
  if (LHSOpcode == RHSOpcode) {
    if (Value *V = tryFactorization(Builder, DL, I, LHSOpcode, A, B, C, D))
      return V;
  }

  // The instruction has the form "(A op' B) op (C)".  Try to factorize common
  // term.
  if (Value *V = tryFactorization(Builder, DL, I, LHSOpcode, A, B, RHS,
                                  getIdentityValue(LHSOpcode, RHS)))
    return V;

  // The instruction has the form "(B) op (C op' D)".  Try to factorize common
  // term.
  if (Value *V = tryFactorization(Builder, DL, I, RHSOpcode, LHS,
                                  getIdentityValue(RHSOpcode, LHS), C, D))
    return V;

  // Expansion.
  if (Op0 && RightDistributesOverLeft(Op0->getOpcode(), TopLevelOpcode)) {
    // The instruction has the form "(A op' B) op C".  See if expanding it out
    // to "(A op C) op' (B op C)" results in simplifications.
    Value *A = Op0->getOperand(0), *B = Op0->getOperand(1), *C = RHS;
    Instruction::BinaryOps InnerOpcode = Op0->getOpcode(); // op'

    // Do "A op C" and "B op C" both simplify?
    if (Value *L = SimplifyBinOp(TopLevelOpcode, A, C, DL))
      if (Value *R = SimplifyBinOp(TopLevelOpcode, B, C, DL)) {
        // They do! Return "L op' R".
        ++NumExpand;
        // If "L op' R" equals "A op' B" then "L op' R" is just the LHS.
        if ((L == A && R == B) ||
            (Instruction::isCommutative(InnerOpcode) && L == B && R == A))
          return Op0;
        // Otherwise return "L op' R" if it simplifies.
        if (Value *V = SimplifyBinOp(InnerOpcode, L, R, DL))
          return V;
        // Otherwise, create a new instruction.
        C = Builder->CreateBinOp(InnerOpcode, L, R);
        C->takeName(&I);
        return C;
      }
  }

  if (Op1 && LeftDistributesOverRight(TopLevelOpcode, Op1->getOpcode())) {
    // The instruction has the form "A op (B op' C)".  See if expanding it out
    // to "(A op B) op' (A op C)" results in simplifications.
    Value *A = LHS, *B = Op1->getOperand(0), *C = Op1->getOperand(1);
    Instruction::BinaryOps InnerOpcode = Op1->getOpcode(); // op'

    // Do "A op B" and "A op C" both simplify?
    if (Value *L = SimplifyBinOp(TopLevelOpcode, A, B, DL))
      if (Value *R = SimplifyBinOp(TopLevelOpcode, A, C, DL)) {
        // They do! Return "L op' R".
        ++NumExpand;
        // If "L op' R" equals "B op' C" then "L op' R" is just the RHS.
        if ((L == B && R == C) ||
            (Instruction::isCommutative(InnerOpcode) && L == C && R == B))
          return Op1;
        // Otherwise return "L op' R" if it simplifies.
        if (Value *V = SimplifyBinOp(InnerOpcode, L, R, DL))
          return V;
        // Otherwise, create a new instruction.
        A = Builder->CreateBinOp(InnerOpcode, L, R);
        A->takeName(&I);
        return A;
      }
  }

  // Selects with a shared condition distribute over the operation:
  // (op (select (a, c, b)), (select (a, d, b))) -> (select (a, (op c, d), 0))
  // (op (select (a, b, c)), (select (a, b, d))) -> (select (a, 0, (op c, d)))
  if (auto *SI0 = dyn_cast<SelectInst>(LHS)) {
    if (auto *SI1 = dyn_cast<SelectInst>(RHS)) {
      if (SI0->getCondition() == SI1->getCondition()) {
        Value *SI = nullptr;
        // Only profitable when one arm pair folds away entirely.
        if (Value *V = SimplifyBinOp(TopLevelOpcode, SI0->getFalseValue(),
                                     SI1->getFalseValue(), DL, &TLI, &DT, &AC))
          SI = Builder->CreateSelect(SI0->getCondition(),
                                     Builder->CreateBinOp(TopLevelOpcode,
                                                          SI0->getTrueValue(),
                                                          SI1->getTrueValue()),
                                     V);
        if (Value *V = SimplifyBinOp(TopLevelOpcode, SI0->getTrueValue(),
                                     SI1->getTrueValue(), DL, &TLI, &DT, &AC))
          SI = Builder->CreateSelect(
              SI0->getCondition(), V,
              Builder->CreateBinOp(TopLevelOpcode, SI0->getFalseValue(),
                                   SI1->getFalseValue()));
        if (SI) {
          SI->takeName(&I);
          return SI;
        }
      }
    }
  }

  return nullptr;
}
  630. /// Given a 'sub' instruction, return the RHS of the instruction if the LHS is a
  631. /// constant zero (which is the 'negate' form).
  632. Value *InstCombiner::dyn_castNegVal(Value *V) const {
  633. if (BinaryOperator::isNeg(V))
  634. return BinaryOperator::getNegArgument(V);
  635. // Constants can be considered to be negated values if they can be folded.
  636. if (ConstantInt *C = dyn_cast<ConstantInt>(V))
  637. return ConstantExpr::getNeg(C);
  638. if (ConstantDataVector *C = dyn_cast<ConstantDataVector>(V))
  639. if (C->getType()->getElementType()->isIntegerTy())
  640. return ConstantExpr::getNeg(C);
  641. if (ConstantVector *CV = dyn_cast<ConstantVector>(V)) {
  642. for (unsigned i = 0, e = CV->getNumOperands(); i != e; ++i) {
  643. Constant *Elt = CV->getAggregateElement(i);
  644. if (!Elt)
  645. return nullptr;
  646. if (isa<UndefValue>(Elt))
  647. continue;
  648. if (!isa<ConstantInt>(Elt))
  649. return nullptr;
  650. }
  651. return ConstantExpr::getNeg(CV);
  652. }
  653. return nullptr;
  654. }
  655. /// Given a 'fsub' instruction, return the RHS of the instruction if the LHS is
  656. /// a constant negative zero (which is the 'negate' form).
  657. Value *InstCombiner::dyn_castFNegVal(Value *V, bool IgnoreZeroSign) const {
  658. if (BinaryOperator::isFNeg(V, IgnoreZeroSign))
  659. return BinaryOperator::getFNegArgument(V);
  660. // Constants can be considered to be negated values if they can be folded.
  661. if (ConstantFP *C = dyn_cast<ConstantFP>(V))
  662. return ConstantExpr::getFNeg(C);
  663. if (ConstantDataVector *C = dyn_cast<ConstantDataVector>(V))
  664. if (C->getType()->getElementType()->isFloatingPointTy())
  665. return ConstantExpr::getFNeg(C);
  666. return nullptr;
  667. }
  668. static Value *foldOperationIntoSelectOperand(Instruction &I, Value *SO,
  669. InstCombiner *IC) {
  670. if (auto *Cast = dyn_cast<CastInst>(&I))
  671. return IC->Builder->CreateCast(Cast->getOpcode(), SO, I.getType());
  672. assert(I.isBinaryOp() && "Unexpected opcode for select folding");
  673. // Figure out if the constant is the left or the right argument.
  674. bool ConstIsRHS = isa<Constant>(I.getOperand(1));
  675. Constant *ConstOperand = cast<Constant>(I.getOperand(ConstIsRHS));
  676. if (auto *SOC = dyn_cast<Constant>(SO)) {
  677. if (ConstIsRHS)
  678. return ConstantExpr::get(I.getOpcode(), SOC, ConstOperand);
  679. return ConstantExpr::get(I.getOpcode(), ConstOperand, SOC);
  680. }
  681. Value *Op0 = SO, *Op1 = ConstOperand;
  682. if (!ConstIsRHS)
  683. std::swap(Op0, Op1);
  684. auto *BO = cast<BinaryOperator>(&I);
  685. Value *RI = IC->Builder->CreateBinOp(BO->getOpcode(), Op0, Op1,
  686. SO->getName() + ".op");
  687. auto *FPInst = dyn_cast<Instruction>(RI);
  688. if (FPInst && isa<FPMathOperator>(FPInst))
  689. FPInst->copyFastMathFlags(BO);
  690. return RI;
  691. }
/// Fold \p Op into the arms of select \p SI (one of Op's operands), producing
/// a new select of the two folded values.  Returns the new select, or null if
/// any of the profitability/legality guards below fail.
Instruction *InstCombiner::FoldOpIntoSelect(Instruction &Op, SelectInst *SI) {
  // Don't modify shared select instructions.
  if (!SI->hasOneUse())
    return nullptr;

  // Require at least one constant arm so one side folds away.
  Value *TV = SI->getTrueValue();
  Value *FV = SI->getFalseValue();
  if (!(isa<Constant>(TV) || isa<Constant>(FV)))
    return nullptr;

  // Bool selects with constant operands can be folded to logical ops.
  if (SI->getType()->getScalarType()->isIntegerTy(1))
    return nullptr;

  // If it's a bitcast involving vectors, make sure it has the same number of
  // elements on both sides.
  if (auto *BC = dyn_cast<BitCastInst>(&Op)) {
    VectorType *DestTy = dyn_cast<VectorType>(BC->getDestTy());
    VectorType *SrcTy = dyn_cast<VectorType>(BC->getSrcTy());

    // Verify that either both or neither are vectors.
    if ((SrcTy == nullptr) != (DestTy == nullptr))
      return nullptr;

    // If vectors, verify that they have the same number of elements.
    if (SrcTy && SrcTy->getNumElements() != DestTy->getNumElements())
      return nullptr;
  }

  // Test if a CmpInst instruction is used exclusively by a select as
  // part of a minimum or maximum operation. If so, refrain from doing
  // any other folding. This helps out other analyses which understand
  // non-obfuscated minimum and maximum idioms, such as ScalarEvolution
  // and CodeGen. And in this case, at least one of the comparison
  // operands has at least one user besides the compare (the select),
  // which would often largely negate the benefit of folding anyway.
  if (auto *CI = dyn_cast<CmpInst>(SI->getCondition())) {
    if (CI->hasOneUse()) {
      Value *Op0 = CI->getOperand(0), *Op1 = CI->getOperand(1);
      if ((SI->getOperand(1) == Op0 && SI->getOperand(2) == Op1) ||
          (SI->getOperand(2) == Op0 && SI->getOperand(1) == Op1))
        return nullptr;
    }
  }

  // Fold Op into each arm and rebuild the select over the results.
  Value *NewTV = foldOperationIntoSelectOperand(Op, TV, this);
  Value *NewFV = foldOperationIntoSelectOperand(Op, FV, this);
  return SelectInst::Create(SI->getCondition(), NewTV, NewFV, "", nullptr, SI);
}
/// Fold \p I (whose first operand is a PHI node) into the PHI by performing
/// the operation on each incoming value, producing a new PHI of the results.
/// Returns the replacement for I, or null if the transform is not safe.
Instruction *InstCombiner::FoldOpIntoPhi(Instruction &I) {
  PHINode *PN = cast<PHINode>(I.getOperand(0));
  unsigned NumPHIValues = PN->getNumIncomingValues();
  if (NumPHIValues == 0)
    return nullptr;

  // We normally only transform phis with a single use.  However, if a PHI has
  // multiple uses and they are all the same operation, we can fold *all* of the
  // uses into the PHI.
  if (!PN->hasOneUse()) {
    // Walk the use list for the instruction, comparing them to I.
    for (User *U : PN->users()) {
      Instruction *UI = cast<Instruction>(U);
      if (UI != &I && !I.isIdenticalTo(UI))
        return nullptr;
    }
    // Otherwise, we can replace *all* users with the new PHI we form.
  }

  // Check to see if all of the operands of the PHI are simple constants
  // (constantint/constantfp/undef).  If there is one non-constant value,
  // remember the BB it is in.  If there is more than one or if *it* is a PHI,
  // bail out.  We don't do arbitrary constant expressions here because moving
  // their computation can be expensive without a cost model.
  BasicBlock *NonConstBB = nullptr;
  for (unsigned i = 0; i != NumPHIValues; ++i) {
    Value *InVal = PN->getIncomingValue(i);
    if (isa<Constant>(InVal) && !isa<ConstantExpr>(InVal))
      continue;

    if (isa<PHINode>(InVal)) return nullptr;  // Itself a phi.
    if (NonConstBB) return nullptr;  // More than one non-const value.

    NonConstBB = PN->getIncomingBlock(i);

    // If the InVal is an invoke at the end of the pred block, then we can't
    // insert a computation after it without breaking the edge.
    if (InvokeInst *II = dyn_cast<InvokeInst>(InVal))
      if (II->getParent() == NonConstBB)
        return nullptr;

    // If the incoming non-constant value is in I's block, we will remove one
    // instruction, but insert another equivalent one, leading to infinite
    // instcombine.
    if (isPotentiallyReachable(I.getParent(), NonConstBB, &DT, LI))
      return nullptr;
  }

  // If there is exactly one non-constant value, we can insert a copy of the
  // operation in that block.  However, if this is a critical edge, we would be
  // inserting the computation on some other paths (e.g. inside a loop).  Only
  // do this if the pred block is unconditionally branching into the phi block.
  if (NonConstBB != nullptr) {
    BranchInst *BI = dyn_cast<BranchInst>(NonConstBB->getTerminator());
    if (!BI || !BI->isUnconditional()) return nullptr;
  }

  // Okay, we can do the transformation: create the new PHI node.
  PHINode *NewPN = PHINode::Create(I.getType(), PN->getNumIncomingValues());
  InsertNewInstBefore(NewPN, *PN);
  NewPN->takeName(PN);

  // If we are going to have to insert a new computation, do so right before the
  // predecessor's terminator.
  if (NonConstBB)
    Builder->SetInsertPoint(NonConstBB->getTerminator());

  // Next, add all of the operands to the PHI.  There is one case per kind of
  // instruction being folded (select-on-phi-condition, compare, other binary
  // op, cast); each computes the folded value for every incoming edge.
  if (SelectInst *SI = dyn_cast<SelectInst>(&I)) {
    // We only currently try to fold the condition of a select when it is a phi,
    // not the true/false values.
    Value *TrueV = SI->getTrueValue();
    Value *FalseV = SI->getFalseValue();
    BasicBlock *PhiTransBB = PN->getParent();
    for (unsigned i = 0; i != NumPHIValues; ++i) {
      BasicBlock *ThisBB = PN->getIncomingBlock(i);
      Value *TrueVInPred = TrueV->DoPHITranslation(PhiTransBB, ThisBB);
      Value *FalseVInPred = FalseV->DoPHITranslation(PhiTransBB, ThisBB);
      Value *InV = nullptr;
      // Beware of ConstantExpr:  it may eventually evaluate to getNullValue,
      // even if currently isNullValue gives false.
      Constant *InC = dyn_cast<Constant>(PN->getIncomingValue(i));
      // For vector constants, we cannot use isNullValue to fold into
      // FalseVInPred versus TrueVInPred. When we have individual nonzero
      // elements in the vector, we will incorrectly fold InC to
      // `TrueVInPred`.
      if (InC && !isa<ConstantExpr>(InC) && isa<ConstantInt>(InC))
        InV = InC->isNullValue() ? FalseVInPred : TrueVInPred;
      else
        InV = Builder->CreateSelect(PN->getIncomingValue(i),
                                    TrueVInPred, FalseVInPred, "phitmp");
      NewPN->addIncoming(InV, ThisBB);
    }
  } else if (CmpInst *CI = dyn_cast<CmpInst>(&I)) {
    Constant *C = cast<Constant>(I.getOperand(1));
    for (unsigned i = 0; i != NumPHIValues; ++i) {
      Value *InV = nullptr;
      if (Constant *InC = dyn_cast<Constant>(PN->getIncomingValue(i)))
        InV = ConstantExpr::getCompare(CI->getPredicate(), InC, C);
      else if (isa<ICmpInst>(CI))
        InV = Builder->CreateICmp(CI->getPredicate(), PN->getIncomingValue(i),
                                  C, "phitmp");
      else
        InV = Builder->CreateFCmp(CI->getPredicate(), PN->getIncomingValue(i),
                                  C, "phitmp");
      NewPN->addIncoming(InV, PN->getIncomingBlock(i));
    }
  } else if (I.getNumOperands() == 2) {
    Constant *C = cast<Constant>(I.getOperand(1));
    for (unsigned i = 0; i != NumPHIValues; ++i) {
      Value *InV = nullptr;
      if (Constant *InC = dyn_cast<Constant>(PN->getIncomingValue(i))) {
        InV = ConstantExpr::get(I.getOpcode(), InC, C);
      } else {
        InV = Builder->CreateBinOp(cast<BinaryOperator>(I).getOpcode(),
                                   PN->getIncomingValue(i), C, "phitmp");
        // Preserve fast-math flags on newly created FP instructions.
        auto *FPInst = dyn_cast<Instruction>(InV);
        if (FPInst && isa<FPMathOperator>(FPInst))
          FPInst->copyFastMathFlags(&I);
      }
      NewPN->addIncoming(InV, PN->getIncomingBlock(i));
    }
  } else {
    CastInst *CI = cast<CastInst>(&I);
    Type *RetTy = CI->getType();
    for (unsigned i = 0; i != NumPHIValues; ++i) {
      Value *InV;
      if (Constant *InC = dyn_cast<Constant>(PN->getIncomingValue(i)))
        InV = ConstantExpr::getCast(CI->getOpcode(), InC, RetTy);
      else
        InV = Builder->CreateCast(CI->getOpcode(),
                                  PN->getIncomingValue(i), I.getType(), "phitmp");
      NewPN->addIncoming(InV, PN->getIncomingBlock(i));
    }
  }

  // Replace every user of the original PHI that is identical to I (the
  // multi-use case accepted above) with the new PHI, then erase it.
  for (auto UI = PN->user_begin(), E = PN->user_end(); UI != E;) {
    Instruction *User = cast<Instruction>(*UI++);
    if (User == &I) continue;
    replaceInstUsesWith(*User, NewPN);
    eraseInstFromFunction(*User);
  }
  return replaceInstUsesWith(I, NewPN);
}
  867. Instruction *InstCombiner::foldOpWithConstantIntoOperand(BinaryOperator &I) {
  868. assert(isa<Constant>(I.getOperand(1)) && "Unexpected operand type");
  869. if (auto *Sel = dyn_cast<SelectInst>(I.getOperand(0))) {
  870. if (Instruction *NewSel = FoldOpIntoSelect(I, Sel))
  871. return NewSel;
  872. } else if (isa<PHINode>(I.getOperand(0))) {
  873. if (Instruction *NewPhi = FoldOpIntoPhi(I))
  874. return NewPhi;
  875. }
  876. return nullptr;
  877. }
/// Given a pointer type and a constant offset, determine whether or not there
/// is a sequence of GEP indices into the pointed type that will land us at the
/// specified offset. If so, fill them into NewIndices and return the resultant
/// element type, otherwise return null.
Type *InstCombiner::FindElementAtOffset(PointerType *PtrTy, int64_t Offset,
                                        SmallVectorImpl<Value *> &NewIndices) {
  Type *Ty = PtrTy->getElementType();
  if (!Ty->isSized())
    return nullptr;

  // Start with the index over the outer type.  Note that the type size
  // might be zero (even if the offset isn't zero) if the indexed type
  // is something like [0 x {int, int}]
  Type *IntPtrTy = DL.getIntPtrType(PtrTy);
  int64_t FirstIdx = 0;
  if (int64_t TySize = DL.getTypeAllocSize(Ty)) {
    FirstIdx = Offset/TySize;
    Offset -= FirstIdx*TySize;

    // Handle hosts where % returns negative instead of values [0..TySize).
    if (Offset < 0) {
      --FirstIdx;
      Offset += TySize;
      assert(Offset >= 0);
    }
    assert((uint64_t)Offset < (uint64_t)TySize && "Out of range offset");
  }

  NewIndices.push_back(ConstantInt::get(IntPtrTy, FirstIdx));

  // Index into the types.  If we fail, set OrigBase to null.
  while (Offset) {
    // Indexing into tail padding between struct/array elements.
    if (uint64_t(Offset * 8) >= DL.getTypeSizeInBits(Ty))
      return nullptr;

    if (StructType *STy = dyn_cast<StructType>(Ty)) {
      const StructLayout *SL = DL.getStructLayout(STy);
      assert(Offset < (int64_t)SL->getSizeInBytes() &&
             "Offset must stay within the indexed type");

      // Descend into the struct field containing Offset; struct field
      // indices are always i32.
      unsigned Elt = SL->getElementContainingOffset(Offset);
      NewIndices.push_back(ConstantInt::get(Type::getInt32Ty(Ty->getContext()),
                                            Elt));

      Offset -= SL->getElementOffset(Elt);
      Ty = STy->getElementType(Elt);
    } else if (ArrayType *AT = dyn_cast<ArrayType>(Ty)) {
      // Descend into the array element containing Offset; array indices
      // use the pointer-sized integer type.
      uint64_t EltSize = DL.getTypeAllocSize(AT->getElementType());
      assert(EltSize && "Cannot index into a zero-sized array");
      NewIndices.push_back(ConstantInt::get(IntPtrTy,Offset/EltSize));
      Offset %= EltSize;
      Ty = AT->getElementType();
    } else {
      // Otherwise, we can't index into the middle of this atomic type, bail.
      return nullptr;
    }
  }

  return Ty;
}
  931. static bool shouldMergeGEPs(GEPOperator &GEP, GEPOperator &Src) {
  932. // If this GEP has only 0 indices, it is the same pointer as
  933. // Src. If Src is not a trivial GEP too, don't combine
  934. // the indices.
  935. if (GEP.hasAllZeroIndices() && !Src.hasAllZeroIndices() &&
  936. !Src.hasOneUse())
  937. return false;
  938. return true;
  939. }
/// Return a value X such that Val = X * Scale, or null if none.
/// If the multiplication is known not to overflow, then NoSignedWrap is set.
Value *InstCombiner::Descale(Value *Val, APInt Scale, bool &NoSignedWrap) {
  assert(isa<IntegerType>(Val->getType()) && "Can only descale integers!");
  assert(cast<IntegerType>(Val->getType())->getBitWidth() ==
         Scale.getBitWidth() && "Scale not compatible with value!");

  // If Val is zero or Scale is one then Val = Val * Scale.
  if (match(Val, m_Zero()) || Scale == 1) {
    NoSignedWrap = true;
    return Val;
  }

  // If Scale is zero then it does not divide Val.
  if (Scale.isMinValue())
    return nullptr;

  // Look through chains of multiplications, searching for a constant that is
  // divisible by Scale.  For example, descaling X*(Y*(Z*4)) by a factor of 4
  // will find the constant factor 4 and produce X*(Y*Z).  Descaling X*(Y*8) by
  // a factor of 4 will produce X*(Y*2).  The principle of operation is to bore
  // down from Val:
  //
  //     Val = M1 * X    ||   Analysis starts here and works down
  //      M1 = M2 * Y    ||   Doesn't descend into terms with more
  //      M2 =  Z * 4    \/   than one use
  //
  // Then to modify a term at the bottom:
  //
  //     Val = M1 * X
  //      M1 =  Z * Y    ||   Replaced M2 with Z
  //
  // Then to work back up correcting nsw flags.

  // Op - the term we are currently analyzing.  Starts at Val then drills down.
  // Replaced with its descaled value before exiting from the drill down loop.
  Value *Op = Val;

  // Parent - initially null, but after drilling down notes where Op came from.
  // In the example above, Parent is (Val, 0) when Op is M1, because M1 is the
  // 0'th operand of Val.
  std::pair<Instruction*, unsigned> Parent;

  // Set if the transform requires a descaling at deeper levels that doesn't
  // overflow.
  bool RequireNoSignedWrap = false;

  // Log base 2 of the scale. Negative if not a power of 2.
  int32_t logScale = Scale.exactLogBase2();

  for (;; Op = Parent.first->getOperand(Parent.second)) { // Drill down
    if (ConstantInt *CI = dyn_cast<ConstantInt>(Op)) {
      // If Op is a constant divisible by Scale then descale to the quotient.
      APInt Quotient(Scale), Remainder(Scale); // Init ensures right bitwidth.
      APInt::sdivrem(CI->getValue(), Scale, Quotient, Remainder);
      if (!Remainder.isMinValue())
        // Not divisible by Scale.
        return nullptr;
      // Replace with the quotient in the parent.
      Op = ConstantInt::get(CI->getType(), Quotient);
      NoSignedWrap = true;
      break;
    }

    if (BinaryOperator *BO = dyn_cast<BinaryOperator>(Op)) {
      if (BO->getOpcode() == Instruction::Mul) {
        // Multiplication.
        NoSignedWrap = BO->hasNoSignedWrap();
        if (RequireNoSignedWrap && !NoSignedWrap)
          return nullptr;

        // There are three cases for multiplication: multiplication by exactly
        // the scale, multiplication by a constant different to the scale, and
        // multiplication by something else.
        Value *LHS = BO->getOperand(0);
        Value *RHS = BO->getOperand(1);

        if (ConstantInt *CI = dyn_cast<ConstantInt>(RHS)) {
          // Multiplication by a constant.
          if (CI->getValue() == Scale) {
            // Multiplication by exactly the scale, replace the multiplication
            // by its left-hand side in the parent.
            Op = LHS;
            break;
          }

          // Otherwise drill down into the constant.
          if (!Op->hasOneUse())
            return nullptr;

          Parent = std::make_pair(BO, 1);
          continue;
        }

        // Multiplication by something else. Drill down into the left-hand side
        // since that's where the reassociate pass puts the good stuff.
        if (!Op->hasOneUse())
          return nullptr;

        Parent = std::make_pair(BO, 0);
        continue;
      }

      if (logScale > 0 && BO->getOpcode() == Instruction::Shl &&
          isa<ConstantInt>(BO->getOperand(1))) {
        // Multiplication by a power of 2.
        NoSignedWrap = BO->hasNoSignedWrap();
        if (RequireNoSignedWrap && !NoSignedWrap)
          return nullptr;

        Value *LHS = BO->getOperand(0);
        int32_t Amt = cast<ConstantInt>(BO->getOperand(1))->
          getLimitedValue(Scale.getBitWidth());
        // Op = LHS << Amt.

        if (Amt == logScale) {
          // Multiplication by exactly the scale, replace the multiplication
          // by its left-hand side in the parent.
          Op = LHS;
          break;
        }
        if (Amt < logScale || !Op->hasOneUse())
          return nullptr;

        // Multiplication by more than the scale.  Reduce the multiplying amount
        // by the scale in the parent.
        Parent = std::make_pair(BO, 1);
        Op = ConstantInt::get(BO->getType(), Amt - logScale);
        break;
      }
    }

    if (!Op->hasOneUse())
      return nullptr;

    if (CastInst *Cast = dyn_cast<CastInst>(Op)) {
      if (Cast->getOpcode() == Instruction::SExt) {
        // Op is sign-extended from a smaller type, descale in the smaller type.
        unsigned SmallSize = Cast->getSrcTy()->getPrimitiveSizeInBits();
        APInt SmallScale = Scale.trunc(SmallSize);
        // Suppose Op = sext X, and we descale X as Y * SmallScale.  We want to
        // descale Op as (sext Y) * Scale.  In order to have
        //   sext (Y * SmallScale) = (sext Y) * Scale
        // some conditions need to hold however: SmallScale must sign-extend to
        // Scale and the multiplication Y * SmallScale should not overflow.
        if (SmallScale.sext(Scale.getBitWidth()) != Scale)
          // SmallScale does not sign-extend to Scale.
          return nullptr;
        assert(SmallScale.exactLogBase2() == logScale);
        // Require that Y * SmallScale must not overflow.
        RequireNoSignedWrap = true;

        // Drill down through the cast.
        Parent = std::make_pair(Cast, 0);
        Scale = SmallScale;
        continue;
      }

      if (Cast->getOpcode() == Instruction::Trunc) {
        // Op is truncated from a larger type, descale in the larger type.
        // Suppose Op = trunc X, and we descale X as Y * sext Scale.  Then
        //   trunc (Y * sext Scale) = (trunc Y) * Scale
        // always holds.  However (trunc Y) * Scale may overflow even if
        // trunc (Y * sext Scale) does not, so nsw flags need to be cleared
        // from this point up in the expression (see later).
        if (RequireNoSignedWrap)
          return nullptr;

        // Drill down through the cast.
        unsigned LargeSize = Cast->getSrcTy()->getPrimitiveSizeInBits();
        Parent = std::make_pair(Cast, 0);
        Scale = Scale.sext(LargeSize);
        if (logScale + 1 == (int32_t)Cast->getType()->getPrimitiveSizeInBits())
          logScale = -1;
        assert(Scale.exactLogBase2() == logScale);
        continue;
      }
    }

    // Unsupported expression, bail out.
    return nullptr;
  }

  // If Op is zero then Val = Op * Scale.
  if (match(Op, m_Zero())) {
    NoSignedWrap = true;
    return Op;
  }

  // We know that we can successfully descale, so from here on we can safely
  // modify the IR.  Op holds the descaled version of the deepest term in the
  // expression.  NoSignedWrap is 'true' if multiplying Op by Scale is known
  // not to overflow.

  if (!Parent.first)
    // The expression only had one term.
    return Op;

  // Rewrite the parent using the descaled version of its operand.
  assert(Parent.first->hasOneUse() && "Drilled down when more than one use!");
  assert(Op != Parent.first->getOperand(Parent.second) &&
         "Descaling was a no-op?");
  Parent.first->setOperand(Parent.second, Op);
  Worklist.Add(Parent.first);

  // Now work back up the expression correcting nsw flags.  The logic is based
  // on the following observation: if X * Y is known not to overflow as a signed
  // multiplication, and Y is replaced by a value Z with smaller absolute value,
  // then X * Z will not overflow as a signed multiplication either.  As we work
  // our way up, having NoSignedWrap 'true' means that the descaled value at the
  // current level has strictly smaller absolute value than the original.
  Instruction *Ancestor = Parent.first;
  do {
    if (BinaryOperator *BO = dyn_cast<BinaryOperator>(Ancestor)) {
      // If the multiplication wasn't nsw then we can't say anything about the
      // value of the descaled multiplication, and we have to clear nsw flags
      // from this point on up.
      bool OpNoSignedWrap = BO->hasNoSignedWrap();
      NoSignedWrap &= OpNoSignedWrap;
      if (NoSignedWrap != OpNoSignedWrap) {
        BO->setHasNoSignedWrap(NoSignedWrap);
        Worklist.Add(Ancestor);
      }
    } else if (Ancestor->getOpcode() == Instruction::Trunc) {
      // The fact that the descaled input to the trunc has smaller absolute
      // value than the original input doesn't tell us anything useful about
      // the absolute values of the truncations.
      NoSignedWrap = false;
    }
    assert((Ancestor->getOpcode() != Instruction::SExt || NoSignedWrap) &&
           "Failed to keep proper track of nsw flags while drilling down?");

    if (Ancestor == Val)
      // Got to the top, all done!
      return Val;

    // Move up one level in the expression.
    assert(Ancestor->hasOneUse() && "Drilled down when more than one use!");
    Ancestor = Ancestor->user_back();
  } while (1);
}
  1149. /// \brief Creates node of binary operation with the same attributes as the
  1150. /// specified one but with other operands.
  1151. static Value *CreateBinOpAsGiven(BinaryOperator &Inst, Value *LHS, Value *RHS,
  1152. InstCombiner::BuilderTy *B) {
  1153. Value *BO = B->CreateBinOp(Inst.getOpcode(), LHS, RHS);
  1154. // If LHS and RHS are constant, BO won't be a binary operator.
  1155. if (BinaryOperator *NewBO = dyn_cast<BinaryOperator>(BO))
  1156. NewBO->copyIRFlags(&Inst);
  1157. return BO;
  1158. }
/// \brief Makes transformation of binary operation specific for vector types.
/// \param Inst Binary operator to transform.
/// \return Pointer to node that must replace the original binary operator, or
///         null pointer if no transformation was made.
Value *InstCombiner::SimplifyVectorOp(BinaryOperator &Inst) {
  if (!Inst.getType()->isVectorTy()) return nullptr;

  // It may not be safe to reorder shuffles and things like div, urem, etc.
  // because we may trap when executing those ops on unknown vector elements.
  // See PR20059.
  if (!isSafeToSpeculativelyExecute(&Inst))
    return nullptr;

  unsigned VWidth = cast<VectorType>(Inst.getType())->getNumElements();
  Value *LHS = Inst.getOperand(0), *RHS = Inst.getOperand(1);
  assert(cast<VectorType>(LHS->getType())->getNumElements() == VWidth);
  assert(cast<VectorType>(RHS->getType())->getNumElements() == VWidth);

  // If both arguments of the binary operation are shuffles that use the same
  // mask and shuffle within a single vector, move the shuffle after the binop:
  //   Op(shuffle(v1, m), shuffle(v2, m)) -> shuffle(Op(v1, v2), m)
  // Requiring an undef second shuffle operand means the shuffle only draws
  // elements from one source vector.
  auto *LShuf = dyn_cast<ShuffleVectorInst>(LHS);
  auto *RShuf = dyn_cast<ShuffleVectorInst>(RHS);
  if (LShuf && RShuf && LShuf->getMask() == RShuf->getMask() &&
      isa<UndefValue>(LShuf->getOperand(1)) &&
      isa<UndefValue>(RShuf->getOperand(1)) &&
      LShuf->getOperand(0)->getType() == RShuf->getOperand(0)->getType()) {
    Value *NewBO = CreateBinOpAsGiven(Inst, LShuf->getOperand(0),
                                      RShuf->getOperand(0), Builder);
    return Builder->CreateShuffleVector(
        NewBO, UndefValue::get(NewBO->getType()), LShuf->getMask());
  }

  // If one argument is a shuffle within one vector, the other is a constant,
  // try moving the shuffle after the binary operation.
  ShuffleVectorInst *Shuffle = nullptr;
  Constant *C1 = nullptr;
  if (isa<ShuffleVectorInst>(LHS)) Shuffle = cast<ShuffleVectorInst>(LHS);
  if (isa<ShuffleVectorInst>(RHS)) Shuffle = cast<ShuffleVectorInst>(RHS);
  if (isa<Constant>(LHS)) C1 = cast<Constant>(LHS);
  if (isa<Constant>(RHS)) C1 = cast<Constant>(RHS);
  if (Shuffle && C1 &&
      (isa<ConstantVector>(C1) || isa<ConstantDataVector>(C1)) &&
      isa<UndefValue>(Shuffle->getOperand(1)) &&
      Shuffle->getType() == Shuffle->getOperand(0)->getType()) {
    SmallVector<int, 16> ShMask = Shuffle->getShuffleMask();
    // Find constant C2 that has property:
    //   shuffle(C2, ShMask) = C1
    // If such constant does not exist (example: ShMask=<0,0> and C1=<1,2>)
    // reorder is not possible. Start from an all-undef C2 and fill in lanes
    // demanded by the mask.
    SmallVector<Constant*, 16> C2M(VWidth,
                               UndefValue::get(C1->getType()->getScalarType()));
    bool MayChange = true;
    for (unsigned I = 0; I < VWidth; ++I) {
      if (ShMask[I] >= 0) {
        assert(ShMask[I] < (int)VWidth);
        // A source lane already claimed by a different destination lane means
        // no single C2 can reproduce C1 through this mask.
        if (!isa<UndefValue>(C2M[ShMask[I]])) {
          MayChange = false;
          break;
        }
        C2M[ShMask[I]] = C1->getAggregateElement(I);
      }
    }
    if (MayChange) {
      Constant *C2 = ConstantVector::get(C2M);
      // Preserve the original operand order of the binop (matters for
      // non-commutative operations such as sub/div).
      Value *NewLHS = isa<Constant>(LHS) ? C2 : Shuffle->getOperand(0);
      Value *NewRHS = isa<Constant>(LHS) ? Shuffle->getOperand(0) : C2;
      Value *NewBO = CreateBinOpAsGiven(Inst, NewLHS, NewRHS, Builder);
      return Builder->CreateShuffleVector(NewBO,
          UndefValue::get(Inst.getType()), Shuffle->getMask());
    }
  }

  return nullptr;
}
  1229. Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
  1230. SmallVector<Value*, 8> Ops(GEP.op_begin(), GEP.op_end());
  1231. if (Value *V =
  1232. SimplifyGEPInst(GEP.getSourceElementType(), Ops, DL, &TLI, &DT, &AC))
  1233. return replaceInstUsesWith(GEP, V);
  1234. Value *PtrOp = GEP.getOperand(0);
  1235. // Eliminate unneeded casts for indices, and replace indices which displace
  1236. // by multiples of a zero size type with zero.
  1237. bool MadeChange = false;
  1238. Type *IntPtrTy =
  1239. DL.getIntPtrType(GEP.getPointerOperandType()->getScalarType());
  1240. gep_type_iterator GTI = gep_type_begin(GEP);
  1241. for (User::op_iterator I = GEP.op_begin() + 1, E = GEP.op_end(); I != E;
  1242. ++I, ++GTI) {
  1243. // Skip indices into struct types.
  1244. if (GTI.isStruct())
  1245. continue;
  1246. // Index type should have the same width as IntPtr
  1247. Type *IndexTy = (*I)->getType();
  1248. Type *NewIndexType = IndexTy->isVectorTy() ?
  1249. VectorType::get(IntPtrTy, IndexTy->getVectorNumElements()) : IntPtrTy;
  1250. // If the element type has zero size then any index over it is equivalent
  1251. // to an index of zero, so replace it with zero if it is not zero already.
  1252. Type *EltTy = GTI.getIndexedType();
  1253. if (EltTy->isSized() && DL.getTypeAllocSize(EltTy) == 0)
  1254. if (!isa<Constant>(*I) || !cast<Constant>(*I)->isNullValue()) {
  1255. *I = Constant::getNullValue(NewIndexType);
  1256. MadeChange = true;
  1257. }
  1258. if (IndexTy != NewIndexType) {
  1259. // If we are using a wider index than needed for this platform, shrink
  1260. // it to what we need. If narrower, sign-extend it to what we need.
  1261. // This explicit cast can make subsequent optimizations more obvious.
  1262. *I = Builder->CreateIntCast(*I, NewIndexType, true);
  1263. MadeChange = true;
  1264. }
  1265. }
  1266. if (MadeChange)
  1267. return &GEP;
  1268. // Check to see if the inputs to the PHI node are getelementptr instructions.
  1269. if (PHINode *PN = dyn_cast<PHINode>(PtrOp)) {
  1270. GetElementPtrInst *Op1 = dyn_cast<GetElementPtrInst>(PN->getOperand(0));
  1271. if (!Op1)
  1272. return nullptr;
  1273. // Don't fold a GEP into itself through a PHI node. This can only happen
  1274. // through the back-edge of a loop. Folding a GEP into itself means that
  1275. // the value of the previous iteration needs to be stored in the meantime,
  1276. // thus requiring an additional register variable to be live, but not
  1277. // actually achieving anything (the GEP still needs to be executed once per
  1278. // loop iteration).
  1279. if (Op1 == &GEP)
  1280. return nullptr;
  1281. int DI = -1;
  1282. for (auto I = PN->op_begin()+1, E = PN->op_end(); I !=E; ++I) {
  1283. GetElementPtrInst *Op2 = dyn_cast<GetElementPtrInst>(*I);
  1284. if (!Op2 || Op1->getNumOperands() != Op2->getNumOperands())
  1285. return nullptr;
  1286. // As for Op1 above, don't try to fold a GEP into itself.
  1287. if (Op2 == &GEP)
  1288. return nullptr;
  1289. // Keep track of the type as we walk the GEP.
  1290. Type *CurTy = nullptr;
  1291. for (unsigned J = 0, F = Op1->getNumOperands(); J != F; ++J) {
  1292. if (Op1->getOperand(J)->getType() != Op2->getOperand(J)->getType())
  1293. return nullptr;
  1294. if (Op1->getOperand(J) != Op2->getOperand(J)) {
  1295. if (DI == -1) {
  1296. // We have not seen any differences yet in the GEPs feeding the
  1297. // PHI yet, so we record this one if it is allowed to be a
  1298. // variable.
  1299. // The first two arguments can vary for any GEP, the rest have to be
  1300. // static for struct slots
  1301. if (J > 1 && CurTy->isStructTy())
  1302. return nullptr;
  1303. DI = J;
  1304. } else {
  1305. // The GEP is different by more than one input. While this could be
  1306. // extended to support GEPs that vary by more than one variable it
  1307. // doesn't make sense since it greatly increases the complexity and
  1308. // would result in an R+R+R addressing mode which no backend
  1309. // directly supports and would need to be broken into several
  1310. // simpler instructions anyway.
  1311. return nullptr;
  1312. }
  1313. }
  1314. // Sink down a layer of the type for the next iteration.
  1315. if (J > 0) {
  1316. if (J == 1) {
  1317. CurTy = Op1->getSourceElementType();
  1318. } else if (CompositeType *CT = dyn_cast<CompositeType>(CurTy)) {
  1319. CurTy = CT->getTypeAtIndex(Op1->getOperand(J));
  1320. } else {
  1321. CurTy = nullptr;
  1322. }
  1323. }
  1324. }
  1325. }
  1326. // If not all GEPs are identical we'll have to create a new PHI node.
  1327. // Check that the old PHI node has only one use so that it will get
  1328. // removed.
  1329. if (DI != -1 && !PN->hasOneUse())
  1330. return nullptr;
  1331. GetElementPtrInst *NewGEP = cast<GetElementPtrInst>(Op1->clone());
  1332. if (DI == -1) {
  1333. // All the GEPs feeding the PHI are identical. Clone one down into our
  1334. // BB so that it can be merged with the current GEP.
  1335. GEP.getParent()->getInstList().insert(
  1336. GEP.getParent()->getFirstInsertionPt(), NewGEP);
  1337. } else {
  1338. // All the GEPs feeding the PHI differ at a single offset. Clone a GEP
  1339. // into the current block so it can be merged, and create a new PHI to
  1340. // set that index.
  1341. PHINode *NewPN;
  1342. {
  1343. IRBuilderBase::InsertPointGuard Guard(*Builder);
  1344. Builder->SetInsertPoint(PN);
  1345. NewPN = Builder->CreatePHI(Op1->getOperand(DI)->getType(),
  1346. PN->getNumOperands());
  1347. }
  1348. for (auto &I : PN->operands())
  1349. NewPN->addIncoming(cast<GEPOperator>(I)->getOperand(DI),
  1350. PN->getIncomingBlock(I));
  1351. NewGEP->setOperand(DI, NewPN);
  1352. GEP.getParent()->getInstList().insert(
  1353. GEP.getParent()->getFirstInsertionPt(), NewGEP);
  1354. NewGEP->setOperand(DI, NewPN);
  1355. }
  1356. GEP.setOperand(0, NewGEP);
  1357. PtrOp = NewGEP;
  1358. }
  1359. // Combine Indices - If the source pointer to this getelementptr instruction
  1360. // is a getelementptr instruction, combine the indices of the two
  1361. // getelementptr instructions into a single instruction.
  1362. //
  1363. if (GEPOperator *Src = dyn_cast<GEPOperator>(PtrOp)) {
  1364. if (!shouldMergeGEPs(*cast<GEPOperator>(&GEP), *Src))
  1365. return nullptr;
  1366. // Note that if our source is a gep chain itself then we wait for that
  1367. // chain to be resolved before we perform this transformation. This
  1368. // avoids us creating a TON of code in some cases.
  1369. if (GEPOperator *SrcGEP =
  1370. dyn_cast<GEPOperator>(Src->getOperand(0)))
  1371. if (SrcGEP->getNumOperands() == 2 && shouldMergeGEPs(*Src, *SrcGEP))
  1372. return nullptr; // Wait until our source is folded to completion.
  1373. SmallVector<Value*, 8> Indices;
  1374. // Find out whether the last index in the source GEP is a sequential idx.
  1375. bool EndsWithSequential = false;
  1376. for (gep_type_iterator I = gep_type_begin(*Src), E = gep_type_end(*Src);
  1377. I != E; ++I)
  1378. EndsWithSequential = I.isSequential();
  1379. // Can we combine the two pointer arithmetics offsets?
  1380. if (EndsWithSequential) {
  1381. // Replace: gep (gep %P, long B), long A, ...
  1382. // With: T = long A+B; gep %P, T, ...
  1383. //
  1384. Value *SO1 = Src->getOperand(Src->getNumOperands()-1);
  1385. Value *GO1 = GEP.getOperand(1);
  1386. // If they aren't the same type, then the input hasn't been processed
  1387. // by the loop above yet (which canonicalizes sequential index types to
  1388. // intptr_t). Just avoid transforming this until the input has been
  1389. // normalized.
  1390. if (SO1->getType() != GO1->getType())
  1391. return nullptr;
  1392. Value* Sum = SimplifyAddInst(GO1, SO1, false, false, DL, &TLI, &DT, &AC);
  1393. // Only do the combine when we are sure the cost after the
  1394. // merge is never more than that before the merge.
  1395. if (Sum == nullptr)
  1396. return nullptr;
  1397. // Update the GEP in place if possible.
  1398. if (Src->getNumOperands() == 2) {
  1399. GEP.setOperand(0, Src->getOperand(0));
  1400. GEP.setOperand(1, Sum);
  1401. return &GEP;
  1402. }
  1403. Indices.append(Src->op_begin()+1, Src->op_end()-1);
  1404. Indices.push_back(Sum);
  1405. Indices.append(GEP.op_begin()+2, GEP.op_end());
  1406. } else if (isa<Constant>(*GEP.idx_begin()) &&
  1407. cast<Constant>(*GEP.idx_begin())->isNullValue() &&
  1408. Src->getNumOperands() != 1) {
  1409. // Otherwise we can do the fold if the first index of the GEP is a zero
  1410. Indices.append(Src->op_begin()+1, Src->op_end());
  1411. Indices.append(GEP.idx_begin()+1, GEP.idx_end());
  1412. }
  1413. if (!Indices.empty())
  1414. return GEP.isInBounds() && Src->isInBounds()
  1415. ? GetElementPtrInst::CreateInBounds(
  1416. Src->getSourceElementType(), Src->getOperand(0), Indices,
  1417. GEP.getName())
  1418. : GetElementPtrInst::Create(Src->getSourceElementType(),
  1419. Src->getOperand(0), Indices,
  1420. GEP.getName());
  1421. }
  1422. if (GEP.getNumIndices() == 1) {
  1423. unsigned AS = GEP.getPointerAddressSpace();
  1424. if (GEP.getOperand(1)->getType()->getScalarSizeInBits() ==
  1425. DL.getPointerSizeInBits(AS)) {
  1426. Type *Ty = GEP.getSourceElementType();
  1427. uint64_t TyAllocSize = DL.getTypeAllocSize(Ty);
  1428. bool Matched = false;
  1429. uint64_t C;
  1430. Value *V = nullptr;
  1431. if (TyAllocSize == 1) {
  1432. V = GEP.getOperand(1);
  1433. Matched = true;
  1434. } else if (match(GEP.getOperand(1),
  1435. m_AShr(m_Value(V), m_ConstantInt(C)))) {
  1436. if (TyAllocSize == 1ULL << C)
  1437. Matched = true;
  1438. } else if (match(GEP.getOperand(1),
  1439. m_SDiv(m_Value(V), m_ConstantInt(C)))) {
  1440. if (TyAllocSize == C)
  1441. Matched = true;
  1442. }
  1443. if (Matched) {
  1444. // Canonicalize (gep i8* X, -(ptrtoint Y))
  1445. // to (inttoptr (sub (ptrtoint X), (ptrtoint Y)))
  1446. // The GEP pattern is emitted by the SCEV expander for certain kinds of
  1447. // pointer arithmetic.
  1448. if (match(V, m_Neg(m_PtrToInt(m_Value())))) {
  1449. Operator *Index = cast<Operator>(V);
  1450. Value *PtrToInt = Builder->CreatePtrToInt(PtrOp, Index->getType());
  1451. Value *NewSub = Builder->CreateSub(PtrToInt, Index->getOperand(1));
  1452. return CastInst::Create(Instruction::IntToPtr, NewSub, GEP.getType());
  1453. }
  1454. // Canonicalize (gep i8* X, (ptrtoint Y)-(ptrtoint X))
  1455. // to (bitcast Y)
  1456. Value *Y;
  1457. if (match(V, m_Sub(m_PtrToInt(m_Value(Y)),
  1458. m_PtrToInt(m_Specific(GEP.getOperand(0)))))) {
  1459. return CastInst::CreatePointerBitCastOrAddrSpaceCast(Y,
  1460. GEP.getType());
  1461. }
  1462. }
  1463. }
  1464. }
  1465. // We do not handle pointer-vector geps here.
  1466. if (GEP.getType()->isVectorTy())
  1467. return nullptr;
  1468. // Handle gep(bitcast x) and gep(gep x, 0, 0, 0).
  1469. Value *StrippedPtr = PtrOp->stripPointerCasts();
  1470. PointerType *StrippedPtrTy = cast<PointerType>(StrippedPtr->getType());
  1471. if (StrippedPtr != PtrOp) {
  1472. bool HasZeroPointerIndex = false;
  1473. if (ConstantInt *C = dyn_cast<ConstantInt>(GEP.getOperand(1)))
  1474. HasZeroPointerIndex = C->isZero();
  1475. // Transform: GEP (bitcast [10 x i8]* X to [0 x i8]*), i32 0, ...
  1476. // into : GEP [10 x i8]* X, i32 0, ...
  1477. //
  1478. // Likewise, transform: GEP (bitcast i8* X to [0 x i8]*), i32 0, ...
  1479. // into : GEP i8* X, ...
  1480. //
  1481. // This occurs when the program declares an array extern like "int X[];"
  1482. if (HasZeroPointerIndex) {
  1483. if (ArrayType *CATy =
  1484. dyn_cast<ArrayType>(GEP.getSourceElementType())) {
  1485. // GEP (bitcast i8* X to [0 x i8]*), i32 0, ... ?
  1486. if (CATy->getElementType() == StrippedPtrTy->getElementType()) {
  1487. // -> GEP i8* X, ...
  1488. SmallVector<Value*, 8> Idx(GEP.idx_begin()+1, GEP.idx_end());
  1489. GetElementPtrInst *Res = GetElementPtrInst::Create(
  1490. StrippedPtrTy->getElementType(), StrippedPtr, Idx, GEP.getName());
  1491. Res->setIsInBounds(GEP.isInBounds());
  1492. if (StrippedPtrTy->getAddressSpace() == GEP.getAddressSpace())
  1493. return Res;
  1494. // Insert Res, and create an addrspacecast.
  1495. // e.g.,
  1496. // GEP (addrspacecast i8 addrspace(1)* X to [0 x i8]*), i32 0, ...
  1497. // ->
  1498. // %0 = GEP i8 addrspace(1)* X, ...
  1499. // addrspacecast i8 addrspace(1)* %0 to i8*
  1500. return new AddrSpaceCastInst(Builder->Insert(Res), GEP.getType());
  1501. }
  1502. if (ArrayType *XATy =
  1503. dyn_cast<ArrayType>(StrippedPtrTy->getElementType())){
  1504. // GEP (bitcast [10 x i8]* X to [0 x i8]*), i32 0, ... ?
  1505. if (CATy->getElementType() == XATy->getElementType()) {
  1506. // -> GEP [10 x i8]* X, i32 0, ...
  1507. // At this point, we know that the cast source type is a pointer
  1508. // to an array of the same type as the destination pointer
  1509. // array. Because the array type is never stepped over (there
  1510. // is a leading zero) we can fold the cast into this GEP.
  1511. if (StrippedPtrTy->getAddressSpace() == GEP.getAddressSpace()) {
  1512. GEP.setOperand(0, StrippedPtr);
  1513. GEP.setSourceElementType(XATy);
  1514. return &GEP;
  1515. }
  1516. // Cannot replace the base pointer directly because StrippedPtr's
  1517. // address space is different. Instead, create a new GEP followed by
  1518. // an addrspacecast.
  1519. // e.g.,
  1520. // GEP (addrspacecast [10 x i8] addrspace(1)* X to [0 x i8]*),
  1521. // i32 0, ...
  1522. // ->
  1523. // %0 = GEP [10 x i8] addrspace(1)* X, ...
  1524. // addrspacecast i8 addrspace(1)* %0 to i8*
  1525. SmallVector<Value*, 8> Idx(GEP.idx_begin(), GEP.idx_end());
  1526. Value *NewGEP = GEP.isInBounds()
  1527. ? Builder->CreateInBoundsGEP(
  1528. nullptr, StrippedPtr, Idx, GEP.getName())
  1529. : Builder->CreateGEP(nullptr, StrippedPtr, Idx,
  1530. GEP.getName());
  1531. return new AddrSpaceCastInst(NewGEP, GEP.getType());
  1532. }
  1533. }
  1534. }
  1535. } else if (GEP.getNumOperands() == 2) {
  1536. // Transform things like:
  1537. // %t = getelementptr i32* bitcast ([2 x i32]* %str to i32*), i32 %V
  1538. // into: %t1 = getelementptr [2 x i32]* %str, i32 0, i32 %V; bitcast
  1539. Type *SrcElTy = StrippedPtrTy->getElementType();
  1540. Type *ResElTy = GEP.getSourceElementType();
  1541. if (SrcElTy->isArrayTy() &&
  1542. DL.getTypeAllocSize(SrcElTy->getArrayElementType()) ==
  1543. DL.getTypeAllocSize(ResElTy)) {
  1544. Type *IdxType = DL.getIntPtrType(GEP.getType());
  1545. Value *Idx[2] = { Constant::getNullValue(IdxType), GEP.getOperand(1) };
  1546. Value *NewGEP =
  1547. GEP.isInBounds()
  1548. ? Builder->CreateInBoundsGEP(nullptr, StrippedPtr, Idx,
  1549. GEP.getName())
  1550. : Builder->CreateGEP(nullptr, StrippedPtr, Idx, GEP.getName());
  1551. // V and GEP are both pointer types --> BitCast
  1552. return CastInst::CreatePointerBitCastOrAddrSpaceCast(NewGEP,
  1553. GEP.getType());
  1554. }
  1555. // Transform things like:
  1556. // %V = mul i64 %N, 4
  1557. // %t = getelementptr i8* bitcast (i32* %arr to i8*), i32 %V
  1558. // into: %t1 = getelementptr i32* %arr, i32 %N; bitcast
  1559. if (ResElTy->isSized() && SrcElTy->isSized()) {
  1560. // Check that changing the type amounts to dividing the index by a scale
  1561. // factor.
  1562. uint64_t ResSize = DL.getTypeAllocSize(ResElTy);
  1563. uint64_t SrcSize = DL.getTypeAllocSize(SrcElTy);
  1564. if (ResSize && SrcSize % ResSize == 0) {
  1565. Value *Idx = GEP.getOperand(1);
  1566. unsigned BitWidth = Idx->getType()->getPrimitiveSizeInBits();
  1567. uint64_t Scale = SrcSize / ResSize;
  1568. // Earlier transforms ensure that the index has type IntPtrType, which
  1569. // considerably simplifies the logic by eliminating implicit casts.
  1570. assert(Idx->getType() == DL.getIntPtrType(GEP.getType()) &&
  1571. "Index not cast to pointer width?");
  1572. bool NSW;
  1573. if (Value *NewIdx = Descale(Idx, APInt(BitWidth, Scale), NSW)) {
  1574. // Successfully decomposed Idx as NewIdx * Scale, form a new GEP.
  1575. // If the multiplication NewIdx * Scale may overflow then the new
  1576. // GEP may not be "inbounds".
  1577. Value *NewGEP =
  1578. GEP.isInBounds() && NSW
  1579. ? Builder->CreateInBoundsGEP(nullptr, StrippedPtr, NewIdx,
  1580. GEP.getName())
  1581. : Builder->CreateGEP(nullptr, StrippedPtr, NewIdx,
  1582. GEP.getName());
  1583. // The NewGEP must be pointer typed, so must the old one -> BitCast
  1584. return CastInst::CreatePointerBitCastOrAddrSpaceCast(NewGEP,
  1585. GEP.getType());
  1586. }
  1587. }
  1588. }
  1589. // Similarly, transform things like:
  1590. // getelementptr i8* bitcast ([100 x double]* X to i8*), i32 %tmp
  1591. // (where tmp = 8*tmp2) into:
  1592. // getelementptr [100 x double]* %arr, i32 0, i32 %tmp2; bitcast
  1593. if (ResElTy->isSized() && SrcElTy->isSized() && SrcElTy->isArrayTy()) {
  1594. // Check that changing to the array element type amounts to dividing the
  1595. // index by a scale factor.
  1596. uint64_t ResSize = DL.getTypeAllocSize(ResElTy);
  1597. uint64_t ArrayEltSize =
  1598. DL.getTypeAllocSize(SrcElTy->getArrayElementType());
  1599. if (ResSize && ArrayEltSize % ResSize == 0) {
  1600. Value *Idx = GEP.getOperand(1);
  1601. unsigned BitWidth = Idx->getType()->getPrimitiveSizeInBits();
  1602. uint64_t Scale = ArrayEltSize / ResSize;
  1603. // Earlier transforms ensure that the index has type IntPtrType, which
  1604. // considerably simplifies the logic by eliminating implicit casts.
  1605. assert(Idx->getType() == DL.getIntPtrType(GEP.getType()) &&
  1606. "Index not cast to pointer width?");
  1607. bool NSW;
  1608. if (Value *NewIdx = Descale(Idx, APInt(BitWidth, Scale), NSW)) {
  1609. // Successfully decomposed Idx as NewIdx * Scale, form a new GEP.
  1610. // If the multiplication NewIdx * Scale may overflow then the new
  1611. // GEP may not be "inbounds".
  1612. Value *Off[2] = {
  1613. Constant::getNullValue(DL.getIntPtrType(GEP.getType())),
  1614. NewIdx};
  1615. Value *NewGEP = GEP.isInBounds() && NSW
  1616. ? Builder->CreateInBoundsGEP(
  1617. SrcElTy, StrippedPtr, Off, GEP.getName())
  1618. : Builder->CreateGEP(SrcElTy, StrippedPtr, Off,
  1619. GEP.getName());
  1620. // The NewGEP must be pointer typed, so must the old one -> BitCast
  1621. return CastInst::CreatePointerBitCastOrAddrSpaceCast(NewGEP,
  1622. GEP.getType());
  1623. }
  1624. }
  1625. }
  1626. }
  1627. }
  1628. // addrspacecast between types is canonicalized as a bitcast, then an
  1629. // addrspacecast. To take advantage of the below bitcast + struct GEP, look
  1630. // through the addrspacecast.
  1631. if (AddrSpaceCastInst *ASC = dyn_cast<AddrSpaceCastInst>(PtrOp)) {
  1632. // X = bitcast A addrspace(1)* to B addrspace(1)*
  1633. // Y = addrspacecast A addrspace(1)* to B addrspace(2)*
  1634. // Z = gep Y, <...constant indices...>
  1635. // Into an addrspacecasted GEP of the struct.
  1636. if (BitCastInst *BC = dyn_cast<BitCastInst>(ASC->getOperand(0)))
  1637. PtrOp = BC;
  1638. }
  1639. /// See if we can simplify:
  1640. /// X = bitcast A* to B*
  1641. /// Y = gep X, <...constant indices...>
  1642. /// into a gep of the original struct. This is important for SROA and alias
  1643. /// analysis of unions. If "A" is also a bitcast, wait for A/X to be merged.
  1644. if (BitCastInst *BCI = dyn_cast<BitCastInst>(PtrOp)) {
  1645. Value *Operand = BCI->getOperand(0);
  1646. PointerType *OpType = cast<PointerType>(Operand->getType());
  1647. unsigned OffsetBits = DL.getPointerTypeSizeInBits(GEP.getType());
  1648. APInt Offset(OffsetBits, 0);
  1649. if (!isa<BitCastInst>(Operand) &&
  1650. GEP.accumulateConstantOffset(DL, Offset)) {
  1651. // If this GEP instruction doesn't move the pointer, just replace the GEP
  1652. // with a bitcast of the real input to the dest type.
  1653. if (!Offset) {
  1654. // If the bitcast is of an allocation, and the allocation will be
  1655. // converted to match the type of the cast, don't touch this.
  1656. if (isa<AllocaInst>(Operand) || isAllocationFn(Operand, &TLI)) {
  1657. // See if the bitcast simplifies, if so, don't nuke this GEP yet.
  1658. if (Instruction *I = visitBitCast(*BCI)) {
  1659. if (I != BCI) {
  1660. I->takeName(BCI);
  1661. BCI->getParent()->getInstList().insert(BCI->getIterator(), I);
  1662. replaceInstUsesWith(*BCI, I);
  1663. }
  1664. return &GEP;
  1665. }
  1666. }
  1667. if (Operand->getType()->getPointerAddressSpace() != GEP.getAddressSpace())
  1668. return new AddrSpaceCastInst(Operand, GEP.getType());
  1669. return new BitCastInst(Operand, GEP.getType());
  1670. }
  1671. // Otherwise, if the offset is non-zero, we need to find out if there is a
  1672. // field at Offset in 'A's type. If so, we can pull the cast through the
  1673. // GEP.
  1674. SmallVector<Value*, 8> NewIndices;
  1675. if (FindElementAtOffset(OpType, Offset.getSExtValue(), NewIndices)) {
  1676. Value *NGEP =
  1677. GEP.isInBounds()
  1678. ? Builder->CreateInBoundsGEP(nullptr, Operand, NewIndices)
  1679. : Builder->CreateGEP(nullptr, Operand, NewIndices);
  1680. if (NGEP->getType() == GEP.getType())
  1681. return replaceInstUsesWith(GEP, NGEP);
  1682. NGEP->takeName(&GEP);
  1683. if (NGEP->getType()->getPointerAddressSpace() != GEP.getAddressSpace())
  1684. return new AddrSpaceCastInst(NGEP, GEP.getType());
  1685. return new BitCastInst(NGEP, GEP.getType());
  1686. }
  1687. }
  1688. }
  1689. if (!GEP.isInBounds()) {
  1690. unsigned PtrWidth =
  1691. DL.getPointerSizeInBits(PtrOp->getType()->getPointerAddressSpace());
  1692. APInt BasePtrOffset(PtrWidth, 0);
  1693. Value *UnderlyingPtrOp =
  1694. PtrOp->stripAndAccumulateInBoundsConstantOffsets(DL,
  1695. BasePtrOffset);
  1696. if (auto *AI = dyn_cast<AllocaInst>(UnderlyingPtrOp)) {
  1697. if (GEP.accumulateConstantOffset(DL, BasePtrOffset) &&
  1698. BasePtrOffset.isNonNegative()) {
  1699. APInt AllocSize(PtrWidth, DL.getTypeAllocSize(AI->getAllocatedType()));
  1700. if (BasePtrOffset.ule(AllocSize)) {
  1701. return GetElementPtrInst::CreateInBounds(
  1702. PtrOp, makeArrayRef(Ops).slice(1), GEP.getName());
  1703. }
  1704. }
  1705. }
  1706. }
  1707. return nullptr;
  1708. }
  1709. static bool isNeverEqualToUnescapedAlloc(Value *V, const TargetLibraryInfo *TLI,
  1710. Instruction *AI) {
  1711. if (isa<ConstantPointerNull>(V))
  1712. return true;
  1713. if (auto *LI = dyn_cast<LoadInst>(V))
  1714. return isa<GlobalVariable>(LI->getPointerOperand());
  1715. // Two distinct allocations will never be equal.
  1716. // We rely on LookThroughBitCast in isAllocLikeFn being false, since looking
  1717. // through bitcasts of V can cause
  1718. // the result statement below to be true, even when AI and V (ex:
  1719. // i8* ->i32* ->i8* of AI) are the same allocations.
  1720. return isAllocLikeFn(V, TLI) && V != AI;
  1721. }
/// Determine whether the allocation \p AI and everything derived from it can
/// be deleted. Walks all transitive users of \p AI; returns true (filling
/// \p Users with every visited user) only if every user is one we know how
/// to erase or rewrite: casts/GEPs, equality compares against values that
/// can never equal the alloc, non-volatile stores *into* the alloc,
/// known-harmless intrinsics, and free calls.
static bool
isAllocSiteRemovable(Instruction *AI, SmallVectorImpl<WeakVH> &Users,
                     const TargetLibraryInfo *TLI) {
  SmallVector<Instruction*, 4> Worklist;
  Worklist.push_back(AI);

  do {
    Instruction *PI = Worklist.pop_back_val();
    for (User *U : PI->users()) {
      Instruction *I = cast<Instruction>(U);
      switch (I->getOpcode()) {
      default:
        // Give up the moment we see something we can't handle.
        return false;

      case Instruction::BitCast:
      case Instruction::GetElementPtr:
        // Derived pointers: record them and keep walking their users too.
        Users.emplace_back(I);
        Worklist.push_back(I);
        continue;

      case Instruction::ICmp: {
        ICmpInst *ICI = cast<ICmpInst>(I);
        // We can fold eq/ne comparisons with null to false/true, respectively.
        // We also fold comparisons in some conditions provided the alloc has
        // not escaped (see isNeverEqualToUnescapedAlloc).
        if (!ICI->isEquality())
          return false;
        unsigned OtherIndex = (ICI->getOperand(0) == PI) ? 1 : 0;
        if (!isNeverEqualToUnescapedAlloc(ICI->getOperand(OtherIndex), TLI, AI))
          return false;
        Users.emplace_back(I);
        continue;
      }

      case Instruction::Call:
        // Ignore no-op and store intrinsics.
        if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
          switch (II->getIntrinsicID()) {
          default:
            return false;

          case Intrinsic::memmove:
          case Intrinsic::memcpy:
          case Intrinsic::memset: {
            MemIntrinsic *MI = cast<MemIntrinsic>(II);
            // Only writes *into* the dying allocation are removable; a read
            // from it (or a volatile access) must be preserved.
            if (MI->isVolatile() || MI->getRawDest() != PI)
              return false;
            LLVM_FALLTHROUGH;
          }
          case Intrinsic::dbg_declare:
          case Intrinsic::dbg_value:
          case Intrinsic::invariant_start:
          case Intrinsic::invariant_end:
          case Intrinsic::lifetime_start:
          case Intrinsic::lifetime_end:
          case Intrinsic::objectsize:
            Users.emplace_back(I);
            continue;
          }
        }

        // Freeing the allocation is fine; the free goes away with it.
        if (isFreeCall(I, TLI)) {
          Users.emplace_back(I);
          continue;
        }
        return false;

      case Instruction::Store: {
        StoreInst *SI = cast<StoreInst>(I);
        // A store *of* the pointer would escape it; only non-volatile stores
        // through the pointer are removable.
        if (SI->isVolatile() || SI->getPointerOperand() != PI)
          return false;
        Users.emplace_back(I);
        continue;
      }
      }
      llvm_unreachable("missing a return?");
    }
  } while (!Worklist.empty());
  return true;
}
/// Try to delete a dead allocation site (\p MI) together with all of its
/// users. Returns the erased instruction on success, null otherwise.
Instruction *InstCombiner::visitAllocSite(Instruction &MI) {
  // If we have a malloc call which is only used in any amount of comparisons
  // to null and free calls, delete the calls and replace the comparisons with
  // true or false as appropriate.
  SmallVector<WeakVH, 64> Users;
  if (isAllocSiteRemovable(&MI, Users, &TLI)) {
    // First pass: lower all @llvm.objectsize calls, because they may
    // use a bitcast/GEP of the alloca we are removing (which the second pass
    // erases). Users holds WeakVHs, so entries nulled by erasure are skipped.
    for (unsigned i = 0, e = Users.size(); i != e; ++i) {
      // Lowering all @llvm.objectsize calls first because they may
      // use a bitcast/GEP of the alloca we are removing.
      if (!Users[i])
        continue;

      Instruction *I = cast<Instruction>(&*Users[i]);

      if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
        if (II->getIntrinsicID() == Intrinsic::objectsize) {
          ConstantInt *Result = lowerObjectSizeCall(II, DL, &TLI,
                                                    /*MustSucceed=*/true);
          replaceInstUsesWith(*I, Result);
          eraseInstFromFunction(*I);
          Users[i] = nullptr; // Skip examining in the next loop.
        }
      }
    }

    // Second pass: fold the remaining users and erase them.
    for (unsigned i = 0, e = Users.size(); i != e; ++i) {
      if (!Users[i])
        continue;

      Instruction *I = cast<Instruction>(&*Users[i]);

      if (ICmpInst *C = dyn_cast<ICmpInst>(I)) {
        // An equality compare against the dying alloc can never be true
        // (and an inequality never false); fold accordingly.
        replaceInstUsesWith(*C,
                            ConstantInt::get(Type::getInt1Ty(C->getContext()),
                                             C->isFalseWhenEqual()));
      } else if (isa<BitCastInst>(I) || isa<GetElementPtrInst>(I)) {
        replaceInstUsesWith(*I, UndefValue::get(I->getType()));
      }
      eraseInstFromFunction(*I);
    }

    if (InvokeInst *II = dyn_cast<InvokeInst>(&MI)) {
      // Replace invoke with a NOP intrinsic to maintain the original CFG
      Module *M = II->getModule();
      Function *F = Intrinsic::getDeclaration(M, Intrinsic::donothing);
      InvokeInst::Create(F, II->getNormalDest(), II->getUnwindDest(),
                         None, "", II->getParent());
    }
    return eraseInstFromFunction(MI);
  }
  return nullptr;
}
  1842. /// \brief Move the call to free before a NULL test.
  1843. ///
  1844. /// Check if this free is accessed after its argument has been test
  1845. /// against NULL (property 0).
  1846. /// If yes, it is legal to move this call in its predecessor block.
  1847. ///
  1848. /// The move is performed only if the block containing the call to free
  1849. /// will be removed, i.e.:
  1850. /// 1. it has only one predecessor P, and P has two successors
  1851. /// 2. it contains the call and an unconditional branch
  1852. /// 3. its successor is the same as its predecessor's successor
  1853. ///
  1854. /// The profitability is out-of concern here and this function should
  1855. /// be called only if the caller knows this transformation would be
  1856. /// profitable (e.g., for code size).
  1857. static Instruction *
  1858. tryToMoveFreeBeforeNullTest(CallInst &FI) {
  1859. Value *Op = FI.getArgOperand(0);
  1860. BasicBlock *FreeInstrBB = FI.getParent();
  1861. BasicBlock *PredBB = FreeInstrBB->getSinglePredecessor();
  1862. // Validate part of constraint #1: Only one predecessor
  1863. // FIXME: We can extend the number of predecessor, but in that case, we
  1864. // would duplicate the call to free in each predecessor and it may
  1865. // not be profitable even for code size.
  1866. if (!PredBB)
  1867. return nullptr;
  1868. // Validate constraint #2: Does this block contains only the call to
  1869. // free and an unconditional branch?
  1870. // FIXME: We could check if we can speculate everything in the
  1871. // predecessor block
  1872. if (FreeInstrBB->size() != 2)
  1873. return nullptr;
  1874. BasicBlock *SuccBB;
  1875. if (!match(FreeInstrBB->getTerminator(), m_UnconditionalBr(SuccBB)))
  1876. return nullptr;
  1877. // Validate the rest of constraint #1 by matching on the pred branch.
  1878. TerminatorInst *TI = PredBB->getTerminator();
  1879. BasicBlock *TrueBB, *FalseBB;
  1880. ICmpInst::Predicate Pred;
  1881. if (!match(TI, m_Br(m_ICmp(Pred, m_Specific(Op), m_Zero()), TrueBB, FalseBB)))
  1882. return nullptr;
  1883. if (Pred != ICmpInst::ICMP_EQ && Pred != ICmpInst::ICMP_NE)
  1884. return nullptr;
  1885. // Validate constraint #3: Ensure the null case just falls through.
  1886. if (SuccBB != (Pred == ICmpInst::ICMP_EQ ? TrueBB : FalseBB))
  1887. return nullptr;
  1888. assert(FreeInstrBB == (Pred == ICmpInst::ICMP_EQ ? FalseBB : TrueBB) &&
  1889. "Broken CFG: missing edge from predecessor to successor");
  1890. FI.moveBefore(TI);
  1891. return &FI;
  1892. }
  1893. Instruction *InstCombiner::visitFree(CallInst &FI) {
  1894. Value *Op = FI.getArgOperand(0);
  1895. // free undef -> unreachable.
  1896. if (isa<UndefValue>(Op)) {
  1897. // Insert a new store to null because we cannot modify the CFG here.
  1898. Builder->CreateStore(ConstantInt::getTrue(FI.getContext()),
  1899. UndefValue::get(Type::getInt1PtrTy(FI.getContext())));
  1900. return eraseInstFromFunction(FI);
  1901. }
  1902. // If we have 'free null' delete the instruction. This can happen in stl code
  1903. // when lots of inlining happens.
  1904. if (isa<ConstantPointerNull>(Op))
  1905. return eraseInstFromFunction(FI);
  1906. // If we optimize for code size, try to move the call to free before the null
  1907. // test so that simplify cfg can remove the empty block and dead code
  1908. // elimination the branch. I.e., helps to turn something like:
  1909. // if (foo) free(foo);
  1910. // into
  1911. // free(foo);
  1912. if (MinimizeSize)
  1913. if (Instruction *I = tryToMoveFreeBeforeNullTest(FI))
  1914. return I;
  1915. return nullptr;
  1916. }
  1917. Instruction *InstCombiner::visitReturnInst(ReturnInst &RI) {
  1918. if (RI.getNumOperands() == 0) // ret void
  1919. return nullptr;
  1920. Value *ResultOp = RI.getOperand(0);
  1921. Type *VTy = ResultOp->getType();
  1922. if (!VTy->isIntegerTy())
  1923. return nullptr;
  1924. // There might be assume intrinsics dominating this return that completely
  1925. // determine the value. If so, constant fold it.
  1926. unsigned BitWidth = VTy->getPrimitiveSizeInBits();
  1927. APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
  1928. computeKnownBits(ResultOp, KnownZero, KnownOne, 0, &RI);
  1929. if ((KnownZero|KnownOne).isAllOnesValue())
  1930. RI.setOperand(0, Constant::getIntegerValue(VTy, KnownOne));
  1931. return nullptr;
  1932. }
  1933. Instruction *InstCombiner::visitBranchInst(BranchInst &BI) {
  1934. // Change br (not X), label True, label False to: br X, label False, True
  1935. Value *X = nullptr;
  1936. BasicBlock *TrueDest;
  1937. BasicBlock *FalseDest;
  1938. if (match(&BI, m_Br(m_Not(m_Value(X)), TrueDest, FalseDest)) &&
  1939. !isa<Constant>(X)) {
  1940. // Swap Destinations and condition...
  1941. BI.setCondition(X);
  1942. BI.swapSuccessors();
  1943. return &BI;
  1944. }
  1945. // If the condition is irrelevant, remove the use so that other
  1946. // transforms on the condition become more effective.
  1947. if (BI.isConditional() &&
  1948. BI.getSuccessor(0) == BI.getSuccessor(1) &&
  1949. !isa<UndefValue>(BI.getCondition())) {
  1950. BI.setCondition(UndefValue::get(BI.getCondition()->getType()));
  1951. return &BI;
  1952. }
  1953. // Canonicalize fcmp_one -> fcmp_oeq
  1954. FCmpInst::Predicate FPred; Value *Y;
  1955. if (match(&BI, m_Br(m_FCmp(FPred, m_Value(X), m_Value(Y)),
  1956. TrueDest, FalseDest)) &&
  1957. BI.getCondition()->hasOneUse())
  1958. if (FPred == FCmpInst::FCMP_ONE || FPred == FCmpInst::FCMP_OLE ||
  1959. FPred == FCmpInst::FCMP_OGE) {
  1960. FCmpInst *Cond = cast<FCmpInst>(BI.getCondition());
  1961. Cond->setPredicate(FCmpInst::getInversePredicate(FPred));
  1962. // Swap Destinations and condition.
  1963. BI.swapSuccessors();
  1964. Worklist.Add(Cond);
  1965. return &BI;
  1966. }
  1967. // Canonicalize icmp_ne -> icmp_eq
  1968. ICmpInst::Predicate IPred;
  1969. if (match(&BI, m_Br(m_ICmp(IPred, m_Value(X), m_Value(Y)),
  1970. TrueDest, FalseDest)) &&
  1971. BI.getCondition()->hasOneUse())
  1972. if (IPred == ICmpInst::ICMP_NE || IPred == ICmpInst::ICMP_ULE ||
  1973. IPred == ICmpInst::ICMP_SLE || IPred == ICmpInst::ICMP_UGE ||
  1974. IPred == ICmpInst::ICMP_SGE) {
  1975. ICmpInst *Cond = cast<ICmpInst>(BI.getCondition());
  1976. Cond->setPredicate(ICmpInst::getInversePredicate(IPred));
  1977. // Swap Destinations and condition.
  1978. BI.swapSuccessors();
  1979. Worklist.Add(Cond);
  1980. return &BI;
  1981. }
  1982. return nullptr;
  1983. }
/// Simplify a switch: fold a constant offset out of the condition, and
/// shrink the condition's bit width when known bits allow it.
Instruction *InstCombiner::visitSwitchInst(SwitchInst &SI) {
  Value *Cond = SI.getCondition();
  Value *Op0;
  ConstantInt *AddRHS;
  if (match(Cond, m_Add(m_Value(Op0), m_ConstantInt(AddRHS)))) {
    // Change 'switch (X+4) case 1:' into 'switch (X) case -3'.
    // Case handles reference the switch itself, so setValue through the
    // by-value copy updates the real case list.
    for (auto Case : SI.cases()) {
      Constant *NewCase = ConstantExpr::getSub(Case.getCaseValue(), AddRHS);
      assert(isa<ConstantInt>(NewCase) &&
             "Result of expression should be constant");
      Case.setValue(cast<ConstantInt>(NewCase));
    }
    SI.setCondition(Op0);
    return &SI;
  }

  unsigned BitWidth = cast<IntegerType>(Cond->getType())->getBitWidth();
  APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
  computeKnownBits(Cond, KnownZero, KnownOne, 0, &SI);
  unsigned LeadingKnownZeros = KnownZero.countLeadingOnes();
  unsigned LeadingKnownOnes = KnownOne.countLeadingOnes();

  // Compute the number of leading bits we can ignore.
  // The ignorable prefix is limited by both the condition's known leading
  // bits and each case value's leading bits.
  // TODO: A better way to determine this would use ComputeNumSignBits().
  for (auto &C : SI.cases()) {
    LeadingKnownZeros = std::min(
        LeadingKnownZeros, C.getCaseValue()->getValue().countLeadingZeros());
    LeadingKnownOnes = std::min(
        LeadingKnownOnes, C.getCaseValue()->getValue().countLeadingOnes());
  }

  unsigned NewWidth = BitWidth - std::max(LeadingKnownZeros, LeadingKnownOnes);

  // Shrink the condition operand if the new type is smaller than the old type.
  // This may produce a non-standard type for the switch, but that's ok because
  // the backend should extend back to a legal type for the target.
  if (NewWidth > 0 && NewWidth < BitWidth) {
    IntegerType *Ty = IntegerType::get(SI.getContext(), NewWidth);
    Builder->SetInsertPoint(&SI);
    Value *NewCond = Builder->CreateTrunc(Cond, Ty, "trunc");
    SI.setCondition(NewCond);

    // Truncate every case value to match the narrowed condition.
    for (auto Case : SI.cases()) {
      APInt TruncatedCase = Case.getCaseValue()->getValue().trunc(NewWidth);
      Case.setValue(ConstantInt::get(SI.getContext(), TruncatedCase));
    }
    return &SI;
  }

  return nullptr;
}
/// Simplify extractvalue: fold through insertvalue chains, strip the result
/// out of with-overflow intrinsics, and shrink loads of aggregates.
Instruction *InstCombiner::visitExtractValueInst(ExtractValueInst &EV) {
  Value *Agg = EV.getAggregateOperand();

  // An extractvalue with no indices is just the aggregate itself.
  if (!EV.hasIndices())
    return replaceInstUsesWith(EV, Agg);

  if (Value *V =
          SimplifyExtractValueInst(Agg, EV.getIndices(), DL, &TLI, &DT, &AC))
    return replaceInstUsesWith(EV, V);

  if (InsertValueInst *IV = dyn_cast<InsertValueInst>(Agg)) {
    // We're extracting from an insertvalue instruction, compare the indices
    // element by element until one list ends or they diverge.
    const unsigned *exti, *exte, *insi, *inse;
    for (exti = EV.idx_begin(), insi = IV->idx_begin(),
         exte = EV.idx_end(), inse = IV->idx_end();
         exti != exte && insi != inse;
         ++exti, ++insi) {
      if (*insi != *exti)
        // The insert and extract both reference distinctly different elements.
        // This means the extract is not influenced by the insert, and we can
        // replace the aggregate operand of the extract with the aggregate
        // operand of the insert. i.e., replace
        // %I = insertvalue { i32, { i32 } } %A, { i32 } { i32 42 }, 1
        // %E = extractvalue { i32, { i32 } } %I, 0
        // with
        // %E = extractvalue { i32, { i32 } } %A, 0
        return ExtractValueInst::Create(IV->getAggregateOperand(),
                                        EV.getIndices());
    }
    if (exti == exte && insi == inse)
      // Both iterators are at the end: Index lists are identical. Replace
      // %B = insertvalue { i32, { i32 } } %A, i32 42, 1, 0
      // %C = extractvalue { i32, { i32 } } %B, 1, 0
      // with "i32 42"
      return replaceInstUsesWith(EV, IV->getInsertedValueOperand());
    if (exti == exte) {
      // The extract list is a prefix of the insert list. i.e. replace
      // %I = insertvalue { i32, { i32 } } %A, i32 42, 1, 0
      // %E = extractvalue { i32, { i32 } } %I, 1
      // with
      // %X = extractvalue { i32, { i32 } } %A, 1
      // %E = insertvalue { i32 } %X, i32 42, 0
      // by switching the order of the insert and extract (though the
      // insertvalue should be left in, since it may have other uses).
      Value *NewEV = Builder->CreateExtractValue(IV->getAggregateOperand(),
                                                 EV.getIndices());
      return InsertValueInst::Create(NewEV, IV->getInsertedValueOperand(),
                                     makeArrayRef(insi, inse));
    }
    if (insi == inse)
      // The insert list is a prefix of the extract list
      // We can simply remove the common indices from the extract and make it
      // operate on the inserted value instead of the insertvalue result.
      // i.e., replace
      // %I = insertvalue { i32, { i32 } } %A, { i32 } { i32 42 }, 1
      // %E = extractvalue { i32, { i32 } } %I, 1, 0
      // with
      // %E extractvalue { i32 } { i32 42 }, 0
      return ExtractValueInst::Create(IV->getInsertedValueOperand(),
                                      makeArrayRef(exti, exte));
  }
  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Agg)) {
    // We're extracting from an intrinsic, see if we're the only user, which
    // allows us to simplify multiple result intrinsics to simpler things that
    // just get one value.
    if (II->hasOneUse()) {
      // Check if we're grabbing the overflow bit or the result of a 'with
      // overflow' intrinsic. If it's the latter we can remove the intrinsic
      // and replace it with a traditional binary instruction.
      switch (II->getIntrinsicID()) {
      case Intrinsic::uadd_with_overflow:
      case Intrinsic::sadd_with_overflow:
        if (*EV.idx_begin() == 0) { // Normal result.
          Value *LHS = II->getArgOperand(0), *RHS = II->getArgOperand(1);
          // Point remaining (dead) uses of the intrinsic at undef before
          // erasing it, then hand back a plain add.
          replaceInstUsesWith(*II, UndefValue::get(II->getType()));
          eraseInstFromFunction(*II);
          return BinaryOperator::CreateAdd(LHS, RHS);
        }

        // If the normal result of the add is dead, and the RHS is a constant,
        // we can transform this into a range comparison.
        // overflow = uadd a, -4 --> overflow = icmp ugt a, 3
        if (II->getIntrinsicID() == Intrinsic::uadd_with_overflow)
          if (ConstantInt *CI = dyn_cast<ConstantInt>(II->getArgOperand(1)))
            return new ICmpInst(ICmpInst::ICMP_UGT, II->getArgOperand(0),
                                ConstantExpr::getNot(CI));
        break;
      case Intrinsic::usub_with_overflow:
      case Intrinsic::ssub_with_overflow:
        if (*EV.idx_begin() == 0) { // Normal result.
          Value *LHS = II->getArgOperand(0), *RHS = II->getArgOperand(1);
          replaceInstUsesWith(*II, UndefValue::get(II->getType()));
          eraseInstFromFunction(*II);
          return BinaryOperator::CreateSub(LHS, RHS);
        }
        break;
      case Intrinsic::umul_with_overflow:
      case Intrinsic::smul_with_overflow:
        if (*EV.idx_begin() == 0) { // Normal result.
          Value *LHS = II->getArgOperand(0), *RHS = II->getArgOperand(1);
          replaceInstUsesWith(*II, UndefValue::get(II->getType()));
          eraseInstFromFunction(*II);
          return BinaryOperator::CreateMul(LHS, RHS);
        }
        break;
      default:
        break;
      }
    }
  }
  if (LoadInst *L = dyn_cast<LoadInst>(Agg))
    // If the (non-volatile) load only has one use, we can rewrite this to a
    // load from a GEP. This reduces the size of the load. If a load is used
    // only by extractvalue instructions then this either must have been
    // optimized before, or it is a struct with padding, in which case we
    // don't want to do the transformation as it loses padding knowledge.
    if (L->isSimple() && L->hasOneUse()) {
      // extractvalue has integer indices, getelementptr has Value*s. Convert.
      SmallVector<Value*, 4> Indices;
      // Prefix an i32 0 since we need the first element.
      Indices.push_back(Builder->getInt32(0));
      for (ExtractValueInst::idx_iterator I = EV.idx_begin(), E = EV.idx_end();
           I != E; ++I)
        Indices.push_back(Builder->getInt32(*I));

      // We need to insert these at the location of the old load, not at that of
      // the extractvalue.
      Builder->SetInsertPoint(L);
      Value *GEP = Builder->CreateInBoundsGEP(L->getType(),
                                              L->getPointerOperand(), Indices);
      // Returning the load directly will cause the main loop to insert it in
      // the wrong spot, so use replaceInstUsesWith().
      return replaceInstUsesWith(EV, Builder->CreateLoad(GEP));
    }
  // We could simplify extracts from other values. Note that nested extracts may
  // already be simplified implicitly by the above: extract (extract (insert) )
  // will be translated into extract ( insert ( extract ) ) first and then just
  // the value inserted, if appropriate. Similarly for extracts from single-use
  // loads: extract (extract (load)) will be translated to extract (load (gep))
  // and if again single-use then via load (gep (gep)) to load (gep).
  // However, double extracts from e.g. function arguments or return values
  // aren't handled yet.
  return nullptr;
}
  2168. /// Return 'true' if the given typeinfo will match anything.
  2169. static bool isCatchAll(EHPersonality Personality, Constant *TypeInfo) {
  2170. switch (Personality) {
  2171. case EHPersonality::GNU_C:
  2172. case EHPersonality::GNU_C_SjLj:
  2173. case EHPersonality::Rust:
  2174. // The GCC C EH and Rust personality only exists to support cleanups, so
  2175. // it's not clear what the semantics of catch clauses are.
  2176. return false;
  2177. case EHPersonality::Unknown:
  2178. return false;
  2179. case EHPersonality::GNU_Ada:
  2180. // While __gnat_all_others_value will match any Ada exception, it doesn't
  2181. // match foreign exceptions (or didn't, before gcc-4.7).
  2182. return false;
  2183. case EHPersonality::GNU_CXX:
  2184. case EHPersonality::GNU_CXX_SjLj:
  2185. case EHPersonality::GNU_ObjC:
  2186. case EHPersonality::MSVC_X86SEH:
  2187. case EHPersonality::MSVC_Win64SEH:
  2188. case EHPersonality::MSVC_CXX:
  2189. case EHPersonality::CoreCLR:
  2190. return TypeInfo->isNullValue();
  2191. }
  2192. llvm_unreachable("invalid enum");
  2193. }
  2194. static bool shorter_filter(const Value *LHS, const Value *RHS) {
  2195. return
  2196. cast<ArrayType>(LHS->getType())->getNumElements()
  2197. <
  2198. cast<ArrayType>(RHS->getType())->getNumElements();
  2199. }
/// Simplify a landingpad's clause list: drop repeated catch clauses, prune
/// and sort filters, and clear a pointless cleanup flag.
Instruction *InstCombiner::visitLandingPadInst(LandingPadInst &LI) {
  // The logic here should be correct for any real-world personality function.
  // However if that turns out not to be true, the offending logic can always
  // be conditioned on the personality function, like the catch-all logic is.
  EHPersonality Personality =
      classifyEHPersonality(LI.getParent()->getParent()->getPersonalityFn());

  // Simplify the list of clauses, eg by removing repeated catch clauses
  // (these are often created by inlining).
  bool MakeNewInstruction = false; // If true, recreate using the following:
  SmallVector<Constant *, 16> NewClauses; // - Clauses for the new instruction;
  bool CleanupFlag = LI.isCleanup();      // - The new instruction is a cleanup.

  SmallPtrSet<Value *, 16> AlreadyCaught; // Typeinfos known caught already.
  for (unsigned i = 0, e = LI.getNumClauses(); i != e; ++i) {
    bool isLastClause = i + 1 == e;
    if (LI.isCatch(i)) {
      // A catch clause.
      Constant *CatchClause = LI.getClause(i);
      // Compare typeinfos through pointer casts so bitcast-differing copies
      // of the same typeinfo are recognized as duplicates.
      Constant *TypeInfo = CatchClause->stripPointerCasts();

      // If we already saw this clause, there is no point in having a second
      // copy of it.
      if (AlreadyCaught.insert(TypeInfo).second) {
        // This catch clause was not already seen.
        NewClauses.push_back(CatchClause);
      } else {
        // Repeated catch clause - drop the redundant copy.
        MakeNewInstruction = true;
      }

      // If this is a catch-all then there is no point in keeping any following
      // clauses or marking the landingpad as having a cleanup.
      if (isCatchAll(Personality, TypeInfo)) {
        if (!isLastClause)
          MakeNewInstruction = true;
        CleanupFlag = false;
        break;
      }
    } else {
      // A filter clause.  If any of the filter elements were already caught
      // then they can be dropped from the filter.  It is tempting to try to
      // exploit the filter further by saying that any typeinfo that does not
      // occur in the filter can't be caught later (and thus can be dropped).
      // However this would be wrong, since typeinfos can match without being
      // equal (for example if one represents a C++ class, and the other some
      // class derived from it).
      assert(LI.isFilter(i) && "Unsupported landingpad clause!");
      Constant *FilterClause = LI.getClause(i);
      ArrayType *FilterType = cast<ArrayType>(FilterClause->getType());
      unsigned NumTypeInfos = FilterType->getNumElements();

      // An empty filter catches everything, so there is no point in keeping
      // any following clauses or marking the landingpad as having a cleanup.
      // By dealing with this case here the following code is made a bit
      // simpler.
      if (!NumTypeInfos) {
        NewClauses.push_back(FilterClause);
        if (!isLastClause)
          MakeNewInstruction = true;
        CleanupFlag = false;
        break;
      }

      bool MakeNewFilter = false; // If true, make a new filter.
      SmallVector<Constant *, 16> NewFilterElts; // New elements.
      if (isa<ConstantAggregateZero>(FilterClause)) {
        // Not an empty filter - it contains at least one null typeinfo.
        assert(NumTypeInfos > 0 && "Should have handled empty filter already!");
        Constant *TypeInfo =
            Constant::getNullValue(FilterType->getElementType());
        // If this typeinfo is a catch-all then the filter can never match.
        if (isCatchAll(Personality, TypeInfo)) {
          // Throw the filter away.
          MakeNewInstruction = true;
          continue;
        }

        // There is no point in having multiple copies of this typeinfo, so
        // discard all but the first copy if there is more than one.
        NewFilterElts.push_back(TypeInfo);
        if (NumTypeInfos > 1)
          MakeNewFilter = true;
      } else {
        ConstantArray *Filter = cast<ConstantArray>(FilterClause);
        SmallPtrSet<Value *, 16> SeenInFilter; // For uniquing the elements.
        NewFilterElts.reserve(NumTypeInfos);

        // Remove any filter elements that were already caught or that already
        // occurred in the filter.  While there, see if any of the elements are
        // catch-alls.  If so, the filter can be discarded.
        bool SawCatchAll = false;
        for (unsigned j = 0; j != NumTypeInfos; ++j) {
          Constant *Elt = Filter->getOperand(j);
          Constant *TypeInfo = Elt->stripPointerCasts();
          if (isCatchAll(Personality, TypeInfo)) {
            // This element is a catch-all.  Bail out, noting this fact.
            SawCatchAll = true;
            break;
          }

          // Even if we've seen a type in a catch clause, we don't want to
          // remove it from the filter.  An unexpected type handler may be
          // set up for a call site which throws an exception of the same
          // type caught.  In order for the exception thrown by the unexpected
          // handler to propagate correctly, the filter must be correctly
          // described for the call site.
          //
          // Example:
          //
          // void unexpected() { throw 1;}
          // void foo() throw (int) {
          //   std::set_unexpected(unexpected);
          //   try {
          //     throw 2.0;
          //   } catch (int i) {}
          // }

          // There is no point in having multiple copies of the same typeinfo
          // in a filter, so only add it if we didn't already.
          if (SeenInFilter.insert(TypeInfo).second)
            NewFilterElts.push_back(cast<Constant>(Elt));
        }
        // A filter containing a catch-all cannot match anything by definition.
        if (SawCatchAll) {
          // Throw the filter away.
          MakeNewInstruction = true;
          continue;
        }

        // If we dropped something from the filter, make a new one.
        if (NewFilterElts.size() < NumTypeInfos)
          MakeNewFilter = true;
      }
      if (MakeNewFilter) {
        FilterType = ArrayType::get(FilterType->getElementType(),
                                    NewFilterElts.size());
        FilterClause = ConstantArray::get(FilterType, NewFilterElts);
        MakeNewInstruction = true;
      }

      NewClauses.push_back(FilterClause);

      // If the new filter is empty then it will catch everything so there is
      // no point in keeping any following clauses or marking the landingpad
      // as having a cleanup.  The case of the original filter being empty was
      // already handled above.
      if (MakeNewFilter && !NewFilterElts.size()) {
        assert(MakeNewInstruction && "New filter but not a new instruction!");
        CleanupFlag = false;
        break;
      }
    }
  }

  // If several filters occur in a row then reorder them so that the shortest
  // filters come first (those with the smallest number of elements).  This is
  // advantageous because shorter filters are more likely to match, speeding up
  // unwinding, but mostly because it increases the effectiveness of the other
  // filter optimizations below.
  for (unsigned i = 0, e = NewClauses.size(); i + 1 < e; ) {
    unsigned j;
    // Find the maximal 'j' s.t. the range [i, j) consists entirely of filters.
    for (j = i; j != e; ++j)
      if (!isa<ArrayType>(NewClauses[j]->getType()))
        break;

    // Check whether the filters are already sorted by length.  We need to know
    // if sorting them is actually going to do anything so that we only make a
    // new landingpad instruction if it does.
    for (unsigned k = i; k + 1 < j; ++k)
      if (shorter_filter(NewClauses[k+1], NewClauses[k])) {
        // Not sorted, so sort the filters now.  Doing an unstable sort would be
        // correct too but reordering filters pointlessly might confuse users.
        std::stable_sort(NewClauses.begin() + i, NewClauses.begin() + j,
                         shorter_filter);
        MakeNewInstruction = true;
        break;
      }

    // Look for the next batch of filters.
    i = j + 1;
  }

  // If typeinfos matched if and only if equal, then the elements of a filter L
  // that occurs later than a filter F could be replaced by the intersection of
  // the elements of F and L.  In reality two typeinfos can match without being
  // equal (for example if one represents a C++ class, and the other some class
  // derived from it) so it would be wrong to perform this transform in general.
  // However the transform is correct and useful if F is a subset of L.  In that
  // case L can be replaced by F, and thus removed altogether since repeating a
  // filter is pointless.  So here we look at all pairs of filters F and L where
  // L follows F in the list of clauses, and remove L if every element of F is
  // an element of L.  This can occur when inlining C++ functions with exception
  // specifications.
  for (unsigned i = 0; i + 1 < NewClauses.size(); ++i) {
    // Examine each filter in turn.
    Value *Filter = NewClauses[i];
    ArrayType *FTy = dyn_cast<ArrayType>(Filter->getType());
    if (!FTy)
      // Not a filter - skip it.
      continue;
    unsigned FElts = FTy->getNumElements();
    // Examine each filter following this one.  Doing this backwards means that
    // we don't have to worry about filters disappearing under us when removed.
    for (unsigned j = NewClauses.size() - 1; j != i; --j) {
      Value *LFilter = NewClauses[j];
      ArrayType *LTy = dyn_cast<ArrayType>(LFilter->getType());
      if (!LTy)
        // Not a filter - skip it.
        continue;

      // If Filter is a subset of LFilter, i.e. every element of Filter is also
      // an element of LFilter, then discard LFilter.
      SmallVectorImpl<Constant *>::iterator J = NewClauses.begin() + j;
      // If Filter is empty then it is a subset of LFilter.
      if (!FElts) {
        // Discard LFilter.
        NewClauses.erase(J);
        MakeNewInstruction = true;
        // Move on to the next filter.
        continue;
      }

      unsigned LElts = LTy->getNumElements();
      // If Filter is longer than LFilter then it cannot be a subset of it.
      if (FElts > LElts)
        // Move on to the next filter.
        continue;

      // At this point we know that LFilter has at least one element.
      if (isa<ConstantAggregateZero>(LFilter)) { // LFilter only contains zeros.
        // Filter is a subset of LFilter iff Filter contains only zeros (as we
        // already know that Filter is not longer than LFilter).
        if (isa<ConstantAggregateZero>(Filter)) {
          assert(FElts <= LElts && "Should have handled this case earlier!");
          // Discard LFilter.
          NewClauses.erase(J);
          MakeNewInstruction = true;
        }
        // Move on to the next filter.
        continue;
      }

      ConstantArray *LArray = cast<ConstantArray>(LFilter);
      if (isa<ConstantAggregateZero>(Filter)) { // Filter only contains zeros.
        // Since Filter is non-empty and contains only zeros, it is a subset of
        // LFilter iff LFilter contains a zero.
        assert(FElts > 0 && "Should have eliminated the empty filter earlier!");
        for (unsigned l = 0; l != LElts; ++l)
          if (LArray->getOperand(l)->isNullValue()) {
            // LFilter contains a zero - discard it.
            NewClauses.erase(J);
            MakeNewInstruction = true;
            break;
          }
        // Move on to the next filter.
        continue;
      }

      // At this point we know that both filters are ConstantArrays.  Loop over
      // operands to see whether every element of Filter is also an element of
      // LFilter.  Since filters tend to be short this is probably faster than
      // using a method that scales nicely.
      ConstantArray *FArray = cast<ConstantArray>(Filter);
      bool AllFound = true;
      for (unsigned f = 0; f != FElts; ++f) {
        Value *FTypeInfo = FArray->getOperand(f)->stripPointerCasts();
        AllFound = false;
        for (unsigned l = 0; l != LElts; ++l) {
          Value *LTypeInfo = LArray->getOperand(l)->stripPointerCasts();
          if (LTypeInfo == FTypeInfo) {
            AllFound = true;
            break;
          }
        }
        if (!AllFound)
          break;
      }
      if (AllFound) {
        // Discard LFilter.
        NewClauses.erase(J);
        MakeNewInstruction = true;
      }
      // Move on to the next filter.
    }
  }

  // If we changed any of the clauses, replace the old landingpad instruction
  // with a new one.
  if (MakeNewInstruction) {
    LandingPadInst *NLI = LandingPadInst::Create(LI.getType(),
                                                 NewClauses.size());
    for (unsigned i = 0, e = NewClauses.size(); i != e; ++i)
      NLI->addClause(NewClauses[i]);
    // A landing pad with no clauses must have the cleanup flag set.  It is
    // theoretically possible, though highly unlikely, that we eliminated all
    // clauses.  If so, force the cleanup flag to true.
    if (NewClauses.empty())
      CleanupFlag = true;
    NLI->setCleanup(CleanupFlag);
    return NLI;
  }

  // Even if none of the clauses changed, we may nonetheless have understood
  // that the cleanup flag is pointless.  Clear it if so.
  if (LI.isCleanup() != CleanupFlag) {
    assert(!CleanupFlag && "Adding a cleanup, not removing one?!");
    LI.setCleanup(CleanupFlag);
    return &LI;
  }

  return nullptr;
}
  2488. /// Try to move the specified instruction from its current block into the
  2489. /// beginning of DestBlock, which can only happen if it's safe to move the
  2490. /// instruction past all of the instructions between it and the end of its
  2491. /// block.
  2492. static bool TryToSinkInstruction(Instruction *I, BasicBlock *DestBlock) {
  2493. assert(I->hasOneUse() && "Invariants didn't hold!");
  2494. // Cannot move control-flow-involving, volatile loads, vaarg, etc.
  2495. if (isa<PHINode>(I) || I->isEHPad() || I->mayHaveSideEffects() ||
  2496. isa<TerminatorInst>(I))
  2497. return false;
  2498. // Do not sink alloca instructions out of the entry block.
  2499. if (isa<AllocaInst>(I) && I->getParent() ==
  2500. &DestBlock->getParent()->getEntryBlock())
  2501. return false;
  2502. // Do not sink into catchswitch blocks.
  2503. if (isa<CatchSwitchInst>(DestBlock->getTerminator()))
  2504. return false;
  2505. // Do not sink convergent call instructions.
  2506. if (auto *CI = dyn_cast<CallInst>(I)) {
  2507. if (CI->isConvergent())
  2508. return false;
  2509. }
  2510. // We can only sink load instructions if there is nothing between the load and
  2511. // the end of block that could change the value.
  2512. if (I->mayReadFromMemory()) {
  2513. for (BasicBlock::iterator Scan = I->getIterator(),
  2514. E = I->getParent()->end();
  2515. Scan != E; ++Scan)
  2516. if (Scan->mayWriteToMemory())
  2517. return false;
  2518. }
  2519. BasicBlock::iterator InsertPos = DestBlock->getFirstInsertionPt();
  2520. I->moveBefore(&*InsertPos);
  2521. ++NumSunkInst;
  2522. return true;
  2523. }
// Main combiner driver: repeatedly pops instructions off the worklist and
// applies DCE, constant folding, sinking, and the visit* combines until the
// worklist is exhausted.  Returns true if the IR was changed.
bool InstCombiner::run() {
  while (!Worklist.isEmpty()) {
    Instruction *I = Worklist.RemoveOne();
    if (I == nullptr) continue;  // skip null values.

    // Check to see if we can DCE the instruction.
    if (isInstructionTriviallyDead(I, &TLI)) {
      DEBUG(dbgs() << "IC: DCE: " << *I << '\n');
      eraseInstFromFunction(*I);
      ++NumDeadInst;
      MadeIRChange = true;
      continue;
    }

    // Instruction isn't dead, see if we can constant propagate it.
    // Cheap pre-filter: folding can only succeed if the first operand is
    // already a constant (or there are no operands at all).
    if (!I->use_empty() &&
        (I->getNumOperands() == 0 || isa<Constant>(I->getOperand(0)))) {
      if (Constant *C = ConstantFoldInstruction(I, DL, &TLI)) {
        DEBUG(dbgs() << "IC: ConstFold to: " << *C << " from: " << *I << '\n');

        // Add operands to the worklist.
        replaceInstUsesWith(*I, C);
        ++NumConstProp;
        if (isInstructionTriviallyDead(I, &TLI))
          eraseInstFromFunction(*I);
        MadeIRChange = true;
        continue;
      }
    }

    // In general, it is possible for computeKnownBits to determine all bits in
    // a value even when the operands are not all constants.
    Type *Ty = I->getType();
    if (ExpensiveCombines && !I->use_empty() && Ty->isIntOrIntVectorTy()) {
      unsigned BitWidth = Ty->getScalarSizeInBits();
      APInt KnownZero(BitWidth, 0);
      APInt KnownOne(BitWidth, 0);
      computeKnownBits(I, KnownZero, KnownOne, /*Depth*/0, I);
      // If every bit is known to be either zero or one, the value is a
      // constant: the known-one bits spell it out exactly.
      if ((KnownZero | KnownOne).isAllOnesValue()) {
        Constant *C = ConstantInt::get(Ty, KnownOne);
        DEBUG(dbgs() << "IC: ConstFold (all bits known) to: " << *C <<
                        " from: " << *I << '\n');

        // Add operands to the worklist.
        replaceInstUsesWith(*I, C);
        ++NumConstProp;
        if (isInstructionTriviallyDead(I, &TLI))
          eraseInstFromFunction(*I);
        MadeIRChange = true;
        continue;
      }
    }

    // See if we can trivially sink this instruction to a successor basic block.
    if (I->hasOneUse()) {
      BasicBlock *BB = I->getParent();
      Instruction *UserInst = cast<Instruction>(*I->user_begin());
      BasicBlock *UserParent;

      // Get the block the use occurs in.  For a PHI the use logically occurs
      // in the corresponding incoming block, not in the PHI's own block.
      if (PHINode *PN = dyn_cast<PHINode>(UserInst))
        UserParent = PN->getIncomingBlock(*I->use_begin());
      else
        UserParent = UserInst->getParent();

      if (UserParent != BB) {
        bool UserIsSuccessor = false;
        // See if the user is one of our successors.
        for (succ_iterator SI = succ_begin(BB), E = succ_end(BB); SI != E; ++SI)
          if (*SI == UserParent) {
            UserIsSuccessor = true;
            break;
          }

        // If the user is one of our immediate successors, and if that successor
        // only has us as a predecessors (we'd have to split the critical edge
        // otherwise), we can keep going.
        if (UserIsSuccessor && UserParent->getUniquePredecessor()) {
          // Okay, the CFG is simple enough, try to sink this instruction.
          if (TryToSinkInstruction(I, UserParent)) {
            DEBUG(dbgs() << "IC: Sink: " << *I << '\n');
            MadeIRChange = true;
            // We'll add uses of the sunk instruction below, but since sinking
            // can expose opportunities for its *operands* add them to the
            // worklist
            for (Use &U : I->operands())
              if (Instruction *OpI = dyn_cast<Instruction>(U.get()))
                Worklist.Add(OpI);
          }
        }
      }
    }

    // Now that we have an instruction, try combining it to simplify it.
    Builder->SetInsertPoint(I);
    Builder->SetCurrentDebugLocation(I->getDebugLoc());

#ifndef NDEBUG
    std::string OrigI;
#endif
    DEBUG(raw_string_ostream SS(OrigI); I->print(SS); OrigI = SS.str(););
    DEBUG(dbgs() << "IC: Visiting: " << OrigI << '\n');

    if (Instruction *Result = visit(*I)) {
      ++NumCombined;
      // Should we replace the old instruction with a new one?
      if (Result != I) {
        DEBUG(dbgs() << "IC: Old = " << *I << '\n'
                     << "    New = " << *Result << '\n');

        // Preserve the original instruction's debug location on the
        // replacement where one exists.
        if (I->getDebugLoc())
          Result->setDebugLoc(I->getDebugLoc());
        // Everything uses the new instruction now.
        I->replaceAllUsesWith(Result);

        // Move the name to the new instruction first.
        Result->takeName(I);

        // Push the new instruction and any users onto the worklist.
        Worklist.AddUsersToWorkList(*Result);
        Worklist.Add(Result);

        // Insert the new instruction into the basic block...
        BasicBlock *InstParent = I->getParent();
        BasicBlock::iterator InsertPos = I->getIterator();

        // If we replace a PHI with something that isn't a PHI, fix up the
        // insertion point.  Non-PHI instructions may not appear among the
        // PHI nodes at the top of a block.
        if (!isa<PHINode>(Result) && isa<PHINode>(InsertPos))
          InsertPos = InstParent->getFirstInsertionPt();

        InstParent->getInstList().insert(InsertPos, Result);

        eraseInstFromFunction(*I);
      } else {
        DEBUG(dbgs() << "IC: Mod = " << OrigI << '\n'
                     << "    New = " << *I << '\n');

        // If the instruction was modified, it's possible that it is now dead.
        // if so, remove it.
        if (isInstructionTriviallyDead(I, &TLI)) {
          eraseInstFromFunction(*I);
        } else {
          // Still live: revisit its users (and itself) for further combines.
          Worklist.AddUsersToWorkList(*I);
          Worklist.Add(I);
        }
      }
      MadeIRChange = true;
    }
  }

  Worklist.Zap();
  return MadeIRChange;
}
/// Walk the function in depth-first order, adding all reachable code to the
/// worklist.
///
/// This has a couple of tricks to make the code faster and more powerful.  In
/// particular, we constant fold and DCE instructions as we go, to avoid adding
/// them to the worklist (this significantly speeds up instcombine on code where
/// many instructions are dead or constant).  Additionally, if we find a branch
/// whose condition is a known constant, we only visit the reachable successors.
///
static bool AddReachableCodeToWorklist(BasicBlock *BB, const DataLayout &DL,
                                       SmallPtrSetImpl<BasicBlock *> &Visited,
                                       InstCombineWorklist &ICWorklist,
                                       const TargetLibraryInfo *TLI) {
  bool MadeIRChange = false;
  SmallVector<BasicBlock*, 256> Worklist;
  Worklist.push_back(BB);

  SmallVector<Instruction*, 128> InstrsForInstCombineWorklist;
  // Memoizes operand-constant folds so each distinct constant expression is
  // folded at most once across the whole traversal.
  DenseMap<Constant *, Constant *> FoldedConstants;

  do {
    BB = Worklist.pop_back_val();

    // We have now visited this block!  If we've already been here, ignore it.
    if (!Visited.insert(BB).second)
      continue;

    // Iterate with a pre-increment so erasing the current instruction does
    // not invalidate the loop iterator.
    for (BasicBlock::iterator BBI = BB->begin(), E = BB->end(); BBI != E; ) {
      Instruction *Inst = &*BBI++;

      // DCE instruction if trivially dead.
      if (isInstructionTriviallyDead(Inst, TLI)) {
        ++NumDeadInst;
        DEBUG(dbgs() << "IC: DCE: " << *Inst << '\n');
        Inst->eraseFromParent();
        continue;
      }

      // ConstantProp instruction if trivially constant.
      if (!Inst->use_empty() &&
          (Inst->getNumOperands() == 0 || isa<Constant>(Inst->getOperand(0))))
        if (Constant *C = ConstantFoldInstruction(Inst, DL, TLI)) {
          DEBUG(dbgs() << "IC: ConstFold to: " << *C << " from: "
                       << *Inst << '\n');
          Inst->replaceAllUsesWith(C);
          ++NumConstProp;
          if (isInstructionTriviallyDead(Inst, TLI))
            Inst->eraseFromParent();
          continue;
        }

      // See if we can constant fold its operands.
      for (Use &U : Inst->operands()) {
        if (!isa<ConstantVector>(U) && !isa<ConstantExpr>(U))
          continue;

        auto *C = cast<Constant>(U);
        Constant *&FoldRes = FoldedConstants[C];
        if (!FoldRes)
          FoldRes = ConstantFoldConstant(C, DL, TLI);
        if (!FoldRes)
          // Folding failed; cache the original so we don't retry it.
          FoldRes = C;

        if (FoldRes != C) {
          DEBUG(dbgs() << "IC: ConstFold operand of: " << *Inst
                       << "\n    Old = " << *C
                       << "\n    New = " << *FoldRes << '\n');
          U = FoldRes;
          MadeIRChange = true;
        }
      }

      InstrsForInstCombineWorklist.push_back(Inst);
    }

    // Recursively visit successors.  If this is a branch or switch on a
    // constant, only visit the reachable successor.
    TerminatorInst *TI = BB->getTerminator();
    if (BranchInst *BI = dyn_cast<BranchInst>(TI)) {
      if (BI->isConditional() && isa<ConstantInt>(BI->getCondition())) {
        bool CondVal = cast<ConstantInt>(BI->getCondition())->getZExtValue();
        // Successor 0 is the true edge, successor 1 the false edge.
        BasicBlock *ReachableBB = BI->getSuccessor(!CondVal);
        Worklist.push_back(ReachableBB);
        continue;
      }
    } else if (SwitchInst *SI = dyn_cast<SwitchInst>(TI)) {
      if (ConstantInt *Cond = dyn_cast<ConstantInt>(SI->getCondition())) {
        Worklist.push_back(SI->findCaseValue(Cond)->getCaseSuccessor());
        continue;
      }
    }

    for (BasicBlock *SuccBB : TI->successors())
      Worklist.push_back(SuccBB);
  } while (!Worklist.empty());

  // Once we've found all of the instructions to add to instcombine's worklist,
  // add them in reverse order.  This way instcombine will visit from the top
  // of the function down.  This jives well with the way that it adds all uses
  // of instructions to the worklist after doing a transformation, thus avoiding
  // some N^2 behavior in pathological cases.
  ICWorklist.AddInitialGroup(InstrsForInstCombineWorklist);

  return MadeIRChange;
}
  2748. /// \brief Populate the IC worklist from a function, and prune any dead basic
  2749. /// blocks discovered in the process.
  2750. ///
  2751. /// This also does basic constant propagation and other forward fixing to make
  2752. /// the combiner itself run much faster.
  2753. static bool prepareICWorklistFromFunction(Function &F, const DataLayout &DL,
  2754. TargetLibraryInfo *TLI,
  2755. InstCombineWorklist &ICWorklist) {
  2756. bool MadeIRChange = false;
  2757. // Do a depth-first traversal of the function, populate the worklist with
  2758. // the reachable instructions. Ignore blocks that are not reachable. Keep
  2759. // track of which blocks we visit.
  2760. SmallPtrSet<BasicBlock *, 32> Visited;
  2761. MadeIRChange |=
  2762. AddReachableCodeToWorklist(&F.front(), DL, Visited, ICWorklist, TLI);
  2763. // Do a quick scan over the function. If we find any blocks that are
  2764. // unreachable, remove any instructions inside of them. This prevents
  2765. // the instcombine code from having to deal with some bad special cases.
  2766. for (BasicBlock &BB : F) {
  2767. if (Visited.count(&BB))
  2768. continue;
  2769. unsigned NumDeadInstInBB = removeAllNonTerminatorAndEHPadInstructions(&BB);
  2770. MadeIRChange |= NumDeadInstInBB > 0;
  2771. NumDeadInst += NumDeadInstInBB;
  2772. }
  2773. return MadeIRChange;
  2774. }
// Runs the instruction combiner over \p F to a fixed point: worklist
// preparation plus InstCombiner::run are repeated until an iteration makes no
// change.  Returns true if the function was modified.
static bool
combineInstructionsOverFunction(Function &F, InstCombineWorklist &Worklist,
                                AliasAnalysis *AA, AssumptionCache &AC,
                                TargetLibraryInfo &TLI, DominatorTree &DT,
                                bool ExpensiveCombines = true,
                                LoopInfo *LI = nullptr) {
  auto &DL = F.getParent()->getDataLayout();
  // A command-line flag can force expensive combines on regardless of the
  // caller's choice.
  ExpensiveCombines |= EnableExpensiveCombines;

  /// Builder - This is an IRBuilder that automatically inserts new
  /// instructions into the worklist when they are created.  It also registers
  /// newly created llvm.assume calls with the assumption cache.
  IRBuilder<TargetFolder, IRBuilderCallbackInserter> Builder(
      F.getContext(), TargetFolder(DL),
      IRBuilderCallbackInserter([&Worklist, &AC](Instruction *I) {
        Worklist.Add(I);

        using namespace llvm::PatternMatch;
        if (match(I, m_Intrinsic<Intrinsic::assume>()))
          AC.registerAssumption(cast<CallInst>(I));
      }));

  // Lower dbg.declare intrinsics otherwise their value may be clobbered
  // by instcombiner.
  bool DbgDeclaresChanged = LowerDbgDeclare(F);

  // Iterate while there is work to do.
  int Iteration = 0;
  for (;;) {
    ++Iteration;
    DEBUG(dbgs() << "\n\nINSTCOMBINE ITERATION #" << Iteration << " on "
                 << F.getName() << "\n");

    bool Changed = prepareICWorklistFromFunction(F, DL, &TLI, Worklist);

    InstCombiner IC(Worklist, &Builder, F.optForMinSize(), ExpensiveCombines,
                    AA, AC, TLI, DT, DL, LI);
    IC.MaxArraySizeForCombine = MaxArraySize;
    Changed |= IC.run();

    if (!Changed)
      break;
  }

  // Iteration > 1 means the first pass changed something (a second pass was
  // needed to confirm the fixed point).
  return DbgDeclaresChanged || Iteration > 1;
}
  2812. PreservedAnalyses InstCombinePass::run(Function &F,
  2813. FunctionAnalysisManager &AM) {
  2814. auto &AC = AM.getResult<AssumptionAnalysis>(F);
  2815. auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
  2816. auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
  2817. auto *LI = AM.getCachedResult<LoopAnalysis>(F);
  2818. // FIXME: The AliasAnalysis is not yet supported in the new pass manager
  2819. if (!combineInstructionsOverFunction(F, Worklist, nullptr, AC, TLI, DT,
  2820. ExpensiveCombines, LI))
  2821. // No changes, all analyses are preserved.
  2822. return PreservedAnalyses::all();
  2823. // Mark all the analyses that instcombine updates as preserved.
  2824. PreservedAnalyses PA;
  2825. PA.preserveSet<CFGAnalyses>();
  2826. PA.preserve<AAManager>();
  2827. PA.preserve<GlobalsAA>();
  2828. return PA;
  2829. }
// Declare, for the legacy pass manager, which analyses this pass needs and
// which ones it keeps valid.
void InstructionCombiningPass::getAnalysisUsage(AnalysisUsage &AU) const {
  // InstCombine never changes the CFG.
  AU.setPreservesCFG();
  // Analyses consumed while combining.
  AU.addRequired<AAResultsWrapperPass>();
  AU.addRequired<AssumptionCacheTracker>();
  AU.addRequired<TargetLibraryInfoWrapperPass>();
  AU.addRequired<DominatorTreeWrapperPass>();
  // Analyses that remain valid after this pass runs.
  AU.addPreserved<DominatorTreeWrapperPass>();
  AU.addPreserved<AAResultsWrapperPass>();
  AU.addPreserved<BasicAAWrapperPass>();
  AU.addPreserved<GlobalsAAWrapperPass>();
}
  2841. bool InstructionCombiningPass::runOnFunction(Function &F) {
  2842. if (skipFunction(F))
  2843. return false;
  2844. // Required analyses.
  2845. auto AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
  2846. auto &AC = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
  2847. auto &TLI = getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
  2848. auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  2849. // Optional analyses.
  2850. auto *LIWP = getAnalysisIfAvailable<LoopInfoWrapperPass>();
  2851. auto *LI = LIWP ? &LIWP->getLoopInfo() : nullptr;
  2852. return combineInstructionsOverFunction(F, Worklist, AA, AC, TLI, DT,
  2853. ExpensiveCombines, LI);
  2854. }
// Unique identifier used by the legacy pass manager; its address, not its
// value, identifies the pass.
char InstructionCombiningPass::ID = 0;

// Register the pass and its analysis dependencies with the legacy pass
// registry under the "instcombine" name.
INITIALIZE_PASS_BEGIN(InstructionCombiningPass, "instcombine",
                      "Combine redundant instructions", false, false)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
INITIALIZE_PASS_END(InstructionCombiningPass, "instcombine",
                    "Combine redundant instructions", false, false)
// Initialization Routines

// Registers the instcombine pass with the given pass registry (idempotent).
void llvm::initializeInstCombine(PassRegistry &Registry) {
  initializeInstructionCombiningPassPass(Registry);
}
// C-API wrapper around the registration above for clients using LLVM-C.
void LLVMInitializeInstCombine(LLVMPassRegistryRef R) {
  initializeInstructionCombiningPassPass(*unwrap(R));
}
// Factory for the legacy pass; \p ExpensiveCombines enables the extra
// known-bits-based folds. Caller (the pass manager) takes ownership.
FunctionPass *llvm::createInstructionCombiningPass(bool ExpensiveCombines) {
  return new InstructionCombiningPass(ExpensiveCombines);
}