//===- Instructions.cpp - Implement the LLVM instructions -----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements all of the non-inline methods for the LLVM instruction
// classes.
//
//===----------------------------------------------------------------------===//
#include "llvm/IR/Instructions.h"
#include "LLVMContextImpl.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Twine.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <vector>
using namespace llvm;
//===----------------------------------------------------------------------===//
// AllocaInst Class
//===----------------------------------------------------------------------===//
Optional<uint64_t>
AllocaInst::getAllocationSizeInBits(const DataLayout &DL) const {
  uint64_t Size = DL.getTypeAllocSizeInBits(getAllocatedType());
  if (isArrayAllocation()) {
    auto C = dyn_cast<ConstantInt>(getArraySize());
    if (!C)
      return None;
    Size *= C->getZExtValue();
  }
  return Size;
}
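// Usage sketch (illustrative, not part of the upstream file): given an
// existing AllocaInst *AI and the module's DataLayout DL, the static
// allocation size can be queried; KnownBytes is an assumed caller variable.
//
//   if (Optional<uint64_t> Bits = AI->getAllocationSizeInBits(DL))
//     KnownBytes = *Bits / 8;   // allocation size is a compile-time constant
//   else
//     KnownBytes = 0;           // runtime-sized array alloca, size unknown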
//===----------------------------------------------------------------------===//
// CallSite Class
//===----------------------------------------------------------------------===//
User::op_iterator CallSite::getCallee() const {
  return cast<CallBase>(getInstruction())->op_end() - 1;
}
//===----------------------------------------------------------------------===//
// SelectInst Class
//===----------------------------------------------------------------------===//
/// areInvalidOperands - Return a string if the specified operands are invalid
/// for a select operation, otherwise return null.
const char *SelectInst::areInvalidOperands(Value *Op0, Value *Op1, Value *Op2) {
  if (Op1->getType() != Op2->getType())
    return "both values to select must have same type";
  if (Op1->getType()->isTokenTy())
    return "select values cannot have token type";
  if (VectorType *VT = dyn_cast<VectorType>(Op0->getType())) {
    // Vector select.
    if (VT->getElementType() != Type::getInt1Ty(Op0->getContext()))
      return "vector select condition element type must be i1";
    VectorType *ET = dyn_cast<VectorType>(Op1->getType());
    if (!ET)
      return "selected values for vector select must be vectors";
    if (ET->getNumElements() != VT->getNumElements())
      return "vector select requires selected vectors to have "
             "the same vector length as select condition";
  } else if (Op0->getType() != Type::getInt1Ty(Op0->getContext())) {
    return "select condition must be i1 or <n x i1>";
  }
  return nullptr;
}
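// Usage sketch (illustrative): areInvalidOperands is the usual guard before
// building a select. Cond, TVal, FVal, and InsertPt are assumed to exist in
// the caller; only APIs of this class are used.
//
//   if (const char *Reason = SelectInst::areInvalidOperands(Cond, TVal, FVal))
//     report_fatal_error(Twine("cannot build select: ") + Reason);
//   SelectInst *Sel = SelectInst::Create(Cond, TVal, FVal, "sel", InsertPt);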
//===----------------------------------------------------------------------===//
// PHINode Class
//===----------------------------------------------------------------------===//
PHINode::PHINode(const PHINode &PN)
    : Instruction(PN.getType(), Instruction::PHI, nullptr, PN.getNumOperands()),
      ReservedSpace(PN.getNumOperands()) {
  allocHungoffUses(PN.getNumOperands());
  std::copy(PN.op_begin(), PN.op_end(), op_begin());
  std::copy(PN.block_begin(), PN.block_end(), block_begin());
  SubclassOptionalData = PN.SubclassOptionalData;
}
// removeIncomingValue - Remove an incoming value. This is useful if a
// predecessor basic block is deleted.
Value *PHINode::removeIncomingValue(unsigned Idx, bool DeletePHIIfEmpty) {
  Value *Removed = getIncomingValue(Idx);
  // Move everything after this operand down.
  //
  // FIXME: we could just swap with the end of the list, then erase. However,
  // clients might not expect this to happen. The code as it is thrashes the
  // use/def lists, which is kinda lame.
  std::copy(op_begin() + Idx + 1, op_end(), op_begin() + Idx);
  std::copy(block_begin() + Idx + 1, block_end(), block_begin() + Idx);
  // Nuke the last value.
  Op<-1>().set(nullptr);
  setNumHungOffUseOperands(getNumOperands() - 1);
  // If the PHI node is dead, because it has zero entries, nuke it now.
  if (getNumOperands() == 0 && DeletePHIIfEmpty) {
    // If anyone is using this PHI, make them use a dummy value instead...
    replaceAllUsesWith(UndefValue::get(getType()));
    eraseFromParent();
  }
  return Removed;
}
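// Usage sketch (illustrative): the BasicBlock-based overload declared in the
// header forwards to this index-based form. When a predecessor DeadBB of
// SuccBB is being deleted (both names assumed), a caller can do:
//
//   for (PHINode &PN : SuccBB->phis())
//     PN.removeIncomingValue(DeadBB, /*DeletePHIIfEmpty=*/false);
//
// Passing false keeps the phi iteration safe; PHIs left with zero entries can
// be erased in a separate pass afterwards.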
/// growOperands - grow operands - This grows the operand list in response
/// to a push_back style of operation. This grows the number of ops by 1.5
/// times.
///
void PHINode::growOperands() {
  unsigned e = getNumOperands();
  unsigned NumOps = e + e / 2;
  if (NumOps < 2) NumOps = 2; // 2 op PHI nodes are VERY common.
  ReservedSpace = NumOps;
  growHungoffUses(ReservedSpace, /* IsPhi */ true);
}
/// hasConstantValue - If the specified PHI node always merges together the same
/// value, return the value, otherwise return null.
Value *PHINode::hasConstantValue() const {
  // Exploit the fact that phi nodes always have at least one entry.
  Value *ConstantValue = getIncomingValue(0);
  for (unsigned i = 1, e = getNumIncomingValues(); i != e; ++i)
    if (getIncomingValue(i) != ConstantValue && getIncomingValue(i) != this) {
      if (ConstantValue != this)
        return nullptr; // Incoming values not all the same.
      // The case where the first value is this PHI.
      ConstantValue = getIncomingValue(i);
    }
  if (ConstantValue == this)
    return UndefValue::get(getType());
  return ConstantValue;
}
/// hasConstantOrUndefValue - Whether the specified PHI node always merges
/// together the same value, assuming that undefs result in the same value as
/// non-undefs.
/// Unlike \ref hasConstantValue, this does not return a value because the
/// unique non-undef incoming value need not dominate the PHI node.
bool PHINode::hasConstantOrUndefValue() const {
  Value *ConstantValue = nullptr;
  for (unsigned i = 0, e = getNumIncomingValues(); i != e; ++i) {
    Value *Incoming = getIncomingValue(i);
    if (Incoming != this && !isa<UndefValue>(Incoming)) {
      if (ConstantValue && ConstantValue != Incoming)
        return false;
      ConstantValue = Incoming;
    }
  }
  return true;
}
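// Usage sketch (illustrative): simplification code typically uses
// hasConstantValue to fold a trivially redundant PHI. PN is an assumed
// PHINode *; real passes additionally check that the replacement dominates
// all uses before rewriting.
//
//   if (Value *V = PN->hasConstantValue()) {
//     PN->replaceAllUsesWith(V);
//     PN->eraseFromParent();
//   }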
//===----------------------------------------------------------------------===//
// LandingPadInst Implementation
//===----------------------------------------------------------------------===//
LandingPadInst::LandingPadInst(Type *RetTy, unsigned NumReservedValues,
                               const Twine &NameStr, Instruction *InsertBefore)
    : Instruction(RetTy, Instruction::LandingPad, nullptr, 0, InsertBefore) {
  init(NumReservedValues, NameStr);
}
LandingPadInst::LandingPadInst(Type *RetTy, unsigned NumReservedValues,
                               const Twine &NameStr, BasicBlock *InsertAtEnd)
    : Instruction(RetTy, Instruction::LandingPad, nullptr, 0, InsertAtEnd) {
  init(NumReservedValues, NameStr);
}
LandingPadInst::LandingPadInst(const LandingPadInst &LP)
    : Instruction(LP.getType(), Instruction::LandingPad, nullptr,
                  LP.getNumOperands()),
      ReservedSpace(LP.getNumOperands()) {
  allocHungoffUses(LP.getNumOperands());
  Use *OL = getOperandList();
  const Use *InOL = LP.getOperandList();
  for (unsigned I = 0, E = ReservedSpace; I != E; ++I)
    OL[I] = InOL[I];
  setCleanup(LP.isCleanup());
}
LandingPadInst *LandingPadInst::Create(Type *RetTy, unsigned NumReservedClauses,
                                       const Twine &NameStr,
                                       Instruction *InsertBefore) {
  return new LandingPadInst(RetTy, NumReservedClauses, NameStr, InsertBefore);
}
LandingPadInst *LandingPadInst::Create(Type *RetTy, unsigned NumReservedClauses,
                                       const Twine &NameStr,
                                       BasicBlock *InsertAtEnd) {
  return new LandingPadInst(RetTy, NumReservedClauses, NameStr, InsertAtEnd);
}
void LandingPadInst::init(unsigned NumReservedValues, const Twine &NameStr) {
  ReservedSpace = NumReservedValues;
  setNumHungOffUseOperands(0);
  allocHungoffUses(ReservedSpace);
  setName(NameStr);
  setCleanup(false);
}
/// growOperands - grow operands - This grows the operand list in response to a
/// push_back style of operation. This grows the number of ops by 2 times.
void LandingPadInst::growOperands(unsigned Size) {
  unsigned e = getNumOperands();
  if (ReservedSpace >= e + Size) return;
  ReservedSpace = (std::max(e, 1U) + Size / 2) * 2;
  growHungoffUses(ReservedSpace);
}
void LandingPadInst::addClause(Constant *Val) {
  unsigned OpNo = getNumOperands();
  growOperands(1);
  assert(OpNo < ReservedSpace && "Growing didn't work!");
  setNumHungOffUseOperands(getNumOperands() + 1);
  getOperandList()[OpNo] = Val;
}
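// Usage sketch (illustrative): a landing pad is normally built with Create and
// then populated through addClause / setCleanup. ExnTy (the {i8*, i32}-style
// result type), TypeInfoGV (a typeinfo constant), and InsertPt are assumed
// caller-provided names.
//
//   LandingPadInst *LP =
//       LandingPadInst::Create(ExnTy, /*NumReservedClauses=*/1, "lpad",
//                              InsertPt);
//   LP->addClause(TypeInfoGV); // catch clause for one typeinfo symbol
//   LP->setCleanup(true);      // also run cleanups when unwinding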
//===----------------------------------------------------------------------===//
// CallBase Implementation
//===----------------------------------------------------------------------===//
Function *CallBase::getCaller() { return getParent()->getParent(); }
unsigned CallBase::getNumSubclassExtraOperandsDynamic() const {
  assert(getOpcode() == Instruction::CallBr && "Unexpected opcode!");
  return cast<CallBrInst>(this)->getNumIndirectDests() + 1;
}
bool CallBase::isIndirectCall() const {
  const Value *V = getCalledValue();
  if (isa<Function>(V) || isa<Constant>(V))
    return false;
  if (const CallInst *CI = dyn_cast<CallInst>(this))
    if (CI->isInlineAsm())
      return false;
  return true;
}
/// Tests if this call site must be tail call optimized. Only a CallInst can
/// be tail call optimized.
bool CallBase::isMustTailCall() const {
  if (auto *CI = dyn_cast<CallInst>(this))
    return CI->isMustTailCall();
  return false;
}
/// Tests if this call site is marked as a tail call.
bool CallBase::isTailCall() const {
  if (auto *CI = dyn_cast<CallInst>(this))
    return CI->isTailCall();
  return false;
}
Intrinsic::ID CallBase::getIntrinsicID() const {
  if (auto *F = getCalledFunction())
    return F->getIntrinsicID();
  return Intrinsic::not_intrinsic;
}
bool CallBase::isReturnNonNull() const {
  if (hasRetAttr(Attribute::NonNull))
    return true;
  if (getDereferenceableBytes(AttributeList::ReturnIndex) > 0 &&
      !NullPointerIsDefined(getCaller(),
                            getType()->getPointerAddressSpace()))
    return true;
  return false;
}
Value *CallBase::getReturnedArgOperand() const {
  unsigned Index;
  if (Attrs.hasAttrSomewhere(Attribute::Returned, &Index) && Index)
    return getArgOperand(Index - AttributeList::FirstArgIndex);
  if (const Function *F = getCalledFunction())
    if (F->getAttributes().hasAttrSomewhere(Attribute::Returned, &Index) &&
        Index)
      return getArgOperand(Index - AttributeList::FirstArgIndex);
  return nullptr;
}
bool CallBase::hasRetAttr(Attribute::AttrKind Kind) const {
  if (Attrs.hasAttribute(AttributeList::ReturnIndex, Kind))
    return true;
  // Look at the callee, if available.
  if (const Function *F = getCalledFunction())
    return F->getAttributes().hasAttribute(AttributeList::ReturnIndex, Kind);
  return false;
}
/// Determine whether the argument or parameter has the given attribute.
bool CallBase::paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const {
  assert(ArgNo < getNumArgOperands() && "Param index out of bounds!");
  if (Attrs.hasParamAttribute(ArgNo, Kind))
    return true;
  if (const Function *F = getCalledFunction())
    return F->getAttributes().hasParamAttribute(ArgNo, Kind);
  return false;
}
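// Usage sketch (illustrative): both the call-site attribute list and the
// callee's declaration are consulted, so a query on a CallBase *CB (assumed,
// with at least one argument) works whether the attribute was written on the
// call or on the function.
//
//   bool ArgIsNonNull = CB->paramHasAttr(0, Attribute::NonNull);
//   bool RetIsNoAlias = CB->hasRetAttr(Attribute::NoAlias);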
bool CallBase::hasFnAttrOnCalledFunction(Attribute::AttrKind Kind) const {
  if (const Function *F = getCalledFunction())
    return F->getAttributes().hasAttribute(AttributeList::FunctionIndex, Kind);
  return false;
}
bool CallBase::hasFnAttrOnCalledFunction(StringRef Kind) const {
  if (const Function *F = getCalledFunction())
    return F->getAttributes().hasAttribute(AttributeList::FunctionIndex, Kind);
  return false;
}
CallBase::op_iterator
CallBase::populateBundleOperandInfos(ArrayRef<OperandBundleDef> Bundles,
                                     const unsigned BeginIndex) {
  auto It = op_begin() + BeginIndex;
  for (auto &B : Bundles)
    It = std::copy(B.input_begin(), B.input_end(), It);
  auto *ContextImpl = getContext().pImpl;
  auto BI = Bundles.begin();
  unsigned CurrentIndex = BeginIndex;
  for (auto &BOI : bundle_op_infos()) {
    assert(BI != Bundles.end() && "Incorrect allocation?");
    BOI.Tag = ContextImpl->getOrInsertBundleTag(BI->getTag());
    BOI.Begin = CurrentIndex;
    BOI.End = CurrentIndex + BI->input_size();
    CurrentIndex = BOI.End;
    BI++;
  }
  assert(BI == Bundles.end() && "Incorrect allocation?");
  return It;
}
//===----------------------------------------------------------------------===//
// CallInst Implementation
//===----------------------------------------------------------------------===//
void CallInst::init(FunctionType *FTy, Value *Func, ArrayRef<Value *> Args,
                    ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr) {
  this->FTy = FTy;
  assert(getNumOperands() == Args.size() + CountBundleInputs(Bundles) + 1 &&
         "NumOperands not set up?");
  setCalledOperand(Func);
#ifndef NDEBUG
  assert((Args.size() == FTy->getNumParams() ||
          (FTy->isVarArg() && Args.size() > FTy->getNumParams())) &&
         "Calling a function with bad signature!");
  for (unsigned i = 0; i != Args.size(); ++i)
    assert((i >= FTy->getNumParams() ||
            FTy->getParamType(i) == Args[i]->getType()) &&
           "Calling a function with a bad signature!");
#endif
  llvm::copy(Args, op_begin());
  auto It = populateBundleOperandInfos(Bundles, Args.size());
  (void)It;
  assert(It + 1 == op_end() && "Should add up!");
  setName(NameStr);
}
void CallInst::init(FunctionType *FTy, Value *Func, const Twine &NameStr) {
  this->FTy = FTy;
  assert(getNumOperands() == 1 && "NumOperands not set up?");
  setCalledOperand(Func);
  assert(FTy->getNumParams() == 0 && "Calling a function with bad signature");
  setName(NameStr);
}
CallInst::CallInst(FunctionType *Ty, Value *Func, const Twine &Name,
                   Instruction *InsertBefore)
    : CallBase(Ty->getReturnType(), Instruction::Call,
               OperandTraits<CallBase>::op_end(this) - 1, 1, InsertBefore) {
  init(Ty, Func, Name);
}
CallInst::CallInst(FunctionType *Ty, Value *Func, const Twine &Name,
                   BasicBlock *InsertAtEnd)
    : CallBase(Ty->getReturnType(), Instruction::Call,
               OperandTraits<CallBase>::op_end(this) - 1, 1, InsertAtEnd) {
  init(Ty, Func, Name);
}
CallInst::CallInst(const CallInst &CI)
    : CallBase(CI.Attrs, CI.FTy, CI.getType(), Instruction::Call,
               OperandTraits<CallBase>::op_end(this) - CI.getNumOperands(),
               CI.getNumOperands()) {
  setTailCallKind(CI.getTailCallKind());
  setCallingConv(CI.getCallingConv());
  std::copy(CI.op_begin(), CI.op_end(), op_begin());
  std::copy(CI.bundle_op_info_begin(), CI.bundle_op_info_end(),
            bundle_op_info_begin());
  SubclassOptionalData = CI.SubclassOptionalData;
}
CallInst *CallInst::Create(CallInst *CI, ArrayRef<OperandBundleDef> OpB,
                           Instruction *InsertPt) {
  std::vector<Value *> Args(CI->arg_begin(), CI->arg_end());
  auto *NewCI = CallInst::Create(CI->getFunctionType(), CI->getCalledValue(),
                                 Args, OpB, CI->getName(), InsertPt);
  NewCI->setTailCallKind(CI->getTailCallKind());
  NewCI->setCallingConv(CI->getCallingConv());
  NewCI->SubclassOptionalData = CI->SubclassOptionalData;
  NewCI->setAttributes(CI->getAttributes());
  NewCI->setDebugLoc(CI->getDebugLoc());
  return NewCI;
}
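// Usage sketch (illustrative): this Create overload is the usual way to attach
// an extra operand bundle to an existing call; the original instruction stays
// in place and is normally replaced and erased by the caller. OrigCall and
// DeoptArgs are assumed names.
//
//   SmallVector<OperandBundleDef, 1> Bundles;
//   OrigCall->getOperandBundlesAsDefs(Bundles);
//   Bundles.emplace_back("deopt", DeoptArgs);
//   CallInst *NewCall = CallInst::Create(OrigCall, Bundles, OrigCall);
//   NewCall->takeName(OrigCall);
//   OrigCall->replaceAllUsesWith(NewCall);
//   OrigCall->eraseFromParent();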
// Update profile weight for call instruction by scaling it using the ratio
// of S/T. The meaning of "branch_weights" metadata for a call instruction is
// transferred to represent the call count.
void CallInst::updateProfWeight(uint64_t S, uint64_t T) {
  auto *ProfileData = getMetadata(LLVMContext::MD_prof);
  if (ProfileData == nullptr)
    return;
  auto *ProfDataName = dyn_cast<MDString>(ProfileData->getOperand(0));
  if (!ProfDataName || (!ProfDataName->getString().equals("branch_weights") &&
                        !ProfDataName->getString().equals("VP")))
    return;
  if (T == 0) {
    LLVM_DEBUG(dbgs() << "Attempting to update profile weights will result in "
                         "div by 0. Ignoring. Likely the function "
                      << getParent()->getParent()->getName()
                      << " has 0 entry count, and contains call instructions "
                         "with non-zero prof info.");
    return;
  }
  MDBuilder MDB(getContext());
  SmallVector<Metadata *, 3> Vals;
  Vals.push_back(ProfileData->getOperand(0));
  APInt APS(128, S), APT(128, T);
  if (ProfDataName->getString().equals("branch_weights") &&
      ProfileData->getNumOperands() > 0) {
    // Using APInt::div may be expensive, but most cases should fit 64 bits.
    APInt Val(128, mdconst::dyn_extract<ConstantInt>(ProfileData->getOperand(1))
                       ->getValue()
                       .getZExtValue());
    Val *= APS;
    Vals.push_back(MDB.createConstant(ConstantInt::get(
        Type::getInt64Ty(getContext()), Val.udiv(APT).getLimitedValue())));
  } else if (ProfDataName->getString().equals("VP"))
    for (unsigned i = 1; i < ProfileData->getNumOperands(); i += 2) {
      // The first value is the key of the value profile, which will not change.
      Vals.push_back(ProfileData->getOperand(i));
      // Using APInt::div may be expensive, but most cases should fit 64 bits.
      APInt Val(128,
                mdconst::dyn_extract<ConstantInt>(ProfileData->getOperand(i + 1))
                    ->getValue()
                    .getZExtValue());
      Val *= APS;
      Vals.push_back(MDB.createConstant(
          ConstantInt::get(Type::getInt64Ty(getContext()),
                           Val.udiv(APT).getLimitedValue())));
    }
  setMetadata(LLVMContext::MD_prof, MDNode::get(getContext(), Vals));
}
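// Worked example (illustrative): if the call carries
//   !prof !{!"branch_weights", i64 80}
// then updateProfWeight(/*S=*/20, /*T=*/80) rescales the count by S/T,
// i.e. 80 * 20 / 80 = 20, producing
//   !prof !{!"branch_weights", i64 20}
// "VP" (value-profile) metadata is rescaled the same way, one count per
// (key, count) pair, while the keys themselves are left unchanged.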
/// IsConstantOne - Return true only if val is constant int 1
static bool IsConstantOne(Value *val) {
  assert(val && "IsConstantOne does not work with nullptr val");
  const ConstantInt *CVal = dyn_cast<ConstantInt>(val);
  return CVal && CVal->isOne();
}
static Instruction *createMalloc(Instruction *InsertBefore,
                                 BasicBlock *InsertAtEnd, Type *IntPtrTy,
                                 Type *AllocTy, Value *AllocSize,
                                 Value *ArraySize,
                                 ArrayRef<OperandBundleDef> OpB,
                                 Function *MallocF, const Twine &Name) {
  assert(((!InsertBefore && InsertAtEnd) || (InsertBefore && !InsertAtEnd)) &&
         "createMalloc needs either InsertBefore or InsertAtEnd");
  // malloc(type) becomes:
  //   bitcast (i8* malloc(typeSize)) to type*
  // malloc(type, arraySize) becomes:
  //   bitcast (i8* malloc(typeSize*arraySize)) to type*
  if (!ArraySize)
    ArraySize = ConstantInt::get(IntPtrTy, 1);
  else if (ArraySize->getType() != IntPtrTy) {
    if (InsertBefore)
      ArraySize = CastInst::CreateIntegerCast(ArraySize, IntPtrTy, false,
                                              "", InsertBefore);
    else
      ArraySize = CastInst::CreateIntegerCast(ArraySize, IntPtrTy, false,
                                              "", InsertAtEnd);
  }
  if (!IsConstantOne(ArraySize)) {
    if (IsConstantOne(AllocSize)) {
      AllocSize = ArraySize; // Operand * 1 = Operand
    } else if (Constant *CO = dyn_cast<Constant>(ArraySize)) {
      Constant *Scale = ConstantExpr::getIntegerCast(CO, IntPtrTy,
                                                     false /*ZExt*/);
      // Malloc arg is constant product of type size and array size
      AllocSize = ConstantExpr::getMul(Scale, cast<Constant>(AllocSize));
    } else {
      // Multiply type size by the array size...
      if (InsertBefore)
        AllocSize = BinaryOperator::CreateMul(ArraySize, AllocSize,
                                              "mallocsize", InsertBefore);
      else
        AllocSize = BinaryOperator::CreateMul(ArraySize, AllocSize,
                                              "mallocsize", InsertAtEnd);
    }
  }
  assert(AllocSize->getType() == IntPtrTy && "malloc arg is wrong size");
  // Create the call to Malloc.
  BasicBlock *BB = InsertBefore ? InsertBefore->getParent() : InsertAtEnd;
  Module *M = BB->getParent()->getParent();
  Type *BPTy = Type::getInt8PtrTy(BB->getContext());
  FunctionCallee MallocFunc = MallocF;
  if (!MallocFunc)
    // prototype malloc as "void *malloc(size_t)"
    MallocFunc = M->getOrInsertFunction("malloc", BPTy, IntPtrTy);
  PointerType *AllocPtrType = PointerType::getUnqual(AllocTy);
  CallInst *MCall = nullptr;
  Instruction *Result = nullptr;
  if (InsertBefore) {
    MCall = CallInst::Create(MallocFunc, AllocSize, OpB, "malloccall",
                             InsertBefore);
    Result = MCall;
    if (Result->getType() != AllocPtrType)
      // Create a cast instruction to convert to the right type...
      Result = new BitCastInst(MCall, AllocPtrType, Name, InsertBefore);
  } else {
    MCall = CallInst::Create(MallocFunc, AllocSize, OpB, "malloccall");
    Result = MCall;
    if (Result->getType() != AllocPtrType) {
      InsertAtEnd->getInstList().push_back(MCall);
      // Create a cast instruction to convert to the right type...
      Result = new BitCastInst(MCall, AllocPtrType, Name);
    }
  }
  MCall->setTailCall();
  if (Function *F = dyn_cast<Function>(MallocFunc.getCallee())) {
    MCall->setCallingConv(F->getCallingConv());
    if (!F->returnDoesNotAlias())
      F->setReturnDoesNotAlias();
  }
  assert(!MCall->getType()->isVoidTy() && "Malloc has void return type");
  return Result;
}
/// CreateMalloc - Generate the IR for a call to malloc:
/// 1. Compute the malloc call's argument as the specified type's size,
///    possibly multiplied by the array size if the array size is not
///    constant 1.
/// 2. Call malloc with that argument.
/// 3. Bitcast the result of the malloc call to the specified type.
Instruction *CallInst::CreateMalloc(Instruction *InsertBefore,
                                    Type *IntPtrTy, Type *AllocTy,
                                    Value *AllocSize, Value *ArraySize,
                                    Function *MallocF,
                                    const Twine &Name) {
  return createMalloc(InsertBefore, nullptr, IntPtrTy, AllocTy, AllocSize,
                      ArraySize, None, MallocF, Name);
}
Instruction *CallInst::CreateMalloc(Instruction *InsertBefore,
                                    Type *IntPtrTy, Type *AllocTy,
                                    Value *AllocSize, Value *ArraySize,
                                    ArrayRef<OperandBundleDef> OpB,
                                    Function *MallocF,
                                    const Twine &Name) {
  return createMalloc(InsertBefore, nullptr, IntPtrTy, AllocTy, AllocSize,
                      ArraySize, OpB, MallocF, Name);
}
/// CreateMalloc - Generate the IR for a call to malloc:
/// 1. Compute the malloc call's argument as the specified type's size,
///    possibly multiplied by the array size if the array size is not
///    constant 1.
/// 2. Call malloc with that argument.
/// 3. Bitcast the result of the malloc call to the specified type.
/// Note: This function does not add the bitcast to the basic block, that is the
/// responsibility of the caller.
Instruction *CallInst::CreateMalloc(BasicBlock *InsertAtEnd,
                                    Type *IntPtrTy, Type *AllocTy,
                                    Value *AllocSize, Value *ArraySize,
                                    Function *MallocF, const Twine &Name) {
  return createMalloc(nullptr, InsertAtEnd, IntPtrTy, AllocTy, AllocSize,
                      ArraySize, None, MallocF, Name);
}
Instruction *CallInst::CreateMalloc(BasicBlock *InsertAtEnd,
                                    Type *IntPtrTy, Type *AllocTy,
                                    Value *AllocSize, Value *ArraySize,
                                    ArrayRef<OperandBundleDef> OpB,
                                    Function *MallocF, const Twine &Name) {
  return createMalloc(nullptr, InsertAtEnd, IntPtrTy, AllocTy, AllocSize,
                      ArraySize, OpB, MallocF, Name);
}
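// Usage sketch (illustrative): a typical lowering computes the size with
// ConstantExpr::getSizeOf and lets CreateMalloc emit the call plus bitcast.
// DL, Ctx, ObjTy, and InsertPt are assumed caller-provided names.
//
//   Type *IntPtrTy = DL.getIntPtrType(Ctx);
//   Constant *AllocSize = ConstantExpr::getSizeOf(ObjTy);
//   AllocSize = ConstantExpr::getTruncOrBitCast(AllocSize, IntPtrTy);
//   Instruction *Obj = CallInst::CreateMalloc(InsertPt, IntPtrTy, ObjTy,
//                                             AllocSize, /*ArraySize=*/nullptr,
//                                             /*MallocF=*/nullptr, "obj");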
static Instruction *createFree(Value *Source,
                               ArrayRef<OperandBundleDef> Bundles,
                               Instruction *InsertBefore,
                               BasicBlock *InsertAtEnd) {
  assert(((!InsertBefore && InsertAtEnd) || (InsertBefore && !InsertAtEnd)) &&
         "createFree needs either InsertBefore or InsertAtEnd");
  assert(Source->getType()->isPointerTy() &&
         "Can not free something of nonpointer type!");
  BasicBlock *BB = InsertBefore ? InsertBefore->getParent() : InsertAtEnd;
  Module *M = BB->getParent()->getParent();
  Type *VoidTy = Type::getVoidTy(M->getContext());
  Type *IntPtrTy = Type::getInt8PtrTy(M->getContext());
  // prototype free as "void free(void*)"
  FunctionCallee FreeFunc = M->getOrInsertFunction("free", VoidTy, IntPtrTy);
  CallInst *Result = nullptr;
  Value *PtrCast = Source;
  if (InsertBefore) {
    if (Source->getType() != IntPtrTy)
      PtrCast = new BitCastInst(Source, IntPtrTy, "", InsertBefore);
    Result = CallInst::Create(FreeFunc, PtrCast, Bundles, "", InsertBefore);
  } else {
    if (Source->getType() != IntPtrTy)
      PtrCast = new BitCastInst(Source, IntPtrTy, "", InsertAtEnd);
    Result = CallInst::Create(FreeFunc, PtrCast, Bundles, "");
  }
  Result->setTailCall();
  if (Function *F = dyn_cast<Function>(FreeFunc.getCallee()))
    Result->setCallingConv(F->getCallingConv());
  return Result;
}
/// CreateFree - Generate the IR for a call to the builtin free function.
Instruction *CallInst::CreateFree(Value *Source, Instruction *InsertBefore) {
  return createFree(Source, None, InsertBefore, nullptr);
}
Instruction *CallInst::CreateFree(Value *Source,
                                  ArrayRef<OperandBundleDef> Bundles,
                                  Instruction *InsertBefore) {
  return createFree(Source, Bundles, InsertBefore, nullptr);
}
/// CreateFree - Generate the IR for a call to the builtin free function.
/// Note: This function does not add the call to the basic block, that is the
/// responsibility of the caller.
Instruction *CallInst::CreateFree(Value *Source, BasicBlock *InsertAtEnd) {
  Instruction *FreeCall = createFree(Source, None, nullptr, InsertAtEnd);
  assert(FreeCall && "CreateFree did not create a CallInst");
  return FreeCall;
}
Instruction *CallInst::CreateFree(Value *Source,
                                  ArrayRef<OperandBundleDef> Bundles,
                                  BasicBlock *InsertAtEnd) {
  Instruction *FreeCall = createFree(Source, Bundles, nullptr, InsertAtEnd);
  assert(FreeCall && "CreateFree did not create a CallInst");
  return FreeCall;
}
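// Usage sketch (illustrative): CreateFree mirrors CreateMalloc; it bitcasts
// the pointer to i8* if needed and emits a tail call to free. Obj and
// InsertPt are the assumed names from the sketch above.
//
//   CallInst::CreateFree(Obj, InsertPt);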
  621. //===----------------------------------------------------------------------===//
  622. // InvokeInst Implementation
  623. //===----------------------------------------------------------------------===//
  624. void InvokeInst::init(FunctionType *FTy, Value *Fn, BasicBlock *IfNormal,
  625. BasicBlock *IfException, ArrayRef<Value *> Args,
  626. ArrayRef<OperandBundleDef> Bundles,
  627. const Twine &NameStr) {
  628. this->FTy = FTy;
  629. assert((int)getNumOperands() ==
  630. ComputeNumOperands(Args.size(), CountBundleInputs(Bundles)) &&
  631. "NumOperands not set up?");
  632. setNormalDest(IfNormal);
  633. setUnwindDest(IfException);
  634. setCalledOperand(Fn);
  635. #ifndef NDEBUG
  636. assert(((Args.size() == FTy->getNumParams()) ||
  637. (FTy->isVarArg() && Args.size() > FTy->getNumParams())) &&
  638. "Invoking a function with bad signature");
  639. for (unsigned i = 0, e = Args.size(); i != e; i++)
  640. assert((i >= FTy->getNumParams() ||
  641. FTy->getParamType(i) == Args[i]->getType()) &&
  642. "Invoking a function with a bad signature!");
  643. #endif
  644. llvm::copy(Args, op_begin());
  645. auto It = populateBundleOperandInfos(Bundles, Args.size());
  646. (void)It;
  647. assert(It + 3 == op_end() && "Should add up!");
  648. setName(NameStr);
  649. }
  650. InvokeInst::InvokeInst(const InvokeInst &II)
  651. : CallBase(II.Attrs, II.FTy, II.getType(), Instruction::Invoke,
  652. OperandTraits<CallBase>::op_end(this) - II.getNumOperands(),
  653. II.getNumOperands()) {
  654. setCallingConv(II.getCallingConv());
  655. std::copy(II.op_begin(), II.op_end(), op_begin());
  656. std::copy(II.bundle_op_info_begin(), II.bundle_op_info_end(),
  657. bundle_op_info_begin());
  658. SubclassOptionalData = II.SubclassOptionalData;
  659. }
  660. InvokeInst *InvokeInst::Create(InvokeInst *II, ArrayRef<OperandBundleDef> OpB,
  661. Instruction *InsertPt) {
  662. std::vector<Value *> Args(II->arg_begin(), II->arg_end());
  663. auto *NewII = InvokeInst::Create(II->getFunctionType(), II->getCalledValue(),
  664. II->getNormalDest(), II->getUnwindDest(),
  665. Args, OpB, II->getName(), InsertPt);
  666. NewII->setCallingConv(II->getCallingConv());
  667. NewII->SubclassOptionalData = II->SubclassOptionalData;
  668. NewII->setAttributes(II->getAttributes());
  669. NewII->setDebugLoc(II->getDebugLoc());
  670. return NewII;
  671. }
  672. LandingPadInst *InvokeInst::getLandingPadInst() const {
  673. return cast<LandingPadInst>(getUnwindDest()->getFirstNonPHI());
  674. }
  675. //===----------------------------------------------------------------------===//
  676. // CallBrInst Implementation
  677. //===----------------------------------------------------------------------===//
  678. void CallBrInst::init(FunctionType *FTy, Value *Fn, BasicBlock *Fallthrough,
  679. ArrayRef<BasicBlock *> IndirectDests,
  680. ArrayRef<Value *> Args,
  681. ArrayRef<OperandBundleDef> Bundles,
  682. const Twine &NameStr) {
  683. this->FTy = FTy;
  684. assert((int)getNumOperands() ==
  685. ComputeNumOperands(Args.size(), IndirectDests.size(),
  686. CountBundleInputs(Bundles)) &&
  687. "NumOperands not set up?");
  688. NumIndirectDests = IndirectDests.size();
  689. setDefaultDest(Fallthrough);
  690. for (unsigned i = 0; i != NumIndirectDests; ++i)
  691. setIndirectDest(i, IndirectDests[i]);
  692. setCalledOperand(Fn);
  693. #ifndef NDEBUG
  694. assert(((Args.size() == FTy->getNumParams()) ||
  695. (FTy->isVarArg() && Args.size() > FTy->getNumParams())) &&
  696. "Calling a function with bad signature");
  697. for (unsigned i = 0, e = Args.size(); i != e; i++)
  698. assert((i >= FTy->getNumParams() ||
  699. FTy->getParamType(i) == Args[i]->getType()) &&
  700. "Calling a function with a bad signature!");
  701. #endif
  702. std::copy(Args.begin(), Args.end(), op_begin());
  703. auto It = populateBundleOperandInfos(Bundles, Args.size());
  704. (void)It;
  705. assert(It + 2 + IndirectDests.size() == op_end() && "Should add up!");
  706. setName(NameStr);
  707. }
  708. void CallBrInst::updateArgBlockAddresses(unsigned i, BasicBlock *B) {
  709. assert(getNumIndirectDests() > i && "IndirectDest # out of range for callbr");
  710. if (BasicBlock *OldBB = getIndirectDest(i)) {
  711. BlockAddress *Old = BlockAddress::get(OldBB);
  712. BlockAddress *New = BlockAddress::get(B);
  713. for (unsigned ArgNo = 0, e = getNumArgOperands(); ArgNo != e; ++ArgNo)
  714. if (dyn_cast<BlockAddress>(getArgOperand(ArgNo)) == Old)
  715. setArgOperand(ArgNo, New);
  716. }
  717. }
  718. CallBrInst::CallBrInst(const CallBrInst &CBI)
  719. : CallBase(CBI.Attrs, CBI.FTy, CBI.getType(), Instruction::CallBr,
  720. OperandTraits<CallBase>::op_end(this) - CBI.getNumOperands(),
  721. CBI.getNumOperands()) {
  722. setCallingConv(CBI.getCallingConv());
  723. std::copy(CBI.op_begin(), CBI.op_end(), op_begin());
  724. std::copy(CBI.bundle_op_info_begin(), CBI.bundle_op_info_end(),
  725. bundle_op_info_begin());
  726. SubclassOptionalData = CBI.SubclassOptionalData;
  727. NumIndirectDests = CBI.NumIndirectDests;
  728. }
  729. CallBrInst *CallBrInst::Create(CallBrInst *CBI, ArrayRef<OperandBundleDef> OpB,
  730. Instruction *InsertPt) {
  731. std::vector<Value *> Args(CBI->arg_begin(), CBI->arg_end());
  732. auto *NewCBI = CallBrInst::Create(CBI->getFunctionType(),
  733. CBI->getCalledValue(),
  734. CBI->getDefaultDest(),
  735. CBI->getIndirectDests(),
  736. Args, OpB, CBI->getName(), InsertPt);
  737. NewCBI->setCallingConv(CBI->getCallingConv());
  738. NewCBI->SubclassOptionalData = CBI->SubclassOptionalData;
  739. NewCBI->setAttributes(CBI->getAttributes());
  740. NewCBI->setDebugLoc(CBI->getDebugLoc());
  741. NewCBI->NumIndirectDests = CBI->NumIndirectDests;
  742. return NewCBI;
  743. }
  744. //===----------------------------------------------------------------------===//
  745. // ReturnInst Implementation
  746. //===----------------------------------------------------------------------===//
  747. ReturnInst::ReturnInst(const ReturnInst &RI)
  748. : Instruction(Type::getVoidTy(RI.getContext()), Instruction::Ret,
  749. OperandTraits<ReturnInst>::op_end(this) - RI.getNumOperands(),
  750. RI.getNumOperands()) {
  751. if (RI.getNumOperands())
  752. Op<0>() = RI.Op<0>();
  753. SubclassOptionalData = RI.SubclassOptionalData;
  754. }
  755. ReturnInst::ReturnInst(LLVMContext &C, Value *retVal, Instruction *InsertBefore)
  756. : Instruction(Type::getVoidTy(C), Instruction::Ret,
  757. OperandTraits<ReturnInst>::op_end(this) - !!retVal, !!retVal,
  758. InsertBefore) {
  759. if (retVal)
  760. Op<0>() = retVal;
  761. }
  762. ReturnInst::ReturnInst(LLVMContext &C, Value *retVal, BasicBlock *InsertAtEnd)
  763. : Instruction(Type::getVoidTy(C), Instruction::Ret,
  764. OperandTraits<ReturnInst>::op_end(this) - !!retVal, !!retVal,
  765. InsertAtEnd) {
  766. if (retVal)
  767. Op<0>() = retVal;
  768. }
  769. ReturnInst::ReturnInst(LLVMContext &Context, BasicBlock *InsertAtEnd)
  770. : Instruction(Type::getVoidTy(Context), Instruction::Ret,
  771. OperandTraits<ReturnInst>::op_end(this), 0, InsertAtEnd) {}
  772. //===----------------------------------------------------------------------===//
  773. // ResumeInst Implementation
  774. //===----------------------------------------------------------------------===//
  775. ResumeInst::ResumeInst(const ResumeInst &RI)
  776. : Instruction(Type::getVoidTy(RI.getContext()), Instruction::Resume,
  777. OperandTraits<ResumeInst>::op_begin(this), 1) {
  778. Op<0>() = RI.Op<0>();
  779. }
  780. ResumeInst::ResumeInst(Value *Exn, Instruction *InsertBefore)
  781. : Instruction(Type::getVoidTy(Exn->getContext()), Instruction::Resume,
  782. OperandTraits<ResumeInst>::op_begin(this), 1, InsertBefore) {
  783. Op<0>() = Exn;
  784. }
  785. ResumeInst::ResumeInst(Value *Exn, BasicBlock *InsertAtEnd)
  786. : Instruction(Type::getVoidTy(Exn->getContext()), Instruction::Resume,
  787. OperandTraits<ResumeInst>::op_begin(this), 1, InsertAtEnd) {
  788. Op<0>() = Exn;
  789. }
  790. //===----------------------------------------------------------------------===//
  791. // CleanupReturnInst Implementation
  792. //===----------------------------------------------------------------------===//
  793. CleanupReturnInst::CleanupReturnInst(const CleanupReturnInst &CRI)
  794. : Instruction(CRI.getType(), Instruction::CleanupRet,
  795. OperandTraits<CleanupReturnInst>::op_end(this) -
  796. CRI.getNumOperands(),
  797. CRI.getNumOperands()) {
  798. setInstructionSubclassData(CRI.getSubclassDataFromInstruction());
  799. Op<0>() = CRI.Op<0>();
  800. if (CRI.hasUnwindDest())
  801. Op<1>() = CRI.Op<1>();
  802. }
  803. void CleanupReturnInst::init(Value *CleanupPad, BasicBlock *UnwindBB) {
  804. if (UnwindBB)
  805. setInstructionSubclassData(getSubclassDataFromInstruction() | 1);
  806. Op<0>() = CleanupPad;
  807. if (UnwindBB)
  808. Op<1>() = UnwindBB;
  809. }
  810. CleanupReturnInst::CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB,
  811. unsigned Values, Instruction *InsertBefore)
  812. : Instruction(Type::getVoidTy(CleanupPad->getContext()),
  813. Instruction::CleanupRet,
  814. OperandTraits<CleanupReturnInst>::op_end(this) - Values,
  815. Values, InsertBefore) {
  816. init(CleanupPad, UnwindBB);
  817. }
  818. CleanupReturnInst::CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB,
  819. unsigned Values, BasicBlock *InsertAtEnd)
  820. : Instruction(Type::getVoidTy(CleanupPad->getContext()),
  821. Instruction::CleanupRet,
  822. OperandTraits<CleanupReturnInst>::op_end(this) - Values,
  823. Values, InsertAtEnd) {
  824. init(CleanupPad, UnwindBB);
  825. }
  826. //===----------------------------------------------------------------------===//
  827. // CatchReturnInst Implementation
  828. //===----------------------------------------------------------------------===//
  829. void CatchReturnInst::init(Value *CatchPad, BasicBlock *BB) {
  830. Op<0>() = CatchPad;
  831. Op<1>() = BB;
  832. }
  833. CatchReturnInst::CatchReturnInst(const CatchReturnInst &CRI)
  834. : Instruction(Type::getVoidTy(CRI.getContext()), Instruction::CatchRet,
  835. OperandTraits<CatchReturnInst>::op_begin(this), 2) {
  836. Op<0>() = CRI.Op<0>();
  837. Op<1>() = CRI.Op<1>();
  838. }
  839. CatchReturnInst::CatchReturnInst(Value *CatchPad, BasicBlock *BB,
  840. Instruction *InsertBefore)
  841. : Instruction(Type::getVoidTy(BB->getContext()), Instruction::CatchRet,
  842. OperandTraits<CatchReturnInst>::op_begin(this), 2,
  843. InsertBefore) {
  844. init(CatchPad, BB);
  845. }
  846. CatchReturnInst::CatchReturnInst(Value *CatchPad, BasicBlock *BB,
  847. BasicBlock *InsertAtEnd)
  848. : Instruction(Type::getVoidTy(BB->getContext()), Instruction::CatchRet,
  849. OperandTraits<CatchReturnInst>::op_begin(this), 2,
  850. InsertAtEnd) {
  851. init(CatchPad, BB);
  852. }

//===----------------------------------------------------------------------===//
// CatchSwitchInst Implementation
//===----------------------------------------------------------------------===//

CatchSwitchInst::CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
                                 unsigned NumReservedValues,
                                 const Twine &NameStr,
                                 Instruction *InsertBefore)
    : Instruction(ParentPad->getType(), Instruction::CatchSwitch, nullptr, 0,
                  InsertBefore) {
  if (UnwindDest)
    ++NumReservedValues;
  init(ParentPad, UnwindDest, NumReservedValues + 1);
  setName(NameStr);
}

CatchSwitchInst::CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
                                 unsigned NumReservedValues,
                                 const Twine &NameStr, BasicBlock *InsertAtEnd)
    : Instruction(ParentPad->getType(), Instruction::CatchSwitch, nullptr, 0,
                  InsertAtEnd) {
  if (UnwindDest)
    ++NumReservedValues;
  init(ParentPad, UnwindDest, NumReservedValues + 1);
  setName(NameStr);
}

CatchSwitchInst::CatchSwitchInst(const CatchSwitchInst &CSI)
    : Instruction(CSI.getType(), Instruction::CatchSwitch, nullptr,
                  CSI.getNumOperands()) {
  init(CSI.getParentPad(), CSI.getUnwindDest(), CSI.getNumOperands());
  setNumHungOffUseOperands(ReservedSpace);
  Use *OL = getOperandList();
  const Use *InOL = CSI.getOperandList();
  for (unsigned I = 1, E = ReservedSpace; I != E; ++I)
    OL[I] = InOL[I];
}

void CatchSwitchInst::init(Value *ParentPad, BasicBlock *UnwindDest,
                           unsigned NumReservedValues) {
  assert(ParentPad && NumReservedValues);

  ReservedSpace = NumReservedValues;
  setNumHungOffUseOperands(UnwindDest ? 2 : 1);
  allocHungoffUses(ReservedSpace);

  Op<0>() = ParentPad;
  if (UnwindDest) {
    setInstructionSubclassData(getSubclassDataFromInstruction() | 1);
    setUnwindDest(UnwindDest);
  }
}

/// growOperands - This grows the operand list in response to a push_back
/// style of operation, roughly doubling the reserved operand space.
void CatchSwitchInst::growOperands(unsigned Size) {
  unsigned NumOperands = getNumOperands();
  assert(NumOperands >= 1);
  if (ReservedSpace >= NumOperands + Size)
    return;
  ReservedSpace = (NumOperands + Size / 2) * 2;
  growHungoffUses(ReservedSpace);
}

void CatchSwitchInst::addHandler(BasicBlock *Handler) {
  unsigned OpNo = getNumOperands();
  growOperands(1);
  assert(OpNo < ReservedSpace && "Growing didn't work!");
  setNumHungOffUseOperands(getNumOperands() + 1);
  getOperandList()[OpNo] = Handler;
}

void CatchSwitchInst::removeHandler(handler_iterator HI) {
  // Move all subsequent handlers up one.
  Use *EndDst = op_end() - 1;
  for (Use *CurDst = HI.getCurrent(); CurDst != EndDst; ++CurDst)
    *CurDst = *(CurDst + 1);
  // Null out the last handler use.
  *EndDst = nullptr;

  setNumHungOffUseOperands(getNumOperands() - 1);
}
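
// Illustrative sketch of the handler API above (ParentPad, InsertBefore and
// the handler blocks are hypothetical values): appending handlers one at a
// time relies on growOperands() to keep ReservedSpace ahead of the operand
// count, e.g.
//
//   CatchSwitchInst *CS =
//       CatchSwitchInst::Create(ParentPad, /*UnwindDest=*/nullptr,
//                               /*NumHandlers=*/1, "cs", InsertBefore);
//   CS->addHandler(HandlerBB0);
//   CS->addHandler(HandlerBB1); // exceeds the reservation; operands regrow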

//===----------------------------------------------------------------------===//
// FuncletPadInst Implementation
//===----------------------------------------------------------------------===//

void FuncletPadInst::init(Value *ParentPad, ArrayRef<Value *> Args,
                          const Twine &NameStr) {
  assert(getNumOperands() == 1 + Args.size() && "NumOperands not set up?");
  llvm::copy(Args, op_begin());
  setParentPad(ParentPad);
  setName(NameStr);
}

FuncletPadInst::FuncletPadInst(const FuncletPadInst &FPI)
    : Instruction(FPI.getType(), FPI.getOpcode(),
                  OperandTraits<FuncletPadInst>::op_end(this) -
                      FPI.getNumOperands(),
                  FPI.getNumOperands()) {
  std::copy(FPI.op_begin(), FPI.op_end(), op_begin());
  setParentPad(FPI.getParentPad());
}

FuncletPadInst::FuncletPadInst(Instruction::FuncletPadOps Op, Value *ParentPad,
                               ArrayRef<Value *> Args, unsigned Values,
                               const Twine &NameStr, Instruction *InsertBefore)
    : Instruction(ParentPad->getType(), Op,
                  OperandTraits<FuncletPadInst>::op_end(this) - Values, Values,
                  InsertBefore) {
  init(ParentPad, Args, NameStr);
}

FuncletPadInst::FuncletPadInst(Instruction::FuncletPadOps Op, Value *ParentPad,
                               ArrayRef<Value *> Args, unsigned Values,
                               const Twine &NameStr, BasicBlock *InsertAtEnd)
    : Instruction(ParentPad->getType(), Op,
                  OperandTraits<FuncletPadInst>::op_end(this) - Values, Values,
                  InsertAtEnd) {
  init(ParentPad, Args, NameStr);
}

//===----------------------------------------------------------------------===//
// UnreachableInst Implementation
//===----------------------------------------------------------------------===//

UnreachableInst::UnreachableInst(LLVMContext &Context,
                                 Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(Context), Instruction::Unreachable, nullptr,
                  0, InsertBefore) {}

UnreachableInst::UnreachableInst(LLVMContext &Context, BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(Context), Instruction::Unreachable, nullptr,
                  0, InsertAtEnd) {}

//===----------------------------------------------------------------------===//
// BranchInst Implementation
//===----------------------------------------------------------------------===//

void BranchInst::AssertOK() {
  if (isConditional())
    assert(getCondition()->getType()->isIntegerTy(1) &&
           "May only branch on boolean predicates!");
}

BranchInst::BranchInst(BasicBlock *IfTrue, Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
                  OperandTraits<BranchInst>::op_end(this) - 1, 1,
                  InsertBefore) {
  assert(IfTrue && "Branch destination may not be null!");
  Op<-1>() = IfTrue;
}

BranchInst::BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
                       Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
                  OperandTraits<BranchInst>::op_end(this) - 3, 3,
                  InsertBefore) {
  Op<-1>() = IfTrue;
  Op<-2>() = IfFalse;
  Op<-3>() = Cond;
#ifndef NDEBUG
  AssertOK();
#endif
}

BranchInst::BranchInst(BasicBlock *IfTrue, BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
                  OperandTraits<BranchInst>::op_end(this) - 1, 1, InsertAtEnd) {
  assert(IfTrue && "Branch destination may not be null!");
  Op<-1>() = IfTrue;
}

BranchInst::BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
                       BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
                  OperandTraits<BranchInst>::op_end(this) - 3, 3, InsertAtEnd) {
  Op<-1>() = IfTrue;
  Op<-2>() = IfFalse;
  Op<-3>() = Cond;
#ifndef NDEBUG
  AssertOK();
#endif
}

BranchInst::BranchInst(const BranchInst &BI)
    : Instruction(Type::getVoidTy(BI.getContext()), Instruction::Br,
                  OperandTraits<BranchInst>::op_end(this) - BI.getNumOperands(),
                  BI.getNumOperands()) {
  Op<-1>() = BI.Op<-1>();
  if (BI.getNumOperands() != 1) {
    assert(BI.getNumOperands() == 3 && "BR can have 1 or 3 operands!");
    Op<-3>() = BI.Op<-3>();
    Op<-2>() = BI.Op<-2>();
  }
  SubclassOptionalData = BI.SubclassOptionalData;
}

void BranchInst::swapSuccessors() {
  assert(isConditional() &&
         "Cannot swap successors of an unconditional branch");
  Op<-1>().swap(Op<-2>());

  // Update profile metadata if present and it matches our structural
  // expectations.
  swapProfMetadata();
}
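
// Worked example for swapSuccessors() above (IR names are illustrative): a
// conditional branch carrying "branch_weights" profile metadata such as
//
//   br i1 %cond, label %then, label %else, !prof !{!"branch_weights", i32 20, i32 80}
//
// becomes, after the swap,
//
//   br i1 %cond, label %else, label %then, !prof !{!"branch_weights", i32 80, i32 20}
//
// since swapProfMetadata() mirrors the weights along with the successors.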

//===----------------------------------------------------------------------===//
// AllocaInst Implementation
//===----------------------------------------------------------------------===//

static Value *getAISize(LLVMContext &Context, Value *Amt) {
  if (!Amt)
    Amt = ConstantInt::get(Type::getInt32Ty(Context), 1);
  else {
    assert(!isa<BasicBlock>(Amt) &&
           "Passed basic block into allocation size parameter! Use other ctor");
    assert(Amt->getType()->isIntegerTy() &&
           "Allocation array size is not an integer!");
  }
  return Amt;
}

AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, const Twine &Name,
                       Instruction *InsertBefore)
    : AllocaInst(Ty, AddrSpace, /*ArraySize=*/nullptr, Name, InsertBefore) {}

AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, const Twine &Name,
                       BasicBlock *InsertAtEnd)
    : AllocaInst(Ty, AddrSpace, /*ArraySize=*/nullptr, Name, InsertAtEnd) {}

AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
                       const Twine &Name, Instruction *InsertBefore)
    : AllocaInst(Ty, AddrSpace, ArraySize, /*Align=*/0, Name, InsertBefore) {}

AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
                       const Twine &Name, BasicBlock *InsertAtEnd)
    : AllocaInst(Ty, AddrSpace, ArraySize, /*Align=*/0, Name, InsertAtEnd) {}

AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
                       unsigned Align, const Twine &Name,
                       Instruction *InsertBefore)
    : UnaryInstruction(PointerType::get(Ty, AddrSpace), Alloca,
                       getAISize(Ty->getContext(), ArraySize), InsertBefore),
      AllocatedType(Ty) {
  setAlignment(Align);
  assert(!Ty->isVoidTy() && "Cannot allocate void!");
  setName(Name);
}

AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
                       unsigned Align, const Twine &Name,
                       BasicBlock *InsertAtEnd)
    : UnaryInstruction(PointerType::get(Ty, AddrSpace), Alloca,
                       getAISize(Ty->getContext(), ArraySize), InsertAtEnd),
      AllocatedType(Ty) {
  setAlignment(Align);
  assert(!Ty->isVoidTy() && "Cannot allocate void!");
  setName(Name);
}

void AllocaInst::setAlignment(unsigned Align) {
  assert((Align & (Align - 1)) == 0 && "Alignment is not a power of 2!");
  assert(Align <= MaximumAlignment &&
         "Alignment is greater than MaximumAlignment!");
  setInstructionSubclassData((getSubclassDataFromInstruction() & ~31) |
                             (Log2_32(Align) + 1));
  assert(getAlignment() == Align && "Alignment representation error!");
}
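
// Worked example for the encoding in setAlignment() above: the alignment is
// stored as Log2(Align) + 1 in the low five bits of the subclass data, with 0
// meaning "no alignment specified". So Align == 16 stores Log2_32(16) + 1 == 5
// and getAlignment() recovers (1 << 5) >> 1 == 16, while Align == 0 stores 0.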

bool AllocaInst::isArrayAllocation() const {
  if (ConstantInt *CI = dyn_cast<ConstantInt>(getOperand(0)))
    return !CI->isOne();
  return true;
}

/// isStaticAlloca - Return true if this alloca is in the entry block of the
/// function and is a constant size. If so, the code generator will fold it
/// into the prolog/epilog code, so it is basically free.
bool AllocaInst::isStaticAlloca() const {
  // Must be constant size.
  if (!isa<ConstantInt>(getArraySize())) return false;

  // Must be in the entry block.
  const BasicBlock *Parent = getParent();
  return Parent == &Parent->getParent()->front() && !isUsedWithInAlloca();
}
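
// Illustrative use of isStaticAlloca() above (the function F and the Worklist
// container are hypothetical): a pass that only wants frame-slot allocas can
// filter the entry block with
//
//   for (Instruction &I : F.getEntryBlock())
//     if (auto *AI = dyn_cast<AllocaInst>(&I))
//       if (AI->isStaticAlloca())
//         Worklist.push_back(AI);
//
// Dynamic allocas (non-constant size, outside the entry block, or used with
// inalloca) are skipped.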

//===----------------------------------------------------------------------===//
// LoadInst Implementation
//===----------------------------------------------------------------------===//

void LoadInst::AssertOK() {
  assert(getOperand(0)->getType()->isPointerTy() &&
         "Ptr must have pointer type.");
  assert(!(isAtomic() && getAlignment() == 0) &&
         "Alignment required for atomic load");
}

LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name,
                   Instruction *InsertBef)
    : LoadInst(Ty, Ptr, Name, /*isVolatile=*/false, InsertBef) {}

LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name,
                   BasicBlock *InsertAE)
    : LoadInst(Ty, Ptr, Name, /*isVolatile=*/false, InsertAE) {}

LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                   Instruction *InsertBef)
    : LoadInst(Ty, Ptr, Name, isVolatile, /*Align=*/0, InsertBef) {}

LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                   BasicBlock *InsertAE)
    : LoadInst(Ty, Ptr, Name, isVolatile, /*Align=*/0, InsertAE) {}

LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                   unsigned Align, Instruction *InsertBef)
    : LoadInst(Ty, Ptr, Name, isVolatile, Align, AtomicOrdering::NotAtomic,
               SyncScope::System, InsertBef) {}

LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                   unsigned Align, BasicBlock *InsertAE)
    : LoadInst(Ty, Ptr, Name, isVolatile, Align, AtomicOrdering::NotAtomic,
               SyncScope::System, InsertAE) {}

LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                   unsigned Align, AtomicOrdering Order,
                   SyncScope::ID SSID, Instruction *InsertBef)
    : UnaryInstruction(Ty, Load, Ptr, InsertBef) {
  assert(Ty == cast<PointerType>(Ptr->getType())->getElementType());
  setVolatile(isVolatile);
  setAlignment(Align);
  setAtomic(Order, SSID);
  AssertOK();
  setName(Name);
}

LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                   unsigned Align, AtomicOrdering Order, SyncScope::ID SSID,
                   BasicBlock *InsertAE)
    : UnaryInstruction(Ty, Load, Ptr, InsertAE) {
  assert(Ty == cast<PointerType>(Ptr->getType())->getElementType());
  setVolatile(isVolatile);
  setAlignment(Align);
  setAtomic(Order, SSID);
  AssertOK();
  setName(Name);
}

void LoadInst::setAlignment(unsigned Align) {
  assert((Align & (Align - 1)) == 0 && "Alignment is not a power of 2!");
  assert(Align <= MaximumAlignment &&
         "Alignment is greater than MaximumAlignment!");
  setInstructionSubclassData((getSubclassDataFromInstruction() & ~(31 << 1)) |
                             ((Log2_32(Align) + 1) << 1));
  assert(getAlignment() == Align && "Alignment representation error!");
}

//===----------------------------------------------------------------------===//
// StoreInst Implementation
//===----------------------------------------------------------------------===//

void StoreInst::AssertOK() {
  assert(getOperand(0) && getOperand(1) && "Both operands must be non-null!");
  assert(getOperand(1)->getType()->isPointerTy() &&
         "Ptr must have pointer type!");
  assert(getOperand(0)->getType() ==
         cast<PointerType>(getOperand(1)->getType())->getElementType()
         && "Ptr must be a pointer to Val type!");
  assert(!(isAtomic() && getAlignment() == 0) &&
         "Alignment required for atomic store");
}

StoreInst::StoreInst(Value *val, Value *addr, Instruction *InsertBefore)
    : StoreInst(val, addr, /*isVolatile=*/false, InsertBefore) {}

StoreInst::StoreInst(Value *val, Value *addr, BasicBlock *InsertAtEnd)
    : StoreInst(val, addr, /*isVolatile=*/false, InsertAtEnd) {}

StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
                     Instruction *InsertBefore)
    : StoreInst(val, addr, isVolatile, /*Align=*/0, InsertBefore) {}

StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
                     BasicBlock *InsertAtEnd)
    : StoreInst(val, addr, isVolatile, /*Align=*/0, InsertAtEnd) {}

StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, unsigned Align,
                     Instruction *InsertBefore)
    : StoreInst(val, addr, isVolatile, Align, AtomicOrdering::NotAtomic,
                SyncScope::System, InsertBefore) {}

StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, unsigned Align,
                     BasicBlock *InsertAtEnd)
    : StoreInst(val, addr, isVolatile, Align, AtomicOrdering::NotAtomic,
                SyncScope::System, InsertAtEnd) {}

StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, unsigned Align,
                     AtomicOrdering Order, SyncScope::ID SSID,
                     Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(val->getContext()), Store,
                  OperandTraits<StoreInst>::op_begin(this),
                  OperandTraits<StoreInst>::operands(this),
                  InsertBefore) {
  Op<0>() = val;
  Op<1>() = addr;
  setVolatile(isVolatile);
  setAlignment(Align);
  setAtomic(Order, SSID);
  AssertOK();
}

StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, unsigned Align,
                     AtomicOrdering Order, SyncScope::ID SSID,
                     BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(val->getContext()), Store,
                  OperandTraits<StoreInst>::op_begin(this),
                  OperandTraits<StoreInst>::operands(this),
                  InsertAtEnd) {
  Op<0>() = val;
  Op<1>() = addr;
  setVolatile(isVolatile);
  setAlignment(Align);
  setAtomic(Order, SSID);
  AssertOK();
}

void StoreInst::setAlignment(unsigned Align) {
  assert((Align & (Align - 1)) == 0 && "Alignment is not a power of 2!");
  assert(Align <= MaximumAlignment &&
         "Alignment is greater than MaximumAlignment!");
  setInstructionSubclassData((getSubclassDataFromInstruction() & ~(31 << 1)) |
                             ((Log2_32(Align) + 1) << 1));
  assert(getAlignment() == Align && "Alignment representation error!");
}

//===----------------------------------------------------------------------===//
// AtomicCmpXchgInst Implementation
//===----------------------------------------------------------------------===//

void AtomicCmpXchgInst::Init(Value *Ptr, Value *Cmp, Value *NewVal,
                             AtomicOrdering SuccessOrdering,
                             AtomicOrdering FailureOrdering,
                             SyncScope::ID SSID) {
  Op<0>() = Ptr;
  Op<1>() = Cmp;
  Op<2>() = NewVal;
  setSuccessOrdering(SuccessOrdering);
  setFailureOrdering(FailureOrdering);
  setSyncScopeID(SSID);

  assert(getOperand(0) && getOperand(1) && getOperand(2) &&
         "All operands must be non-null!");
  assert(getOperand(0)->getType()->isPointerTy() &&
         "Ptr must have pointer type!");
  assert(getOperand(1)->getType() ==
         cast<PointerType>(getOperand(0)->getType())->getElementType()
         && "Ptr must be a pointer to Cmp type!");
  assert(getOperand(2)->getType() ==
         cast<PointerType>(getOperand(0)->getType())->getElementType()
         && "Ptr must be a pointer to NewVal type!");
  assert(SuccessOrdering != AtomicOrdering::NotAtomic &&
         "AtomicCmpXchg instructions must be atomic!");
  assert(FailureOrdering != AtomicOrdering::NotAtomic &&
         "AtomicCmpXchg instructions must be atomic!");
  assert(!isStrongerThan(FailureOrdering, SuccessOrdering) &&
         "AtomicCmpXchg failure argument shall be no stronger than the success "
         "argument");
  assert(FailureOrdering != AtomicOrdering::Release &&
         FailureOrdering != AtomicOrdering::AcquireRelease &&
         "AtomicCmpXchg failure ordering cannot include release semantics");
}
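
// Example of the ordering constraints checked in Init() above (Ptr, Cmp,
// NewVal and InsertBefore are illustrative values): a success ordering of
// SequentiallyConsistent with a failure ordering of Acquire is accepted,
// because the failure ordering is no stronger than the success ordering and
// carries no release semantics, e.g.
//
//   new AtomicCmpXchgInst(Ptr, Cmp, NewVal,
//                         AtomicOrdering::SequentiallyConsistent,
//                         AtomicOrdering::Acquire, SyncScope::System,
//                         InsertBefore);
//
// whereas a Release failure ordering, or a failure ordering stronger than the
// success ordering, trips the corresponding asserts.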

AtomicCmpXchgInst::AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal,
                                     AtomicOrdering SuccessOrdering,
                                     AtomicOrdering FailureOrdering,
                                     SyncScope::ID SSID,
                                     Instruction *InsertBefore)
    : Instruction(
          StructType::get(Cmp->getType(), Type::getInt1Ty(Cmp->getContext())),
          AtomicCmpXchg, OperandTraits<AtomicCmpXchgInst>::op_begin(this),
          OperandTraits<AtomicCmpXchgInst>::operands(this), InsertBefore) {
  Init(Ptr, Cmp, NewVal, SuccessOrdering, FailureOrdering, SSID);
}

AtomicCmpXchgInst::AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal,
                                     AtomicOrdering SuccessOrdering,
                                     AtomicOrdering FailureOrdering,
                                     SyncScope::ID SSID,
                                     BasicBlock *InsertAtEnd)
    : Instruction(
          StructType::get(Cmp->getType(), Type::getInt1Ty(Cmp->getContext())),
          AtomicCmpXchg, OperandTraits<AtomicCmpXchgInst>::op_begin(this),
          OperandTraits<AtomicCmpXchgInst>::operands(this), InsertAtEnd) {
  Init(Ptr, Cmp, NewVal, SuccessOrdering, FailureOrdering, SSID);
}

//===----------------------------------------------------------------------===//
// AtomicRMWInst Implementation
//===----------------------------------------------------------------------===//

void AtomicRMWInst::Init(BinOp Operation, Value *Ptr, Value *Val,
                         AtomicOrdering Ordering,
                         SyncScope::ID SSID) {
  Op<0>() = Ptr;
  Op<1>() = Val;
  setOperation(Operation);
  setOrdering(Ordering);
  setSyncScopeID(SSID);

  assert(getOperand(0) && getOperand(1) &&
         "All operands must be non-null!");
  assert(getOperand(0)->getType()->isPointerTy() &&
         "Ptr must have pointer type!");
  assert(getOperand(1)->getType() ==
         cast<PointerType>(getOperand(0)->getType())->getElementType()
         && "Ptr must be a pointer to Val type!");
  assert(Ordering != AtomicOrdering::NotAtomic &&
         "AtomicRMW instructions must be atomic!");
}

AtomicRMWInst::AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val,
                             AtomicOrdering Ordering,
                             SyncScope::ID SSID,
                             Instruction *InsertBefore)
    : Instruction(Val->getType(), AtomicRMW,
                  OperandTraits<AtomicRMWInst>::op_begin(this),
                  OperandTraits<AtomicRMWInst>::operands(this),
                  InsertBefore) {
  Init(Operation, Ptr, Val, Ordering, SSID);
}

AtomicRMWInst::AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val,
                             AtomicOrdering Ordering,
                             SyncScope::ID SSID,
                             BasicBlock *InsertAtEnd)
    : Instruction(Val->getType(), AtomicRMW,
                  OperandTraits<AtomicRMWInst>::op_begin(this),
                  OperandTraits<AtomicRMWInst>::operands(this),
                  InsertAtEnd) {
  Init(Operation, Ptr, Val, Ordering, SSID);
}

StringRef AtomicRMWInst::getOperationName(BinOp Op) {
  switch (Op) {
  case AtomicRMWInst::Xchg:
    return "xchg";
  case AtomicRMWInst::Add:
    return "add";
  case AtomicRMWInst::Sub:
    return "sub";
  case AtomicRMWInst::And:
    return "and";
  case AtomicRMWInst::Nand:
    return "nand";
  case AtomicRMWInst::Or:
    return "or";
  case AtomicRMWInst::Xor:
    return "xor";
  case AtomicRMWInst::Max:
    return "max";
  case AtomicRMWInst::Min:
    return "min";
  case AtomicRMWInst::UMax:
    return "umax";
  case AtomicRMWInst::UMin:
    return "umin";
  case AtomicRMWInst::FAdd:
    return "fadd";
  case AtomicRMWInst::FSub:
    return "fsub";
  case AtomicRMWInst::BAD_BINOP:
    return "<invalid operation>";
  }
  llvm_unreachable("invalid atomicrmw operation");
}

//===----------------------------------------------------------------------===//
// FenceInst Implementation
//===----------------------------------------------------------------------===//

FenceInst::FenceInst(LLVMContext &C, AtomicOrdering Ordering,
                     SyncScope::ID SSID,
                     Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(C), Fence, nullptr, 0, InsertBefore) {
  setOrdering(Ordering);
  setSyncScopeID(SSID);
}

FenceInst::FenceInst(LLVMContext &C, AtomicOrdering Ordering,
                     SyncScope::ID SSID,
                     BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(C), Fence, nullptr, 0, InsertAtEnd) {
  setOrdering(Ordering);
  setSyncScopeID(SSID);
}

//===----------------------------------------------------------------------===//
// GetElementPtrInst Implementation
//===----------------------------------------------------------------------===//

void GetElementPtrInst::init(Value *Ptr, ArrayRef<Value *> IdxList,
                             const Twine &Name) {
  assert(getNumOperands() == 1 + IdxList.size() &&
         "NumOperands not initialized?");
  Op<0>() = Ptr;
  llvm::copy(IdxList, op_begin() + 1);
  setName(Name);
}

GetElementPtrInst::GetElementPtrInst(const GetElementPtrInst &GEPI)
    : Instruction(GEPI.getType(), GetElementPtr,
                  OperandTraits<GetElementPtrInst>::op_end(this) -
                      GEPI.getNumOperands(),
                  GEPI.getNumOperands()),
      SourceElementType(GEPI.SourceElementType),
      ResultElementType(GEPI.ResultElementType) {
  std::copy(GEPI.op_begin(), GEPI.op_end(), op_begin());
  SubclassOptionalData = GEPI.SubclassOptionalData;
}

/// getIndexedType - Returns the type of the element that would be accessed
/// with a gep instruction with the specified parameters.
///
/// The Idxs pointer should point to a contiguous piece of memory containing
/// the indices, either as Value* or uint64_t.
///
/// A null type is returned if the indices are invalid for the specified
/// pointer type.
///
template <typename IndexTy>
static Type *getIndexedTypeInternal(Type *Agg, ArrayRef<IndexTy> IdxList) {
  // Handle the special case of an empty index set, which is always valid.
  if (IdxList.empty())
    return Agg;

  // If there is at least one index, the top level type must be sized,
  // otherwise it cannot be 'stepped over'.
  if (!Agg->isSized())
    return nullptr;

  unsigned CurIdx = 1;
  for (; CurIdx != IdxList.size(); ++CurIdx) {
    CompositeType *CT = dyn_cast<CompositeType>(Agg);
    if (!CT || CT->isPointerTy()) return nullptr;
    IndexTy Index = IdxList[CurIdx];
    if (!CT->indexValid(Index)) return nullptr;
    Agg = CT->getTypeAtIndex(Index);
  }
  return CurIdx == IdxList.size() ? Agg : nullptr;
}

Type *GetElementPtrInst::getIndexedType(Type *Ty, ArrayRef<Value *> IdxList) {
  return getIndexedTypeInternal(Ty, IdxList);
}

Type *GetElementPtrInst::getIndexedType(Type *Ty,
                                        ArrayRef<Constant *> IdxList) {
  return getIndexedTypeInternal(Ty, IdxList);
}

Type *GetElementPtrInst::getIndexedType(Type *Ty, ArrayRef<uint64_t> IdxList) {
  return getIndexedTypeInternal(Ty, IdxList);
}
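
// Worked example for getIndexedType() above (types written in IR syntax): with
// source element type { i32, [4 x i8] } and indices [0, 1, 2], the first index
// only steps over the (sized) outer type, the second selects the [4 x i8]
// field, and the third selects an i8 element, so the indexed type is i8. An
// out-of-range struct index, e.g. indices [0, 2], makes the helper return null.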

/// hasAllZeroIndices - Return true if all of the indices of this GEP are
/// zeros. If so, the result pointer and the first operand have the same
/// value, just potentially different types.
bool GetElementPtrInst::hasAllZeroIndices() const {
  for (unsigned i = 1, e = getNumOperands(); i != e; ++i) {
    if (ConstantInt *CI = dyn_cast<ConstantInt>(getOperand(i))) {
      if (!CI->isZero()) return false;
    } else {
      return false;
    }
  }
  return true;
}

/// hasAllConstantIndices - Return true if all of the indices of this GEP are
/// constant integers. If so, the result pointer and the first operand have
/// a constant offset between them.
bool GetElementPtrInst::hasAllConstantIndices() const {
  for (unsigned i = 1, e = getNumOperands(); i != e; ++i) {
    if (!isa<ConstantInt>(getOperand(i)))
      return false;
  }
  return true;
}

void GetElementPtrInst::setIsInBounds(bool B) {
  cast<GEPOperator>(this)->setIsInBounds(B);
}

bool GetElementPtrInst::isInBounds() const {
  return cast<GEPOperator>(this)->isInBounds();
}

bool GetElementPtrInst::accumulateConstantOffset(const DataLayout &DL,
                                                 APInt &Offset) const {
  // Delegate to the generic GEPOperator implementation.
  return cast<GEPOperator>(this)->accumulateConstantOffset(DL, Offset);
}

//===----------------------------------------------------------------------===//
// ExtractElementInst Implementation
//===----------------------------------------------------------------------===//

ExtractElementInst::ExtractElementInst(Value *Val, Value *Index,
                                       const Twine &Name,
                                       Instruction *InsertBef)
  : Instruction(cast<VectorType>(Val->getType())->getElementType(),
                ExtractElement,
                OperandTraits<ExtractElementInst>::op_begin(this),
                2, InsertBef) {
  assert(isValidOperands(Val, Index) &&
         "Invalid extractelement instruction operands!");
  Op<0>() = Val;
  Op<1>() = Index;
  setName(Name);
}

ExtractElementInst::ExtractElementInst(Value *Val, Value *Index,
                                       const Twine &Name,
                                       BasicBlock *InsertAE)
  : Instruction(cast<VectorType>(Val->getType())->getElementType(),
                ExtractElement,
                OperandTraits<ExtractElementInst>::op_begin(this),
                2, InsertAE) {
  assert(isValidOperands(Val, Index) &&
         "Invalid extractelement instruction operands!");
  Op<0>() = Val;
  Op<1>() = Index;
  setName(Name);
}

bool ExtractElementInst::isValidOperands(const Value *Val, const Value *Index) {
  if (!Val->getType()->isVectorTy() || !Index->getType()->isIntegerTy())
    return false;
  return true;
}

//===----------------------------------------------------------------------===//
// InsertElementInst Implementation
//===----------------------------------------------------------------------===//

InsertElementInst::InsertElementInst(Value *Vec, Value *Elt, Value *Index,
                                     const Twine &Name,
                                     Instruction *InsertBef)
  : Instruction(Vec->getType(), InsertElement,
                OperandTraits<InsertElementInst>::op_begin(this),
                3, InsertBef) {
  assert(isValidOperands(Vec, Elt, Index) &&
         "Invalid insertelement instruction operands!");
  Op<0>() = Vec;
  Op<1>() = Elt;
  Op<2>() = Index;
  setName(Name);
}

InsertElementInst::InsertElementInst(Value *Vec, Value *Elt, Value *Index,
                                     const Twine &Name,
                                     BasicBlock *InsertAE)
  : Instruction(Vec->getType(), InsertElement,
                OperandTraits<InsertElementInst>::op_begin(this),
                3, InsertAE) {
  assert(isValidOperands(Vec, Elt, Index) &&
         "Invalid insertelement instruction operands!");
  Op<0>() = Vec;
  Op<1>() = Elt;
  Op<2>() = Index;
  setName(Name);
}

bool InsertElementInst::isValidOperands(const Value *Vec, const Value *Elt,
                                        const Value *Index) {
  if (!Vec->getType()->isVectorTy())
    return false; // First operand of insertelement must be vector type.

  if (Elt->getType() != cast<VectorType>(Vec->getType())->getElementType())
    return false; // Second operand of insertelement must be vector element type.

  if (!Index->getType()->isIntegerTy())
    return false; // Third operand of insertelement must be an integer.

  return true;
}

//===----------------------------------------------------------------------===//
// ShuffleVectorInst Implementation
//===----------------------------------------------------------------------===//

ShuffleVectorInst::ShuffleVectorInst(Value *V1, Value *V2, Value *Mask,
                                     const Twine &Name,
                                     Instruction *InsertBefore)
    : Instruction(
          VectorType::get(cast<VectorType>(V1->getType())->getElementType(),
                          cast<VectorType>(Mask->getType())->getNumElements()),
          ShuffleVector,
          OperandTraits<ShuffleVectorInst>::op_begin(this),
          OperandTraits<ShuffleVectorInst>::operands(this),
          InsertBefore) {
  assert(isValidOperands(V1, V2, Mask) &&
         "Invalid shuffle vector instruction operands!");
  Op<0>() = V1;
  Op<1>() = V2;
  Op<2>() = Mask;
  setName(Name);
}

ShuffleVectorInst::ShuffleVectorInst(Value *V1, Value *V2, Value *Mask,
                                     const Twine &Name,
                                     BasicBlock *InsertAtEnd)
    : Instruction(
          VectorType::get(cast<VectorType>(V1->getType())->getElementType(),
                          cast<VectorType>(Mask->getType())->getNumElements()),
          ShuffleVector,
          OperandTraits<ShuffleVectorInst>::op_begin(this),
          OperandTraits<ShuffleVectorInst>::operands(this),
          InsertAtEnd) {
  assert(isValidOperands(V1, V2, Mask) &&
         "Invalid shuffle vector instruction operands!");
  Op<0>() = V1;
  Op<1>() = V2;
  Op<2>() = Mask;
  setName(Name);
}

void ShuffleVectorInst::commute() {
  int NumOpElts = Op<0>()->getType()->getVectorNumElements();
  int NumMaskElts = getMask()->getType()->getVectorNumElements();
  SmallVector<Constant*, 16> NewMask(NumMaskElts);
  Type *Int32Ty = Type::getInt32Ty(getContext());
  for (int i = 0; i != NumMaskElts; ++i) {
    int MaskElt = getMaskValue(i);
    if (MaskElt == -1) {
      NewMask[i] = UndefValue::get(Int32Ty);
      continue;
    }
    assert(MaskElt >= 0 && MaskElt < 2 * NumOpElts && "Out-of-range mask");
    MaskElt = (MaskElt < NumOpElts) ? MaskElt + NumOpElts : MaskElt - NumOpElts;
    NewMask[i] = ConstantInt::get(Int32Ty, MaskElt);
  }
  Op<2>() = ConstantVector::get(NewMask);
  Op<0>().swap(Op<1>());
}
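
// Worked example for commute() above (IR names are illustrative): for
//
//   %s = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
//
// each mask element is re-targeted at the other operand (0 -> 4, 5 -> 1,
// 2 -> 6, 7 -> 3) and the operands are swapped, yielding
//
//   %s = shufflevector <4 x i32> %b, <4 x i32> %a, <4 x i32> <i32 4, i32 1, i32 6, i32 3>
//
// which selects exactly the same elements as before.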

bool ShuffleVectorInst::isValidOperands(const Value *V1, const Value *V2,
                                        const Value *Mask) {
  // V1 and V2 must be vectors of the same type.
  if (!V1->getType()->isVectorTy() || V1->getType() != V2->getType())
    return false;

  // Mask must be vector of i32.
  auto *MaskTy = dyn_cast<VectorType>(Mask->getType());
  if (!MaskTy || !MaskTy->getElementType()->isIntegerTy(32))
    return false;

  // Check to see if Mask is valid.
  if (isa<UndefValue>(Mask) || isa<ConstantAggregateZero>(Mask))
    return true;

  if (const auto *MV = dyn_cast<ConstantVector>(Mask)) {
    unsigned V1Size = cast<VectorType>(V1->getType())->getNumElements();
    for (Value *Op : MV->operands()) {
      if (auto *CI = dyn_cast<ConstantInt>(Op)) {
        if (CI->uge(V1Size * 2))
          return false;
      } else if (!isa<UndefValue>(Op)) {
        return false;
      }
    }
    return true;
  }

  if (const auto *CDS = dyn_cast<ConstantDataSequential>(Mask)) {
    unsigned V1Size = cast<VectorType>(V1->getType())->getNumElements();
    for (unsigned i = 0, e = MaskTy->getNumElements(); i != e; ++i)
      if (CDS->getElementAsInteger(i) >= V1Size * 2)
        return false;
    return true;
  }

  // The bitcode reader can create a placeholder for a forward reference
  // used as the shuffle mask. When this occurs, the shuffle mask will
  // fall into this case and fail. To avoid this error, do this bit of
  // ugliness to allow such a mask to pass.
  if (const auto *CE = dyn_cast<ConstantExpr>(Mask))
    if (CE->getOpcode() == Instruction::UserOp1)
      return true;

  return false;
}

int ShuffleVectorInst::getMaskValue(const Constant *Mask, unsigned i) {
  assert(i < Mask->getType()->getVectorNumElements() && "Index out of range");
  if (auto *CDS = dyn_cast<ConstantDataSequential>(Mask))
    return CDS->getElementAsInteger(i);
  Constant *C = Mask->getAggregateElement(i);
  if (isa<UndefValue>(C))
    return -1;
  return cast<ConstantInt>(C)->getZExtValue();
}

void ShuffleVectorInst::getShuffleMask(const Constant *Mask,
                                       SmallVectorImpl<int> &Result) {
  unsigned NumElts = Mask->getType()->getVectorNumElements();

  if (auto *CDS = dyn_cast<ConstantDataSequential>(Mask)) {
    for (unsigned i = 0; i != NumElts; ++i)
      Result.push_back(CDS->getElementAsInteger(i));
    return;
  }
  for (unsigned i = 0; i != NumElts; ++i) {
    Constant *C = Mask->getAggregateElement(i);
    Result.push_back(isa<UndefValue>(C) ? -1 :
                     cast<ConstantInt>(C)->getZExtValue());
  }
}

static bool isSingleSourceMaskImpl(ArrayRef<int> Mask, int NumOpElts) {
  assert(!Mask.empty() && "Shuffle mask must contain elements");
  bool UsesLHS = false;
  bool UsesRHS = false;
  for (int i = 0, NumMaskElts = Mask.size(); i < NumMaskElts; ++i) {
    if (Mask[i] == -1)
      continue;
    assert(Mask[i] >= 0 && Mask[i] < (NumOpElts * 2) &&
           "Out-of-bounds shuffle mask element");
    UsesLHS |= (Mask[i] < NumOpElts);
    UsesRHS |= (Mask[i] >= NumOpElts);
    if (UsesLHS && UsesRHS)
      return false;
  }
  assert((UsesLHS ^ UsesRHS) && "Should have selected from exactly 1 source");
  return true;
}

bool ShuffleVectorInst::isSingleSourceMask(ArrayRef<int> Mask) {
  // We don't have vector operand size information, so assume operands are the
  // same size as the mask.
  return isSingleSourceMaskImpl(Mask, Mask.size());
}
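
// Examples for isSingleSourceMask() above: with four-element operands, the
// masks <2, 2, 0, 1> and <-1, 5, 6, 4> each draw every defined element from a
// single operand, so they qualify; <0, 5, 1, 4> mixes elements from both
// operands and does not.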

static bool isIdentityMaskImpl(ArrayRef<int> Mask, int NumOpElts) {
  if (!isSingleSourceMaskImpl(Mask, NumOpElts))
    return false;
  for (int i = 0, NumMaskElts = Mask.size(); i < NumMaskElts; ++i) {
    if (Mask[i] == -1)
      continue;
    if (Mask[i] != i && Mask[i] != (NumOpElts + i))
      return false;
  }
  return true;
}

bool ShuffleVectorInst::isIdentityMask(ArrayRef<int> Mask) {
  // We don't have vector operand size information, so assume operands are the
  // same size as the mask.
  return isIdentityMaskImpl(Mask, Mask.size());
}

bool ShuffleVectorInst::isReverseMask(ArrayRef<int> Mask) {
  if (!isSingleSourceMask(Mask))
    return false;
  for (int i = 0, NumElts = Mask.size(); i < NumElts; ++i) {
    if (Mask[i] == -1)
      continue;
    if (Mask[i] != (NumElts - 1 - i) && Mask[i] != (NumElts + NumElts - 1 - i))
      return false;
  }
  return true;
}

bool ShuffleVectorInst::isZeroEltSplatMask(ArrayRef<int> Mask) {
  if (!isSingleSourceMask(Mask))
    return false;
  for (int i = 0, NumElts = Mask.size(); i < NumElts; ++i) {
    if (Mask[i] == -1)
      continue;
    if (Mask[i] != 0 && Mask[i] != NumElts)
      return false;
  }
  return true;
}

bool ShuffleVectorInst::isSelectMask(ArrayRef<int> Mask) {
  // Select is differentiated from identity. It requires using both sources.
  if (isSingleSourceMask(Mask))
    return false;
  for (int i = 0, NumElts = Mask.size(); i < NumElts; ++i) {
    if (Mask[i] == -1)
      continue;
    if (Mask[i] != i && Mask[i] != (NumElts + i))
      return false;
  }
  return true;
}

bool ShuffleVectorInst::isTransposeMask(ArrayRef<int> Mask) {
  // Example masks that will return true:
  // v1 = <a, b, c, d>
  // v2 = <e, f, g, h>
  // trn1 = shufflevector v1, v2 <0, 4, 2, 6> = <a, e, c, g>
  // trn2 = shufflevector v1, v2 <1, 5, 3, 7> = <b, f, d, h>

  // 1. The number of elements in the mask must be a power-of-2 and at least 2.
  int NumElts = Mask.size();
  if (NumElts < 2 || !isPowerOf2_32(NumElts))
    return false;

  // 2. The first element of the mask must be either a 0 or a 1.
  if (Mask[0] != 0 && Mask[0] != 1)
    return false;

  // 3. The difference between the first 2 elements must be equal to the
  // number of elements in the mask.
  if ((Mask[1] - Mask[0]) != NumElts)
    return false;

  // 4. The difference between consecutive even-numbered and odd-numbered
  // elements must be equal to 2.
  for (int i = 2; i < NumElts; ++i) {
    int MaskEltVal = Mask[i];
    if (MaskEltVal == -1)
      return false;
    int MaskEltPrevVal = Mask[i - 2];
    if (MaskEltVal - MaskEltPrevVal != 2)
      return false;
  }
  return true;
}

bool ShuffleVectorInst::isExtractSubvectorMask(ArrayRef<int> Mask,
                                               int NumSrcElts, int &Index) {
  // Must extract from a single source.
  if (!isSingleSourceMaskImpl(Mask, NumSrcElts))
    return false;

  // Must be smaller (else this is an Identity shuffle).
  if (NumSrcElts <= (int)Mask.size())
    return false;

  // Find start of extraction, accounting that we may start with an UNDEF.
  int SubIndex = -1;
  for (int i = 0, e = Mask.size(); i != e; ++i) {
    int M = Mask[i];
    if (M < 0)
      continue;
    int Offset = (M % NumSrcElts) - i;
    if (0 <= SubIndex && SubIndex != Offset)
      return false;
    SubIndex = Offset;
  }

  if (0 <= SubIndex) {
    Index = SubIndex;
    return true;
  }
  return false;
}
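
// Worked example for isExtractSubvectorMask() above: with NumSrcElts == 8, the
// mask <2, 3, 4, 5> extracts a contiguous four-element subvector starting at
// element 2, so the function returns true and sets Index to 2. A mask such as
// <2, 3, 5, 6> has no single starting offset and returns false.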

bool ShuffleVectorInst::isIdentityWithPadding() const {
  int NumOpElts = Op<0>()->getType()->getVectorNumElements();
  int NumMaskElts = getType()->getVectorNumElements();
  if (NumMaskElts <= NumOpElts)
    return false;

  // The first part of the mask must choose elements from exactly 1 source op.
  SmallVector<int, 16> Mask = getShuffleMask();
  if (!isIdentityMaskImpl(Mask, NumOpElts))
    return false;

  // All extending must be with undef elements.
  for (int i = NumOpElts; i < NumMaskElts; ++i)
    if (Mask[i] != -1)
      return false;

  return true;
}

bool ShuffleVectorInst::isIdentityWithExtract() const {
  int NumOpElts = Op<0>()->getType()->getVectorNumElements();
  int NumMaskElts = getType()->getVectorNumElements();
  if (NumMaskElts >= NumOpElts)
    return false;

  return isIdentityMaskImpl(getShuffleMask(), NumOpElts);
}

bool ShuffleVectorInst::isConcat() const {
  // Vector concatenation is differentiated from identity with padding.
  if (isa<UndefValue>(Op<0>()) || isa<UndefValue>(Op<1>()))
    return false;

  int NumOpElts = Op<0>()->getType()->getVectorNumElements();
  int NumMaskElts = getType()->getVectorNumElements();
  if (NumMaskElts != NumOpElts * 2)
    return false;

  // Use the mask length rather than the operands' vector lengths here. We
  // already know that the shuffle returns a vector twice as long as the
  // inputs, and neither of the inputs are undef vectors. If the mask picks
  // consecutive elements from both inputs, then this is a concatenation of the
  // inputs.
  return isIdentityMaskImpl(getShuffleMask(), NumMaskElts);
}
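
// Worked example for isConcat() above (IR names are illustrative): with two
// defined <4 x i32> operands %a and %b, the mask <0, 1, 2, 3, 4, 5, 6, 7>
// produces an <8 x i32> result that is simply %a followed by %b, so isConcat()
// returns true; replacing either operand with undef, or reordering the mask,
// makes it return false.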

//===----------------------------------------------------------------------===//
// InsertValueInst Class
//===----------------------------------------------------------------------===//

void InsertValueInst::init(Value *Agg, Value *Val, ArrayRef<unsigned> Idxs,
                           const Twine &Name) {
  assert(getNumOperands() == 2 && "NumOperands not initialized?");

  // There's no fundamental reason why we require at least one index
  // (other than weirdness with &*IdxBegin being invalid; see
  // getelementptr's init routine for example). But there's no
  // present need to support it.
  assert(!Idxs.empty() && "InsertValueInst must have at least one index");

  assert(ExtractValueInst::getIndexedType(Agg->getType(), Idxs) ==
         Val->getType() && "Inserted value must match indexed type!");
  Op<0>() = Agg;
  Op<1>() = Val;

  Indices.append(Idxs.begin(), Idxs.end());
  setName(Name);
}

InsertValueInst::InsertValueInst(const InsertValueInst &IVI)
    : Instruction(IVI.getType(), InsertValue,
                  OperandTraits<InsertValueInst>::op_begin(this), 2),
      Indices(IVI.Indices) {
  Op<0>() = IVI.getOperand(0);
  Op<1>() = IVI.getOperand(1);
  SubclassOptionalData = IVI.SubclassOptionalData;
}

//===----------------------------------------------------------------------===//
// ExtractValueInst Class
//===----------------------------------------------------------------------===//

void ExtractValueInst::init(ArrayRef<unsigned> Idxs, const Twine &Name) {
  assert(getNumOperands() == 1 && "NumOperands not initialized?");

  // There's no fundamental reason why we require at least one index.
  // But there's no present need to support it.
  assert(!Idxs.empty() && "ExtractValueInst must have at least one index");

  Indices.append(Idxs.begin(), Idxs.end());
  setName(Name);
}

ExtractValueInst::ExtractValueInst(const ExtractValueInst &EVI)
    : UnaryInstruction(EVI.getType(), ExtractValue, EVI.getOperand(0)),
      Indices(EVI.Indices) {
  SubclassOptionalData = EVI.SubclassOptionalData;
}

// getIndexedType - Returns the type of the element that would be extracted
// with an extractvalue instruction with the specified parameters.
//
// A null type is returned if the indices are invalid for the specified
// aggregate type.
//
Type *ExtractValueInst::getIndexedType(Type *Agg,
                                       ArrayRef<unsigned> Idxs) {
  for (unsigned Index : Idxs) {
    // We can't use CompositeType::indexValid(Index) here.
    // indexValid() always returns true for arrays because getelementptr allows
    // out-of-bounds indices. Since we don't allow those for extractvalue and
    // insertvalue we need to check array indexing manually.
    // Since the only other types we can index into are struct types it's just
    // as easy to check those manually as well.
    if (ArrayType *AT = dyn_cast<ArrayType>(Agg)) {
      if (Index >= AT->getNumElements())
        return nullptr;
    } else if (StructType *ST = dyn_cast<StructType>(Agg)) {
      if (Index >= ST->getNumElements())
        return nullptr;
    } else {
      // Not a valid type to index into.
      return nullptr;
    }

    Agg = cast<CompositeType>(Agg)->getTypeAtIndex(Index);
  }
  return const_cast<Type*>(Agg);
}

//===----------------------------------------------------------------------===//
// UnaryOperator Class
//===----------------------------------------------------------------------===//

UnaryOperator::UnaryOperator(UnaryOps iType, Value *S,
                             Type *Ty, const Twine &Name,
                             Instruction *InsertBefore)
  : UnaryInstruction(Ty, iType, S, InsertBefore) {
  Op<0>() = S;
  setName(Name);
  AssertOK();
}

UnaryOperator::UnaryOperator(UnaryOps iType, Value *S,
                             Type *Ty, const Twine &Name,
                             BasicBlock *InsertAtEnd)
  : UnaryInstruction(Ty, iType, S, InsertAtEnd) {
  Op<0>() = S;
  setName(Name);
  AssertOK();
}

UnaryOperator *UnaryOperator::Create(UnaryOps Op, Value *S,
                                     const Twine &Name,
                                     Instruction *InsertBefore) {
  return new UnaryOperator(Op, S, S->getType(), Name, InsertBefore);
}

UnaryOperator *UnaryOperator::Create(UnaryOps Op, Value *S,
                                     const Twine &Name,
                                     BasicBlock *InsertAtEnd) {
  UnaryOperator *Res = Create(Op, S, Name);
  InsertAtEnd->getInstList().push_back(Res);
  return Res;
}

void UnaryOperator::AssertOK() {
  Value *LHS = getOperand(0);
  (void)LHS; // Silence warnings.
#ifndef NDEBUG
  switch (getOpcode()) {
  case FNeg:
    assert(getType() == LHS->getType() &&
           "Unary operation should return same type as operand!");
    assert(getType()->isFPOrFPVectorTy() &&
           "Tried to create a floating-point operation on a "
           "non-floating-point type!");
    break;
  default: llvm_unreachable("Invalid opcode provided");
  }
#endif
}

//===----------------------------------------------------------------------===//
// BinaryOperator Class
//===----------------------------------------------------------------------===//

BinaryOperator::BinaryOperator(BinaryOps iType, Value *S1, Value *S2,
                               Type *Ty, const Twine &Name,
                               Instruction *InsertBefore)
  : Instruction(Ty, iType,
                OperandTraits<BinaryOperator>::op_begin(this),
                OperandTraits<BinaryOperator>::operands(this),
                InsertBefore) {
  Op<0>() = S1;
  Op<1>() = S2;
  setName(Name);
  AssertOK();
}

BinaryOperator::BinaryOperator(BinaryOps iType, Value *S1, Value *S2,
                               Type *Ty, const Twine &Name,
                               BasicBlock *InsertAtEnd)
  : Instruction(Ty, iType,
                OperandTraits<BinaryOperator>::op_begin(this),
                OperandTraits<BinaryOperator>::operands(this),
                InsertAtEnd) {
  Op<0>() = S1;
  Op<1>() = S2;
  setName(Name);
  AssertOK();
}

void BinaryOperator::AssertOK() {
  Value *LHS = getOperand(0), *RHS = getOperand(1);
  (void)LHS; (void)RHS; // Silence warnings.
  assert(LHS->getType() == RHS->getType() &&
         "Binary operator operand types must match!");
#ifndef NDEBUG
  switch (getOpcode()) {
  case Add: case Sub:
  case Mul:
    assert(getType() == LHS->getType() &&
           "Arithmetic operation should return same type as operands!");
    assert(getType()->isIntOrIntVectorTy() &&
           "Tried to create an integer operation on a non-integer type!");
    break;
  case FAdd: case FSub:
  case FMul:
    assert(getType() == LHS->getType() &&
           "Arithmetic operation should return same type as operands!");
    assert(getType()->isFPOrFPVectorTy() &&
           "Tried to create a floating-point operation on a "
           "non-floating-point type!");
    break;
  case UDiv:
  case SDiv:
    assert(getType() == LHS->getType() &&
           "Arithmetic operation should return same type as operands!");
    assert(getType()->isIntOrIntVectorTy() &&
           "Incorrect operand type (not integer) for S/UDIV");
    break;
  case FDiv:
    assert(getType() == LHS->getType() &&
           "Arithmetic operation should return same type as operands!");
    assert(getType()->isFPOrFPVectorTy() &&
           "Incorrect operand type (not floating point) for FDIV");
    break;
  case URem:
  case SRem:
    assert(getType() == LHS->getType() &&
           "Arithmetic operation should return same type as operands!");
    assert(getType()->isIntOrIntVectorTy() &&
           "Incorrect operand type (not integer) for S/UREM");
    break;
  case FRem:
    assert(getType() == LHS->getType() &&
           "Arithmetic operation should return same type as operands!");
    assert(getType()->isFPOrFPVectorTy() &&
           "Incorrect operand type (not floating point) for FREM");
    break;
  case Shl:
  case LShr:
  case AShr:
    assert(getType() == LHS->getType() &&
           "Shift operation should return same type as operands!");
    assert(getType()->isIntOrIntVectorTy() &&
           "Tried to create a shift operation on a non-integral type!");
    break;
  case And: case Or:
  case Xor:
    assert(getType() == LHS->getType() &&
           "Logical operation should return same type as operands!");
    assert(getType()->isIntOrIntVectorTy() &&
           "Tried to create a logical operation on a non-integral type!");
    break;
  default: llvm_unreachable("Invalid opcode provided");
  }
#endif
}

BinaryOperator *BinaryOperator::Create(BinaryOps Op, Value *S1, Value *S2,
                                       const Twine &Name,
                                       Instruction *InsertBefore) {
  assert(S1->getType() == S2->getType() &&
         "Cannot create binary operator with two operands of differing type!");
  return new BinaryOperator(Op, S1, S2, S1->getType(), Name, InsertBefore);
}

BinaryOperator *BinaryOperator::Create(BinaryOps Op, Value *S1, Value *S2,
                                       const Twine &Name,
                                       BasicBlock *InsertAtEnd) {
  BinaryOperator *Res = Create(Op, S1, S2, Name);
  InsertAtEnd->getInstList().push_back(Res);
  return Res;
}

BinaryOperator *BinaryOperator::CreateNeg(Value *Op, const Twine &Name,
                                          Instruction *InsertBefore) {
  Value *zero = ConstantFP::getZeroValueForNegation(Op->getType());
  return new BinaryOperator(Instruction::Sub,
                            zero, Op,
                            Op->getType(), Name, InsertBefore);
}

BinaryOperator *BinaryOperator::CreateNeg(Value *Op, const Twine &Name,
                                          BasicBlock *InsertAtEnd) {
  Value *zero = ConstantFP::getZeroValueForNegation(Op->getType());
  return new BinaryOperator(Instruction::Sub,
                            zero, Op,
                            Op->getType(), Name, InsertAtEnd);
}

BinaryOperator *BinaryOperator::CreateNSWNeg(Value *Op, const Twine &Name,
                                             Instruction *InsertBefore) {
  Value *zero = ConstantFP::getZeroValueForNegation(Op->getType());
  return BinaryOperator::CreateNSWSub(zero, Op, Name, InsertBefore);
}

BinaryOperator *BinaryOperator::CreateNSWNeg(Value *Op, const Twine &Name,
                                             BasicBlock *InsertAtEnd) {
  Value *zero = ConstantFP::getZeroValueForNegation(Op->getType());
  return BinaryOperator::CreateNSWSub(zero, Op, Name, InsertAtEnd);
}

BinaryOperator *BinaryOperator::CreateNUWNeg(Value *Op, const Twine &Name,
                                             Instruction *InsertBefore) {
  Value *zero = ConstantFP::getZeroValueForNegation(Op->getType());
  return BinaryOperator::CreateNUWSub(zero, Op, Name, InsertBefore);
}

BinaryOperator *BinaryOperator::CreateNUWNeg(Value *Op, const Twine &Name,
                                             BasicBlock *InsertAtEnd) {
  Value *zero = ConstantFP::getZeroValueForNegation(Op->getType());
  return BinaryOperator::CreateNUWSub(zero, Op, Name, InsertAtEnd);
}

BinaryOperator *BinaryOperator::CreateFNeg(Value *Op, const Twine &Name,
                                           Instruction *InsertBefore) {
  Value *zero = ConstantFP::getZeroValueForNegation(Op->getType());
  return new BinaryOperator(Instruction::FSub, zero, Op,
                            Op->getType(), Name, InsertBefore);
}

BinaryOperator *BinaryOperator::CreateFNeg(Value *Op, const Twine &Name,
                                           BasicBlock *InsertAtEnd) {
  Value *zero = ConstantFP::getZeroValueForNegation(Op->getType());
  return new BinaryOperator(Instruction::FSub, zero, Op,
                            Op->getType(), Name, InsertAtEnd);
}

BinaryOperator *BinaryOperator::CreateNot(Value *Op, const Twine &Name,
                                          Instruction *InsertBefore) {
  Constant *C = Constant::getAllOnesValue(Op->getType());
  return new BinaryOperator(Instruction::Xor, Op, C,
                            Op->getType(), Name, InsertBefore);
}

BinaryOperator *BinaryOperator::CreateNot(Value *Op, const Twine &Name,
                                          BasicBlock *InsertAtEnd) {
  Constant *AllOnes = Constant::getAllOnesValue(Op->getType());
  return new BinaryOperator(Instruction::Xor, Op, AllOnes,
                            Op->getType(), Name, InsertAtEnd);
}

// Exchange the two operands of this instruction. This is safe to use on any
// binary instruction; it does not modify the semantics of the instruction.
// Returns true (i.e. failure) if the operator is not commutative, in which
// case the operands are left unchanged.
bool BinaryOperator::swapOperands() {
  if (!isCommutative())
    return true; // Can't commute operands
  Op<0>().swap(Op<1>());
  return false;
}

//===----------------------------------------------------------------------===//
// FPMathOperator Class
//===----------------------------------------------------------------------===//

float FPMathOperator::getFPAccuracy() const {
  const MDNode *MD =
      cast<Instruction>(this)->getMetadata(LLVMContext::MD_fpmath);
  if (!MD)
    return 0.0;
  ConstantFP *Accuracy = mdconst::extract<ConstantFP>(MD->getOperand(0));
  return Accuracy->getValueAPF().convertToFloat();
}
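
// Example for getFPAccuracy() above (IR names are illustrative): an operation
// tagged with !fpmath metadata such as
//
//   %d = fdiv float %x, %y, !fpmath !0
//   ...
//   !0 = !{float 2.500000e+00}
//
// reports an accuracy of 2.5 ULPs, while an operation with no !fpmath metadata
// reports 0.0, i.e. no accuracy was specified.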

//===----------------------------------------------------------------------===//
// CastInst Class
//===----------------------------------------------------------------------===//

// Just determine if this cast only deals with integral->integral conversion.
bool CastInst::isIntegerCast() const {
  switch (getOpcode()) {
  default: return false;
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::Trunc:
    return true;
  case Instruction::BitCast:
    return getOperand(0)->getType()->isIntegerTy() &&
           getType()->isIntegerTy();
  }
}

bool CastInst::isLosslessCast() const {
  // Only BitCast can be lossless, exit fast if we're not BitCast
  if (getOpcode() != Instruction::BitCast)
    return false;

  // Identity cast is always lossless
  Type *SrcTy = getOperand(0)->getType();
  Type *DstTy = getType();
  if (SrcTy == DstTy)
    return true;

  // Pointer to pointer is always lossless.
  if (SrcTy->isPointerTy())
    return DstTy->isPointerTy();
  return false; // Other types have no identity values
}

/// This function determines if the CastInst does not require any bits to be
/// changed in order to effect the cast. Essentially, it identifies cases where
/// no code gen is necessary for the cast, hence the name no-op cast. For
/// example, the following are all no-op casts:
/// # bitcast i32* %x to i8*
/// # bitcast <2 x i32> %x to <4 x i16>
/// # ptrtoint i32* %x to i32 ; on 32-bit platforms only
/// Determine if the described cast is a no-op.
bool CastInst::isNoopCast(Instruction::CastOps Opcode,
                          Type *SrcTy,
                          Type *DestTy,
                          const DataLayout &DL) {
  switch (Opcode) {
  default: llvm_unreachable("Invalid CastOp");
  case Instruction::Trunc:
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::FPTrunc:
  case Instruction::FPExt:
  case Instruction::UIToFP:
  case Instruction::SIToFP:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::AddrSpaceCast:
    // TODO: Target information may give a more accurate answer here.
    return false;
  case Instruction::BitCast:
    return true;  // BitCast never modifies bits.
  case Instruction::PtrToInt:
    return DL.getIntPtrType(SrcTy)->getScalarSizeInBits() ==
           DestTy->getScalarSizeInBits();
  case Instruction::IntToPtr:
    return DL.getIntPtrType(DestTy)->getScalarSizeInBits() ==
           SrcTy->getScalarSizeInBits();
  }
}

bool CastInst::isNoopCast(const DataLayout &DL) const {
  return isNoopCast(getOpcode(), getOperand(0)->getType(), getType(), DL);
}
  2195. /// This function determines if a pair of casts can be eliminated and what
  2196. /// opcode should be used in the elimination. This assumes that there are two
  2197. /// instructions like this:
  2198. /// * %F = firstOpcode SrcTy %x to MidTy
  2199. /// * %S = secondOpcode MidTy %F to DstTy
  2200. /// The function returns a resultOpcode so these two casts can be replaced with:
  2201. /// * %Replacement = resultOpcode %SrcTy %x to DstTy
  2202. /// If no such cast is permitted, the function returns 0.
  2203. unsigned CastInst::isEliminableCastPair(
  2204. Instruction::CastOps firstOp, Instruction::CastOps secondOp,
  2205. Type *SrcTy, Type *MidTy, Type *DstTy, Type *SrcIntPtrTy, Type *MidIntPtrTy,
  2206. Type *DstIntPtrTy) {
  2207. // Define the 144 possibilities for these two cast instructions. The values
  2208. // in this matrix determine what to do in a given situation and select the
  2209. // case in the switch below. The rows correspond to firstOp, the columns
  2210. // correspond to secondOp. In looking at the table below, keep in mind
  2211. // the following cast properties:
  2212. //
  2213. // Size Compare Source Destination
  2214. // Operator Src ? Size Type Sign Type Sign
  2215. // -------- ------------ ------------------- ---------------------
  2216. // TRUNC > Integer Any Integral Any
  2217. // ZEXT < Integral Unsigned Integer Any
  2218. // SEXT < Integral Signed Integer Any
  2219. // FPTOUI n/a FloatPt n/a Integral Unsigned
  2220. // FPTOSI n/a FloatPt n/a Integral Signed
  2221. // UITOFP n/a Integral Unsigned FloatPt n/a
  2222. // SITOFP n/a Integral Signed FloatPt n/a
  2223. // FPTRUNC > FloatPt n/a FloatPt n/a
  2224. // FPEXT < FloatPt n/a FloatPt n/a
  2225. // PTRTOINT n/a Pointer n/a Integral Unsigned
  2226. // INTTOPTR n/a Integral Unsigned Pointer n/a
  2227. // BITCAST = FirstClass n/a FirstClass n/a
  2228. // ADDRSPCST n/a Pointer n/a Pointer n/a
  2229. //
  2230. // NOTE: some transforms are safe, but we consider them to be non-profitable.
  2231. // For example, we could merge "fptoui double to i32" + "zext i32 to i64",
  2232. // into "fptoui double to i64", but this loses information about the range
  2233. // of the produced value (we no longer know the top-part is all zeros).
  2234. // Further this conversion is often much more expensive for typical hardware,
  2235. // and causes issues when building libgcc. We disallow fptosi+sext for the
  2236. // same reason.
  2237. const unsigned numCastOps =
  2238. Instruction::CastOpsEnd - Instruction::CastOpsBegin;
  2239. static const uint8_t CastResults[numCastOps][numCastOps] = {
  2240. // T F F U S F F P I B A -+
  2241. // R Z S P P I I T P 2 N T S |
  2242. // U E E 2 2 2 2 R E I T C C +- secondOp
  2243. // N X X U S F F N X N 2 V V |
  2244. // C T T I I P P C T T P T T -+
  2245. { 1, 0, 0,99,99, 0, 0,99,99,99, 0, 3, 0}, // Trunc -+
  2246. { 8, 1, 9,99,99, 2,17,99,99,99, 2, 3, 0}, // ZExt |
  2247. { 8, 0, 1,99,99, 0, 2,99,99,99, 0, 3, 0}, // SExt |
  2248. { 0, 0, 0,99,99, 0, 0,99,99,99, 0, 3, 0}, // FPToUI |
  2249. { 0, 0, 0,99,99, 0, 0,99,99,99, 0, 3, 0}, // FPToSI |
  2250. { 99,99,99, 0, 0,99,99, 0, 0,99,99, 4, 0}, // UIToFP +- firstOp
  2251. { 99,99,99, 0, 0,99,99, 0, 0,99,99, 4, 0}, // SIToFP |
  2252. { 99,99,99, 0, 0,99,99, 0, 0,99,99, 4, 0}, // FPTrunc |
  2253. { 99,99,99, 2, 2,99,99, 8, 2,99,99, 4, 0}, // FPExt |
  2254. { 1, 0, 0,99,99, 0, 0,99,99,99, 7, 3, 0}, // PtrToInt |
  2255. { 99,99,99,99,99,99,99,99,99,11,99,15, 0}, // IntToPtr |
  2256. { 5, 5, 5, 6, 6, 5, 5, 6, 6,16, 5, 1,14}, // BitCast |
  2257. { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,13,12}, // AddrSpaceCast -+
  2258. };
  2259. // TODO: This logic could be encoded into the table above and handled in the
  2260. // switch below.
  2261. // If either of the casts are a bitcast from scalar to vector, disallow the
  2262. // merging. However, any pair of bitcasts are allowed.
  2263. bool IsFirstBitcast = (firstOp == Instruction::BitCast);
  2264. bool IsSecondBitcast = (secondOp == Instruction::BitCast);
  2265. bool AreBothBitcasts = IsFirstBitcast && IsSecondBitcast;
  2266. // Check if any of the casts convert scalars <-> vectors.
  2267. if ((IsFirstBitcast && isa<VectorType>(SrcTy) != isa<VectorType>(MidTy)) ||
  2268. (IsSecondBitcast && isa<VectorType>(MidTy) != isa<VectorType>(DstTy)))
  2269. if (!AreBothBitcasts)
  2270. return 0;
  2271. int ElimCase = CastResults[firstOp-Instruction::CastOpsBegin]
  2272. [secondOp-Instruction::CastOpsBegin];
  2273. switch (ElimCase) {
  2274. case 0:
  2275. // Categorically disallowed.
  2276. return 0;
  2277. case 1:
  2278. // Allowed, use first cast's opcode.
  2279. return firstOp;
  2280. case 2:
  2281. // Allowed, use second cast's opcode.
  2282. return secondOp;
  2283. case 3:
  2284. // No-op cast in second op implies firstOp as long as the DestTy
  2285. // is integer and we are not converting between a vector and a
  2286. // non-vector type.
  2287. if (!SrcTy->isVectorTy() && DstTy->isIntegerTy())
  2288. return firstOp;
  2289. return 0;
  2290. case 4:
  2291. // No-op cast in second op implies firstOp as long as the DestTy
  2292. // is floating point.
  2293. if (DstTy->isFloatingPointTy())
  2294. return firstOp;
  2295. return 0;
  2296. case 5:
  2297. // No-op cast in first op implies secondOp as long as the SrcTy
  2298. // is an integer.
  2299. if (SrcTy->isIntegerTy())
  2300. return secondOp;
  2301. return 0;
  2302. case 6:
  2303. // No-op cast in first op implies secondOp as long as the SrcTy
  2304. // is a floating point.
  2305. if (SrcTy->isFloatingPointTy())
  2306. return secondOp;
  2307. return 0;
  2308. case 7: {
  2309. // Cannot simplify if address spaces are different!
  2310. if (SrcTy->getPointerAddressSpace() != DstTy->getPointerAddressSpace())
  2311. return 0;
  2312. unsigned MidSize = MidTy->getScalarSizeInBits();
  2313. // We can still fold this without knowing the actual sizes as long we
  2314. // know that the intermediate pointer is the largest possible
  2315. // pointer size.
  2316. // FIXME: Is this always true?
  2317. if (MidSize == 64)
  2318. return Instruction::BitCast;
  2319. // ptrtoint, inttoptr -> bitcast (ptr -> ptr) if int size is >= ptr size.
  2320. if (!SrcIntPtrTy || DstIntPtrTy != SrcIntPtrTy)
  2321. return 0;
  2322. unsigned PtrSize = SrcIntPtrTy->getScalarSizeInBits();
  2323. if (MidSize >= PtrSize)
  2324. return Instruction::BitCast;
  2325. return 0;
  2326. }
  2327. case 8: {
  2328. // ext, trunc -> bitcast, if the SrcTy and DstTy are same size
  2329. // ext, trunc -> ext, if sizeof(SrcTy) < sizeof(DstTy)
  2330. // ext, trunc -> trunc, if sizeof(SrcTy) > sizeof(DstTy)
  2331. unsigned SrcSize = SrcTy->getScalarSizeInBits();
  2332. unsigned DstSize = DstTy->getScalarSizeInBits();
  2333. if (SrcSize == DstSize)
  2334. return Instruction::BitCast;
  2335. else if (SrcSize < DstSize)
  2336. return firstOp;
  2337. return secondOp;
  2338. }
  2339. case 9:
  2340. // zext, sext -> zext, because sext can't sign extend after zext
  2341. return Instruction::ZExt;
  2342. case 11: {
  2343. // inttoptr, ptrtoint -> bitcast if SrcSize<=PtrSize and SrcSize==DstSize
  2344. if (!MidIntPtrTy)
  2345. return 0;
  2346. unsigned PtrSize = MidIntPtrTy->getScalarSizeInBits();
  2347. unsigned SrcSize = SrcTy->getScalarSizeInBits();
  2348. unsigned DstSize = DstTy->getScalarSizeInBits();
  2349. if (SrcSize <= PtrSize && SrcSize == DstSize)
  2350. return Instruction::BitCast;
  2351. return 0;
  2352. }
  2353. case 12:
  2354. // addrspacecast, addrspacecast -> bitcast, if SrcAS == DstAS
  2355. // addrspacecast, addrspacecast -> addrspacecast, if SrcAS != DstAS
  2356. if (SrcTy->getPointerAddressSpace() != DstTy->getPointerAddressSpace())
  2357. return Instruction::AddrSpaceCast;
  2358. return Instruction::BitCast;
  2359. case 13:
  2360. // FIXME: this state can be merged with (1), but the following assert
  2361. // is useful to check the correcteness of the sequence due to semantic
  2362. // change of bitcast.
  2363. assert(
  2364. SrcTy->isPtrOrPtrVectorTy() &&
  2365. MidTy->isPtrOrPtrVectorTy() &&
  2366. DstTy->isPtrOrPtrVectorTy() &&
  2367. SrcTy->getPointerAddressSpace() != MidTy->getPointerAddressSpace() &&
  2368. MidTy->getPointerAddressSpace() == DstTy->getPointerAddressSpace() &&
  2369. "Illegal addrspacecast, bitcast sequence!");
  2370. // Allowed, use first cast's opcode
  2371. return firstOp;
  2372. case 14:
  2373. // bitcast, addrspacecast -> addrspacecast if the element type of
  2374. // bitcast's source is the same as that of addrspacecast's destination.
  2375. if (SrcTy->getScalarType()->getPointerElementType() ==
  2376. DstTy->getScalarType()->getPointerElementType())
  2377. return Instruction::AddrSpaceCast;
  2378. return 0;
  2379. case 15:
  2380. // FIXME: this state can be merged with (1), but the following assert
  2381. // is useful to check the correcteness of the sequence due to semantic
  2382. // change of bitcast.
  2383. assert(
  2384. SrcTy->isIntOrIntVectorTy() &&
  2385. MidTy->isPtrOrPtrVectorTy() &&
  2386. DstTy->isPtrOrPtrVectorTy() &&
  2387. MidTy->getPointerAddressSpace() == DstTy->getPointerAddressSpace() &&
  2388. "Illegal inttoptr, bitcast sequence!");
  2389. // Allowed, use first cast's opcode
  2390. return firstOp;
  2391. case 16:
  2392. // FIXME: this state can be merged with (2), but the following assert
  2393. // is useful to check the correcteness of the sequence due to semantic
  2394. // change of bitcast.
  2395. assert(
  2396. SrcTy->isPtrOrPtrVectorTy() &&
  2397. MidTy->isPtrOrPtrVectorTy() &&
  2398. DstTy->isIntOrIntVectorTy() &&
  2399. SrcTy->getPointerAddressSpace() == MidTy->getPointerAddressSpace() &&
  2400. "Illegal bitcast, ptrtoint sequence!");
  2401. // Allowed, use second cast's opcode
  2402. return secondOp;
  2403. case 17:
  2404. // (sitofp (zext x)) -> (uitofp x)
  2405. return Instruction::UIToFP;
  2406. case 99:
  2407. // Cast combination can't happen (error in input). This is for all cases
  2408. // where the MidTy is not the same for the two cast instructions.
  2409. llvm_unreachable("Invalid Cast Combination");
  2410. default:
  2411. llvm_unreachable("Error in CastResults table!!!");
  2412. }
  2413. }
  2414. CastInst *CastInst::Create(Instruction::CastOps op, Value *S, Type *Ty,
  2415. const Twine &Name, Instruction *InsertBefore) {
  2416. assert(castIsValid(op, S, Ty) && "Invalid cast!");
  2417. // Construct and return the appropriate CastInst subclass
  2418. switch (op) {
  2419. case Trunc: return new TruncInst (S, Ty, Name, InsertBefore);
  2420. case ZExt: return new ZExtInst (S, Ty, Name, InsertBefore);
  2421. case SExt: return new SExtInst (S, Ty, Name, InsertBefore);
  2422. case FPTrunc: return new FPTruncInst (S, Ty, Name, InsertBefore);
  2423. case FPExt: return new FPExtInst (S, Ty, Name, InsertBefore);
  2424. case UIToFP: return new UIToFPInst (S, Ty, Name, InsertBefore);
  2425. case SIToFP: return new SIToFPInst (S, Ty, Name, InsertBefore);
  2426. case FPToUI: return new FPToUIInst (S, Ty, Name, InsertBefore);
  2427. case FPToSI: return new FPToSIInst (S, Ty, Name, InsertBefore);
  2428. case PtrToInt: return new PtrToIntInst (S, Ty, Name, InsertBefore);
  2429. case IntToPtr: return new IntToPtrInst (S, Ty, Name, InsertBefore);
  2430. case BitCast: return new BitCastInst (S, Ty, Name, InsertBefore);
  2431. case AddrSpaceCast: return new AddrSpaceCastInst (S, Ty, Name, InsertBefore);
  2432. default: llvm_unreachable("Invalid opcode provided");
  2433. }
  2434. }
  2435. CastInst *CastInst::Create(Instruction::CastOps op, Value *S, Type *Ty,
  2436. const Twine &Name, BasicBlock *InsertAtEnd) {
  2437. assert(castIsValid(op, S, Ty) && "Invalid cast!");
  2438. // Construct and return the appropriate CastInst subclass
  2439. switch (op) {
  2440. case Trunc: return new TruncInst (S, Ty, Name, InsertAtEnd);
  2441. case ZExt: return new ZExtInst (S, Ty, Name, InsertAtEnd);
  2442. case SExt: return new SExtInst (S, Ty, Name, InsertAtEnd);
  2443. case FPTrunc: return new FPTruncInst (S, Ty, Name, InsertAtEnd);
  2444. case FPExt: return new FPExtInst (S, Ty, Name, InsertAtEnd);
  2445. case UIToFP: return new UIToFPInst (S, Ty, Name, InsertAtEnd);
  2446. case SIToFP: return new SIToFPInst (S, Ty, Name, InsertAtEnd);
  2447. case FPToUI: return new FPToUIInst (S, Ty, Name, InsertAtEnd);
  2448. case FPToSI: return new FPToSIInst (S, Ty, Name, InsertAtEnd);
  2449. case PtrToInt: return new PtrToIntInst (S, Ty, Name, InsertAtEnd);
  2450. case IntToPtr: return new IntToPtrInst (S, Ty, Name, InsertAtEnd);
  2451. case BitCast: return new BitCastInst (S, Ty, Name, InsertAtEnd);
  2452. case AddrSpaceCast: return new AddrSpaceCastInst (S, Ty, Name, InsertAtEnd);
  2453. default: llvm_unreachable("Invalid opcode provided");
  2454. }
  2455. }
  2456. CastInst *CastInst::CreateZExtOrBitCast(Value *S, Type *Ty,
  2457. const Twine &Name,
  2458. Instruction *InsertBefore) {
  2459. if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
  2460. return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
  2461. return Create(Instruction::ZExt, S, Ty, Name, InsertBefore);
  2462. }
  2463. CastInst *CastInst::CreateZExtOrBitCast(Value *S, Type *Ty,
  2464. const Twine &Name,
  2465. BasicBlock *InsertAtEnd) {
  2466. if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
  2467. return Create(Instruction::BitCast, S, Ty, Name, InsertAtEnd);
  2468. return Create(Instruction::ZExt, S, Ty, Name, InsertAtEnd);
  2469. }
  2470. CastInst *CastInst::CreateSExtOrBitCast(Value *S, Type *Ty,
  2471. const Twine &Name,
  2472. Instruction *InsertBefore) {
  2473. if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
  2474. return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
  2475. return Create(Instruction::SExt, S, Ty, Name, InsertBefore);
  2476. }
  2477. CastInst *CastInst::CreateSExtOrBitCast(Value *S, Type *Ty,
  2478. const Twine &Name,
  2479. BasicBlock *InsertAtEnd) {
  2480. if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
  2481. return Create(Instruction::BitCast, S, Ty, Name, InsertAtEnd);
  2482. return Create(Instruction::SExt, S, Ty, Name, InsertAtEnd);
  2483. }
  2484. CastInst *CastInst::CreateTruncOrBitCast(Value *S, Type *Ty,
  2485. const Twine &Name,
  2486. Instruction *InsertBefore) {
  2487. if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
  2488. return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
  2489. return Create(Instruction::Trunc, S, Ty, Name, InsertBefore);
  2490. }
  2491. CastInst *CastInst::CreateTruncOrBitCast(Value *S, Type *Ty,
  2492. const Twine &Name,
  2493. BasicBlock *InsertAtEnd) {
  2494. if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
  2495. return Create(Instruction::BitCast, S, Ty, Name, InsertAtEnd);
  2496. return Create(Instruction::Trunc, S, Ty, Name, InsertAtEnd);
  2497. }
  2498. CastInst *CastInst::CreatePointerCast(Value *S, Type *Ty,
  2499. const Twine &Name,
  2500. BasicBlock *InsertAtEnd) {
  2501. assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast");
  2502. assert((Ty->isIntOrIntVectorTy() || Ty->isPtrOrPtrVectorTy()) &&
  2503. "Invalid cast");
  2504. assert(Ty->isVectorTy() == S->getType()->isVectorTy() && "Invalid cast");
  2505. assert((!Ty->isVectorTy() ||
  2506. Ty->getVectorNumElements() == S->getType()->getVectorNumElements()) &&
  2507. "Invalid cast");
  2508. if (Ty->isIntOrIntVectorTy())
  2509. return Create(Instruction::PtrToInt, S, Ty, Name, InsertAtEnd);
  2510. return CreatePointerBitCastOrAddrSpaceCast(S, Ty, Name, InsertAtEnd);
  2511. }
  2512. /// Create a BitCast or a PtrToInt cast instruction
  2513. CastInst *CastInst::CreatePointerCast(Value *S, Type *Ty,
  2514. const Twine &Name,
  2515. Instruction *InsertBefore) {
  2516. assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast");
  2517. assert((Ty->isIntOrIntVectorTy() || Ty->isPtrOrPtrVectorTy()) &&
  2518. "Invalid cast");
  2519. assert(Ty->isVectorTy() == S->getType()->isVectorTy() && "Invalid cast");
  2520. assert((!Ty->isVectorTy() ||
  2521. Ty->getVectorNumElements() == S->getType()->getVectorNumElements()) &&
  2522. "Invalid cast");
  2523. if (Ty->isIntOrIntVectorTy())
  2524. return Create(Instruction::PtrToInt, S, Ty, Name, InsertBefore);
  2525. return CreatePointerBitCastOrAddrSpaceCast(S, Ty, Name, InsertBefore);
  2526. }
  2527. CastInst *CastInst::CreatePointerBitCastOrAddrSpaceCast(
  2528. Value *S, Type *Ty,
  2529. const Twine &Name,
  2530. BasicBlock *InsertAtEnd) {
  2531. assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast");
  2532. assert(Ty->isPtrOrPtrVectorTy() && "Invalid cast");
  2533. if (S->getType()->getPointerAddressSpace() != Ty->getPointerAddressSpace())
  2534. return Create(Instruction::AddrSpaceCast, S, Ty, Name, InsertAtEnd);
  2535. return Create(Instruction::BitCast, S, Ty, Name, InsertAtEnd);
  2536. }
  2537. CastInst *CastInst::CreatePointerBitCastOrAddrSpaceCast(
  2538. Value *S, Type *Ty,
  2539. const Twine &Name,
  2540. Instruction *InsertBefore) {
  2541. assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast");
  2542. assert(Ty->isPtrOrPtrVectorTy() && "Invalid cast");
  2543. if (S->getType()->getPointerAddressSpace() != Ty->getPointerAddressSpace())
  2544. return Create(Instruction::AddrSpaceCast, S, Ty, Name, InsertBefore);
  2545. return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
  2546. }
  2547. CastInst *CastInst::CreateBitOrPointerCast(Value *S, Type *Ty,
  2548. const Twine &Name,
  2549. Instruction *InsertBefore) {
  2550. if (S->getType()->isPointerTy() && Ty->isIntegerTy())
  2551. return Create(Instruction::PtrToInt, S, Ty, Name, InsertBefore);
  2552. if (S->getType()->isIntegerTy() && Ty->isPointerTy())
  2553. return Create(Instruction::IntToPtr, S, Ty, Name, InsertBefore);
  2554. return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
  2555. }
  2556. CastInst *CastInst::CreateIntegerCast(Value *C, Type *Ty,
  2557. bool isSigned, const Twine &Name,
  2558. Instruction *InsertBefore) {
  2559. assert(C->getType()->isIntOrIntVectorTy() && Ty->isIntOrIntVectorTy() &&
  2560. "Invalid integer cast");
  2561. unsigned SrcBits = C->getType()->getScalarSizeInBits();
  2562. unsigned DstBits = Ty->getScalarSizeInBits();
  2563. Instruction::CastOps opcode =
  2564. (SrcBits == DstBits ? Instruction::BitCast :
  2565. (SrcBits > DstBits ? Instruction::Trunc :
  2566. (isSigned ? Instruction::SExt : Instruction::ZExt)));
  2567. return Create(opcode, C, Ty, Name, InsertBefore);
  2568. }
  2569. CastInst *CastInst::CreateIntegerCast(Value *C, Type *Ty,
  2570. bool isSigned, const Twine &Name,
  2571. BasicBlock *InsertAtEnd) {
  2572. assert(C->getType()->isIntOrIntVectorTy() && Ty->isIntOrIntVectorTy() &&
  2573. "Invalid cast");
  2574. unsigned SrcBits = C->getType()->getScalarSizeInBits();
  2575. unsigned DstBits = Ty->getScalarSizeInBits();
  2576. Instruction::CastOps opcode =
  2577. (SrcBits == DstBits ? Instruction::BitCast :
  2578. (SrcBits > DstBits ? Instruction::Trunc :
  2579. (isSigned ? Instruction::SExt : Instruction::ZExt)));
  2580. return Create(opcode, C, Ty, Name, InsertAtEnd);
  2581. }
  2582. CastInst *CastInst::CreateFPCast(Value *C, Type *Ty,
  2583. const Twine &Name,
  2584. Instruction *InsertBefore) {
  2585. assert(C->getType()->isFPOrFPVectorTy() && Ty->isFPOrFPVectorTy() &&
  2586. "Invalid cast");
  2587. unsigned SrcBits = C->getType()->getScalarSizeInBits();
  2588. unsigned DstBits = Ty->getScalarSizeInBits();
  2589. Instruction::CastOps opcode =
  2590. (SrcBits == DstBits ? Instruction::BitCast :
  2591. (SrcBits > DstBits ? Instruction::FPTrunc : Instruction::FPExt));
  2592. return Create(opcode, C, Ty, Name, InsertBefore);
  2593. }
  2594. CastInst *CastInst::CreateFPCast(Value *C, Type *Ty,
  2595. const Twine &Name,
  2596. BasicBlock *InsertAtEnd) {
  2597. assert(C->getType()->isFPOrFPVectorTy() && Ty->isFPOrFPVectorTy() &&
  2598. "Invalid cast");
  2599. unsigned SrcBits = C->getType()->getScalarSizeInBits();
  2600. unsigned DstBits = Ty->getScalarSizeInBits();
  2601. Instruction::CastOps opcode =
  2602. (SrcBits == DstBits ? Instruction::BitCast :
  2603. (SrcBits > DstBits ? Instruction::FPTrunc : Instruction::FPExt));
  2604. return Create(opcode, C, Ty, Name, InsertAtEnd);
  2605. }
  2606. // Check whether it is valid to call getCastOpcode for these types.
  2607. // This routine must be kept in sync with getCastOpcode.
  2608. bool CastInst::isCastable(Type *SrcTy, Type *DestTy) {
  2609. if (!SrcTy->isFirstClassType() || !DestTy->isFirstClassType())
  2610. return false;
  2611. if (SrcTy == DestTy)
  2612. return true;
  2613. if (VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy))
  2614. if (VectorType *DestVecTy = dyn_cast<VectorType>(DestTy))
  2615. if (SrcVecTy->getNumElements() == DestVecTy->getNumElements()) {
  2616. // An element by element cast. Valid if casting the elements is valid.
  2617. SrcTy = SrcVecTy->getElementType();
  2618. DestTy = DestVecTy->getElementType();
  2619. }
  2620. // Get the bit sizes, we'll need these
  2621. unsigned SrcBits = SrcTy->getPrimitiveSizeInBits(); // 0 for ptr
  2622. unsigned DestBits = DestTy->getPrimitiveSizeInBits(); // 0 for ptr
  2623. // Run through the possibilities ...
  2624. if (DestTy->isIntegerTy()) { // Casting to integral
  2625. if (SrcTy->isIntegerTy()) // Casting from integral
  2626. return true;
  2627. if (SrcTy->isFloatingPointTy()) // Casting from floating pt
  2628. return true;
  2629. if (SrcTy->isVectorTy()) // Casting from vector
  2630. return DestBits == SrcBits;
  2631. // Casting from something else
  2632. return SrcTy->isPointerTy();
  2633. }
  2634. if (DestTy->isFloatingPointTy()) { // Casting to floating pt
  2635. if (SrcTy->isIntegerTy()) // Casting from integral
  2636. return true;
  2637. if (SrcTy->isFloatingPointTy()) // Casting from floating pt
  2638. return true;
  2639. if (SrcTy->isVectorTy()) // Casting from vector
  2640. return DestBits == SrcBits;
  2641. // Casting from something else
  2642. return false;
  2643. }
  2644. if (DestTy->isVectorTy()) // Casting to vector
  2645. return DestBits == SrcBits;
  2646. if (DestTy->isPointerTy()) { // Casting to pointer
  2647. if (SrcTy->isPointerTy()) // Casting from pointer
  2648. return true;
  2649. return SrcTy->isIntegerTy(); // Casting from integral
  2650. }
  2651. if (DestTy->isX86_MMXTy()) {
  2652. if (SrcTy->isVectorTy())
  2653. return DestBits == SrcBits; // 64-bit vector to MMX
  2654. return false;
  2655. } // Casting to something else
  2656. return false;
  2657. }
  2658. bool CastInst::isBitCastable(Type *SrcTy, Type *DestTy) {
  2659. if (!SrcTy->isFirstClassType() || !DestTy->isFirstClassType())
  2660. return false;
  2661. if (SrcTy == DestTy)
  2662. return true;
  2663. if (VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy)) {
  2664. if (VectorType *DestVecTy = dyn_cast<VectorType>(DestTy)) {
  2665. if (SrcVecTy->getNumElements() == DestVecTy->getNumElements()) {
  2666. // An element by element cast. Valid if casting the elements is valid.
  2667. SrcTy = SrcVecTy->getElementType();
  2668. DestTy = DestVecTy->getElementType();
  2669. }
  2670. }
  2671. }
  2672. if (PointerType *DestPtrTy = dyn_cast<PointerType>(DestTy)) {
  2673. if (PointerType *SrcPtrTy = dyn_cast<PointerType>(SrcTy)) {
  2674. return SrcPtrTy->getAddressSpace() == DestPtrTy->getAddressSpace();
  2675. }
  2676. }
  2677. unsigned SrcBits = SrcTy->getPrimitiveSizeInBits(); // 0 for ptr
  2678. unsigned DestBits = DestTy->getPrimitiveSizeInBits(); // 0 for ptr
  2679. // Could still have vectors of pointers if the number of elements doesn't
  2680. // match
  2681. if (SrcBits == 0 || DestBits == 0)
  2682. return false;
  2683. if (SrcBits != DestBits)
  2684. return false;
  2685. if (DestTy->isX86_MMXTy() || SrcTy->isX86_MMXTy())
  2686. return false;
  2687. return true;
  2688. }
  2689. bool CastInst::isBitOrNoopPointerCastable(Type *SrcTy, Type *DestTy,
  2690. const DataLayout &DL) {
  2691. // ptrtoint and inttoptr are not allowed on non-integral pointers
  2692. if (auto *PtrTy = dyn_cast<PointerType>(SrcTy))
  2693. if (auto *IntTy = dyn_cast<IntegerType>(DestTy))
  2694. return (IntTy->getBitWidth() == DL.getPointerTypeSizeInBits(PtrTy) &&
  2695. !DL.isNonIntegralPointerType(PtrTy));
  2696. if (auto *PtrTy = dyn_cast<PointerType>(DestTy))
  2697. if (auto *IntTy = dyn_cast<IntegerType>(SrcTy))
  2698. return (IntTy->getBitWidth() == DL.getPointerTypeSizeInBits(PtrTy) &&
  2699. !DL.isNonIntegralPointerType(PtrTy));
  2700. return isBitCastable(SrcTy, DestTy);
  2701. }
  2702. // Provide a way to get a "cast" where the cast opcode is inferred from the
  2703. // types and size of the operand. This, basically, is a parallel of the
  2704. // logic in the castIsValid function below. This axiom should hold:
  2705. // castIsValid( getCastOpcode(Val, Ty), Val, Ty)
  2706. // should not assert in castIsValid. In other words, this produces a "correct"
  2707. // casting opcode for the arguments passed to it.
  2708. // This routine must be kept in sync with isCastable.
  2709. Instruction::CastOps
  2710. CastInst::getCastOpcode(
  2711. const Value *Src, bool SrcIsSigned, Type *DestTy, bool DestIsSigned) {
  2712. Type *SrcTy = Src->getType();
  2713. assert(SrcTy->isFirstClassType() && DestTy->isFirstClassType() &&
  2714. "Only first class types are castable!");
  2715. if (SrcTy == DestTy)
  2716. return BitCast;
  2717. // FIXME: Check address space sizes here
  2718. if (VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy))
  2719. if (VectorType *DestVecTy = dyn_cast<VectorType>(DestTy))
  2720. if (SrcVecTy->getNumElements() == DestVecTy->getNumElements()) {
  2721. // An element by element cast. Find the appropriate opcode based on the
  2722. // element types.
  2723. SrcTy = SrcVecTy->getElementType();
  2724. DestTy = DestVecTy->getElementType();
  2725. }
  2726. // Get the bit sizes, we'll need these
  2727. unsigned SrcBits = SrcTy->getPrimitiveSizeInBits(); // 0 for ptr
  2728. unsigned DestBits = DestTy->getPrimitiveSizeInBits(); // 0 for ptr
  2729. // Run through the possibilities ...
  2730. if (DestTy->isIntegerTy()) { // Casting to integral
  2731. if (SrcTy->isIntegerTy()) { // Casting from integral
  2732. if (DestBits < SrcBits)
  2733. return Trunc; // int -> smaller int
  2734. else if (DestBits > SrcBits) { // its an extension
  2735. if (SrcIsSigned)
  2736. return SExt; // signed -> SEXT
  2737. else
  2738. return ZExt; // unsigned -> ZEXT
  2739. } else {
  2740. return BitCast; // Same size, No-op cast
  2741. }
  2742. } else if (SrcTy->isFloatingPointTy()) { // Casting from floating pt
  2743. if (DestIsSigned)
  2744. return FPToSI; // FP -> sint
  2745. else
  2746. return FPToUI; // FP -> uint
  2747. } else if (SrcTy->isVectorTy()) {
  2748. assert(DestBits == SrcBits &&
  2749. "Casting vector to integer of different width");
  2750. return BitCast; // Same size, no-op cast
  2751. } else {
  2752. assert(SrcTy->isPointerTy() &&
  2753. "Casting from a value that is not first-class type");
  2754. return PtrToInt; // ptr -> int
  2755. }
  2756. } else if (DestTy->isFloatingPointTy()) { // Casting to floating pt
  2757. if (SrcTy->isIntegerTy()) { // Casting from integral
  2758. if (SrcIsSigned)
  2759. return SIToFP; // sint -> FP
  2760. else
  2761. return UIToFP; // uint -> FP
  2762. } else if (SrcTy->isFloatingPointTy()) { // Casting from floating pt
  2763. if (DestBits < SrcBits) {
  2764. return FPTrunc; // FP -> smaller FP
  2765. } else if (DestBits > SrcBits) {
  2766. return FPExt; // FP -> larger FP
  2767. } else {
  2768. return BitCast; // same size, no-op cast
  2769. }
  2770. } else if (SrcTy->isVectorTy()) {
  2771. assert(DestBits == SrcBits &&
  2772. "Casting vector to floating point of different width");
  2773. return BitCast; // same size, no-op cast
  2774. }
  2775. llvm_unreachable("Casting pointer or non-first class to float");
  2776. } else if (DestTy->isVectorTy()) {
  2777. assert(DestBits == SrcBits &&
  2778. "Illegal cast to vector (wrong type or size)");
  2779. return BitCast;
  2780. } else if (DestTy->isPointerTy()) {
  2781. if (SrcTy->isPointerTy()) {
  2782. if (DestTy->getPointerAddressSpace() != SrcTy->getPointerAddressSpace())
  2783. return AddrSpaceCast;
  2784. return BitCast; // ptr -> ptr
  2785. } else if (SrcTy->isIntegerTy()) {
  2786. return IntToPtr; // int -> ptr
  2787. }
  2788. llvm_unreachable("Casting pointer to other than pointer or int");
  2789. } else if (DestTy->isX86_MMXTy()) {
  2790. if (SrcTy->isVectorTy()) {
  2791. assert(DestBits == SrcBits && "Casting vector of wrong width to X86_MMX");
  2792. return BitCast; // 64-bit vector to MMX
  2793. }
  2794. llvm_unreachable("Illegal cast to X86_MMX");
  2795. }
  2796. llvm_unreachable("Casting to type that is not first-class");
  2797. }
  2798. //===----------------------------------------------------------------------===//
  2799. // CastInst SubClass Constructors
  2800. //===----------------------------------------------------------------------===//
  2801. /// Check that the construction parameters for a CastInst are correct. This
  2802. /// could be broken out into the separate constructors but it is useful to have
  2803. /// it in one place and to eliminate the redundant code for getting the sizes
  2804. /// of the types involved.
  2805. bool
  2806. CastInst::castIsValid(Instruction::CastOps op, Value *S, Type *DstTy) {
  2807. // Check for type sanity on the arguments
  2808. Type *SrcTy = S->getType();
  2809. if (!SrcTy->isFirstClassType() || !DstTy->isFirstClassType() ||
  2810. SrcTy->isAggregateType() || DstTy->isAggregateType())
  2811. return false;
  2812. // Get the size of the types in bits, we'll need this later
  2813. unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
  2814. unsigned DstBitSize = DstTy->getScalarSizeInBits();
  2815. // If these are vector types, get the lengths of the vectors (using zero for
  2816. // scalar types means that checking that vector lengths match also checks that
  2817. // scalars are not being converted to vectors or vectors to scalars).
  2818. unsigned SrcLength = SrcTy->isVectorTy() ?
  2819. cast<VectorType>(SrcTy)->getNumElements() : 0;
  2820. unsigned DstLength = DstTy->isVectorTy() ?
  2821. cast<VectorType>(DstTy)->getNumElements() : 0;
  2822. // Switch on the opcode provided
  2823. switch (op) {
  2824. default: return false; // This is an input error
  2825. case Instruction::Trunc:
  2826. return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() &&
  2827. SrcLength == DstLength && SrcBitSize > DstBitSize;
  2828. case Instruction::ZExt:
  2829. return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() &&
  2830. SrcLength == DstLength && SrcBitSize < DstBitSize;
  2831. case Instruction::SExt:
  2832. return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() &&
  2833. SrcLength == DstLength && SrcBitSize < DstBitSize;
  2834. case Instruction::FPTrunc:
  2835. return SrcTy->isFPOrFPVectorTy() && DstTy->isFPOrFPVectorTy() &&
  2836. SrcLength == DstLength && SrcBitSize > DstBitSize;
  2837. case Instruction::FPExt:
  2838. return SrcTy->isFPOrFPVectorTy() && DstTy->isFPOrFPVectorTy() &&
  2839. SrcLength == DstLength && SrcBitSize < DstBitSize;
  2840. case Instruction::UIToFP:
  2841. case Instruction::SIToFP:
  2842. return SrcTy->isIntOrIntVectorTy() && DstTy->isFPOrFPVectorTy() &&
  2843. SrcLength == DstLength;
  2844. case Instruction::FPToUI:
  2845. case Instruction::FPToSI:
  2846. return SrcTy->isFPOrFPVectorTy() && DstTy->isIntOrIntVectorTy() &&
  2847. SrcLength == DstLength;
  2848. case Instruction::PtrToInt:
  2849. if (isa<VectorType>(SrcTy) != isa<VectorType>(DstTy))
  2850. return false;
  2851. if (VectorType *VT = dyn_cast<VectorType>(SrcTy))
  2852. if (VT->getNumElements() != cast<VectorType>(DstTy)->getNumElements())
  2853. return false;
  2854. return SrcTy->isPtrOrPtrVectorTy() && DstTy->isIntOrIntVectorTy();
  2855. case Instruction::IntToPtr:
  2856. if (isa<VectorType>(SrcTy) != isa<VectorType>(DstTy))
  2857. return false;
  2858. if (VectorType *VT = dyn_cast<VectorType>(SrcTy))
  2859. if (VT->getNumElements() != cast<VectorType>(DstTy)->getNumElements())
  2860. return false;
  2861. return SrcTy->isIntOrIntVectorTy() && DstTy->isPtrOrPtrVectorTy();
  2862. case Instruction::BitCast: {
  2863. PointerType *SrcPtrTy = dyn_cast<PointerType>(SrcTy->getScalarType());
  2864. PointerType *DstPtrTy = dyn_cast<PointerType>(DstTy->getScalarType());
  2865. // BitCast implies a no-op cast of type only. No bits change.
  2866. // However, you can't cast pointers to anything but pointers.
  2867. if (!SrcPtrTy != !DstPtrTy)
  2868. return false;
  2869. // For non-pointer cases, the cast is okay if the source and destination bit
  2870. // widths are identical.
  2871. if (!SrcPtrTy)
  2872. return SrcTy->getPrimitiveSizeInBits() == DstTy->getPrimitiveSizeInBits();
  2873. // If both are pointers then the address spaces must match.
  2874. if (SrcPtrTy->getAddressSpace() != DstPtrTy->getAddressSpace())
  2875. return false;
  2876. // A vector of pointers must have the same number of elements.
  2877. VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy);
  2878. VectorType *DstVecTy = dyn_cast<VectorType>(DstTy);
  2879. if (SrcVecTy && DstVecTy)
  2880. return (SrcVecTy->getNumElements() == DstVecTy->getNumElements());
  2881. if (SrcVecTy)
  2882. return SrcVecTy->getNumElements() == 1;
  2883. if (DstVecTy)
  2884. return DstVecTy->getNumElements() == 1;
  2885. return true;
  2886. }
  2887. case Instruction::AddrSpaceCast: {
  2888. PointerType *SrcPtrTy = dyn_cast<PointerType>(SrcTy->getScalarType());
  2889. if (!SrcPtrTy)
  2890. return false;
  2891. PointerType *DstPtrTy = dyn_cast<PointerType>(DstTy->getScalarType());
  2892. if (!DstPtrTy)
  2893. return false;
  2894. if (SrcPtrTy->getAddressSpace() == DstPtrTy->getAddressSpace())
  2895. return false;
  2896. if (VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy)) {
  2897. if (VectorType *DstVecTy = dyn_cast<VectorType>(DstTy))
  2898. return (SrcVecTy->getNumElements() == DstVecTy->getNumElements());
  2899. return false;
  2900. }
  2901. return true;
  2902. }
  2903. }
  2904. }
  2905. TruncInst::TruncInst(
  2906. Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
  2907. ) : CastInst(Ty, Trunc, S, Name, InsertBefore) {
  2908. assert(castIsValid(getOpcode(), S, Ty) && "Illegal Trunc");
  2909. }
  2910. TruncInst::TruncInst(
  2911. Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
  2912. ) : CastInst(Ty, Trunc, S, Name, InsertAtEnd) {
  2913. assert(castIsValid(getOpcode(), S, Ty) && "Illegal Trunc");
  2914. }
  2915. ZExtInst::ZExtInst(
  2916. Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
  2917. ) : CastInst(Ty, ZExt, S, Name, InsertBefore) {
  2918. assert(castIsValid(getOpcode(), S, Ty) && "Illegal ZExt");
  2919. }
  2920. ZExtInst::ZExtInst(
  2921. Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
  2922. ) : CastInst(Ty, ZExt, S, Name, InsertAtEnd) {
  2923. assert(castIsValid(getOpcode(), S, Ty) && "Illegal ZExt");
  2924. }
  2925. SExtInst::SExtInst(
  2926. Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
  2927. ) : CastInst(Ty, SExt, S, Name, InsertBefore) {
  2928. assert(castIsValid(getOpcode(), S, Ty) && "Illegal SExt");
  2929. }
  2930. SExtInst::SExtInst(
  2931. Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
  2932. ) : CastInst(Ty, SExt, S, Name, InsertAtEnd) {
  2933. assert(castIsValid(getOpcode(), S, Ty) && "Illegal SExt");
  2934. }
  2935. FPTruncInst::FPTruncInst(
  2936. Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
  2937. ) : CastInst(Ty, FPTrunc, S, Name, InsertBefore) {
  2938. assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPTrunc");
  2939. }
  2940. FPTruncInst::FPTruncInst(
  2941. Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
  2942. ) : CastInst(Ty, FPTrunc, S, Name, InsertAtEnd) {
  2943. assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPTrunc");
  2944. }
  2945. FPExtInst::FPExtInst(
  2946. Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
  2947. ) : CastInst(Ty, FPExt, S, Name, InsertBefore) {
  2948. assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPExt");
  2949. }
  2950. FPExtInst::FPExtInst(
  2951. Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
  2952. ) : CastInst(Ty, FPExt, S, Name, InsertAtEnd) {
  2953. assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPExt");
  2954. }
  2955. UIToFPInst::UIToFPInst(
  2956. Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
  2957. ) : CastInst(Ty, UIToFP, S, Name, InsertBefore) {
  2958. assert(castIsValid(getOpcode(), S, Ty) && "Illegal UIToFP");
  2959. }
  2960. UIToFPInst::UIToFPInst(
  2961. Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
  2962. ) : CastInst(Ty, UIToFP, S, Name, InsertAtEnd) {
  2963. assert(castIsValid(getOpcode(), S, Ty) && "Illegal UIToFP");
  2964. }
  2965. SIToFPInst::SIToFPInst(
  2966. Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
  2967. ) : CastInst(Ty, SIToFP, S, Name, InsertBefore) {
  2968. assert(castIsValid(getOpcode(), S, Ty) && "Illegal SIToFP");
  2969. }
  2970. SIToFPInst::SIToFPInst(
  2971. Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
  2972. ) : CastInst(Ty, SIToFP, S, Name, InsertAtEnd) {
  2973. assert(castIsValid(getOpcode(), S, Ty) && "Illegal SIToFP");
  2974. }
  2975. FPToUIInst::FPToUIInst(
  2976. Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
  2977. ) : CastInst(Ty, FPToUI, S, Name, InsertBefore) {
  2978. assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToUI");
  2979. }
  2980. FPToUIInst::FPToUIInst(
  2981. Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
  2982. ) : CastInst(Ty, FPToUI, S, Name, InsertAtEnd) {
  2983. assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToUI");
  2984. }
  2985. FPToSIInst::FPToSIInst(
  2986. Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
  2987. ) : CastInst(Ty, FPToSI, S, Name, InsertBefore) {
  2988. assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToSI");
  2989. }
  2990. FPToSIInst::FPToSIInst(
  2991. Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
  2992. ) : CastInst(Ty, FPToSI, S, Name, InsertAtEnd) {
  2993. assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToSI");
  2994. }
  2995. PtrToIntInst::PtrToIntInst(
  2996. Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
  2997. ) : CastInst(Ty, PtrToInt, S, Name, InsertBefore) {
  2998. assert(castIsValid(getOpcode(), S, Ty) && "Illegal PtrToInt");
  2999. }
  3000. PtrToIntInst::PtrToIntInst(
  3001. Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
  3002. ) : CastInst(Ty, PtrToInt, S, Name, InsertAtEnd) {
  3003. assert(castIsValid(getOpcode(), S, Ty) && "Illegal PtrToInt");
  3004. }
  3005. IntToPtrInst::IntToPtrInst(
  3006. Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
  3007. ) : CastInst(Ty, IntToPtr, S, Name, InsertBefore) {
  3008. assert(castIsValid(getOpcode(), S, Ty) && "Illegal IntToPtr");
  3009. }
  3010. IntToPtrInst::IntToPtrInst(
  3011. Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
  3012. ) : CastInst(Ty, IntToPtr, S, Name, InsertAtEnd) {
  3013. assert(castIsValid(getOpcode(), S, Ty) && "Illegal IntToPtr");
  3014. }
  3015. BitCastInst::BitCastInst(
  3016. Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
  3017. ) : CastInst(Ty, BitCast, S, Name, InsertBefore) {
  3018. assert(castIsValid(getOpcode(), S, Ty) && "Illegal BitCast");
  3019. }
  3020. BitCastInst::BitCastInst(
  3021. Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
  3022. ) : CastInst(Ty, BitCast, S, Name, InsertAtEnd) {
  3023. assert(castIsValid(getOpcode(), S, Ty) && "Illegal BitCast");
  3024. }
  3025. AddrSpaceCastInst::AddrSpaceCastInst(
  3026. Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
  3027. ) : CastInst(Ty, AddrSpaceCast, S, Name, InsertBefore) {
  3028. assert(castIsValid(getOpcode(), S, Ty) && "Illegal AddrSpaceCast");
  3029. }
  3030. AddrSpaceCastInst::AddrSpaceCastInst(
  3031. Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
  3032. ) : CastInst(Ty, AddrSpaceCast, S, Name, InsertAtEnd) {
  3033. assert(castIsValid(getOpcode(), S, Ty) && "Illegal AddrSpaceCast");
  3034. }
  3035. //===----------------------------------------------------------------------===//
  3036. // CmpInst Classes
  3037. //===----------------------------------------------------------------------===//
  3038. CmpInst::CmpInst(Type *ty, OtherOps op, Predicate predicate, Value *LHS,
  3039. Value *RHS, const Twine &Name, Instruction *InsertBefore,
  3040. Instruction *FlagsSource)
  3041. : Instruction(ty, op,
  3042. OperandTraits<CmpInst>::op_begin(this),
  3043. OperandTraits<CmpInst>::operands(this),
  3044. InsertBefore) {
  3045. Op<0>() = LHS;
  3046. Op<1>() = RHS;
  3047. setPredicate((Predicate)predicate);
  3048. setName(Name);
  3049. if (FlagsSource)
  3050. copyIRFlags(FlagsSource);
  3051. }
  3052. CmpInst::CmpInst(Type *ty, OtherOps op, Predicate predicate, Value *LHS,
  3053. Value *RHS, const Twine &Name, BasicBlock *InsertAtEnd)
  3054. : Instruction(ty, op,
  3055. OperandTraits<CmpInst>::op_begin(this),
  3056. OperandTraits<CmpInst>::operands(this),
  3057. InsertAtEnd) {
  3058. Op<0>() = LHS;
  3059. Op<1>() = RHS;
  3060. setPredicate((Predicate)predicate);
  3061. setName(Name);
  3062. }
  3063. CmpInst *
  3064. CmpInst::Create(OtherOps Op, Predicate predicate, Value *S1, Value *S2,
  3065. const Twine &Name, Instruction *InsertBefore) {
  3066. if (Op == Instruction::ICmp) {
  3067. if (InsertBefore)
  3068. return new ICmpInst(InsertBefore, CmpInst::Predicate(predicate),
  3069. S1, S2, Name);
  3070. else
  3071. return new ICmpInst(CmpInst::Predicate(predicate),
  3072. S1, S2, Name);
  3073. }
  3074. if (InsertBefore)
  3075. return new FCmpInst(InsertBefore, CmpInst::Predicate(predicate),
  3076. S1, S2, Name);
  3077. else
  3078. return new FCmpInst(CmpInst::Predicate(predicate),
  3079. S1, S2, Name);
  3080. }
  3081. CmpInst *
  3082. CmpInst::Create(OtherOps Op, Predicate predicate, Value *S1, Value *S2,
  3083. const Twine &Name, BasicBlock *InsertAtEnd) {
  3084. if (Op == Instruction::ICmp) {
  3085. return new ICmpInst(*InsertAtEnd, CmpInst::Predicate(predicate),
  3086. S1, S2, Name);
  3087. }
  3088. return new FCmpInst(*InsertAtEnd, CmpInst::Predicate(predicate),
  3089. S1, S2, Name);
  3090. }
  3091. void CmpInst::swapOperands() {
  3092. if (ICmpInst *IC = dyn_cast<ICmpInst>(this))
  3093. IC->swapOperands();
  3094. else
  3095. cast<FCmpInst>(this)->swapOperands();
  3096. }
  3097. bool CmpInst::isCommutative() const {
  3098. if (const ICmpInst *IC = dyn_cast<ICmpInst>(this))
  3099. return IC->isCommutative();
  3100. return cast<FCmpInst>(this)->isCommutative();
  3101. }
  3102. bool CmpInst::isEquality() const {
  3103. if (const ICmpInst *IC = dyn_cast<ICmpInst>(this))
  3104. return IC->isEquality();
  3105. return cast<FCmpInst>(this)->isEquality();
  3106. }
  3107. CmpInst::Predicate CmpInst::getInversePredicate(Predicate pred) {
  3108. switch (pred) {
  3109. default: llvm_unreachable("Unknown cmp predicate!");
  3110. case ICMP_EQ: return ICMP_NE;
  3111. case ICMP_NE: return ICMP_EQ;
  3112. case ICMP_UGT: return ICMP_ULE;
  3113. case ICMP_ULT: return ICMP_UGE;
  3114. case ICMP_UGE: return ICMP_ULT;
  3115. case ICMP_ULE: return ICMP_UGT;
  3116. case ICMP_SGT: return ICMP_SLE;
  3117. case ICMP_SLT: return ICMP_SGE;
  3118. case ICMP_SGE: return ICMP_SLT;
  3119. case ICMP_SLE: return ICMP_SGT;
  3120. case FCMP_OEQ: return FCMP_UNE;
  3121. case FCMP_ONE: return FCMP_UEQ;
  3122. case FCMP_OGT: return FCMP_ULE;
  3123. case FCMP_OLT: return FCMP_UGE;
  3124. case FCMP_OGE: return FCMP_ULT;
  3125. case FCMP_OLE: return FCMP_UGT;
  3126. case FCMP_UEQ: return FCMP_ONE;
  3127. case FCMP_UNE: return FCMP_OEQ;
  3128. case FCMP_UGT: return FCMP_OLE;
  3129. case FCMP_ULT: return FCMP_OGE;
  3130. case FCMP_UGE: return FCMP_OLT;
  3131. case FCMP_ULE: return FCMP_OGT;
  3132. case FCMP_ORD: return FCMP_UNO;
  3133. case FCMP_UNO: return FCMP_ORD;
  3134. case FCMP_TRUE: return FCMP_FALSE;
  3135. case FCMP_FALSE: return FCMP_TRUE;
  3136. }
  3137. }
  3138. StringRef CmpInst::getPredicateName(Predicate Pred) {
  3139. switch (Pred) {
  3140. default: return "unknown";
  3141. case FCmpInst::FCMP_FALSE: return "false";
  3142. case FCmpInst::FCMP_OEQ: return "oeq";
  3143. case FCmpInst::FCMP_OGT: return "ogt";
  3144. case FCmpInst::FCMP_OGE: return "oge";
  3145. case FCmpInst::FCMP_OLT: return "olt";
  3146. case FCmpInst::FCMP_OLE: return "ole";
  3147. case FCmpInst::FCMP_ONE: return "one";
  3148. case FCmpInst::FCMP_ORD: return "ord";
  3149. case FCmpInst::FCMP_UNO: return "uno";
  3150. case FCmpInst::FCMP_UEQ: return "ueq";
  3151. case FCmpInst::FCMP_UGT: return "ugt";
  3152. case FCmpInst::FCMP_UGE: return "uge";
  3153. case FCmpInst::FCMP_ULT: return "ult";
  3154. case FCmpInst::FCMP_ULE: return "ule";
  3155. case FCmpInst::FCMP_UNE: return "une";
  3156. case FCmpInst::FCMP_TRUE: return "true";
  3157. case ICmpInst::ICMP_EQ: return "eq";
  3158. case ICmpInst::ICMP_NE: return "ne";
  3159. case ICmpInst::ICMP_SGT: return "sgt";
  3160. case ICmpInst::ICMP_SGE: return "sge";
  3161. case ICmpInst::ICMP_SLT: return "slt";
  3162. case ICmpInst::ICMP_SLE: return "sle";
  3163. case ICmpInst::ICMP_UGT: return "ugt";
  3164. case ICmpInst::ICMP_UGE: return "uge";
  3165. case ICmpInst::ICMP_ULT: return "ult";
  3166. case ICmpInst::ICMP_ULE: return "ule";
  3167. }
  3168. }
  3169. ICmpInst::Predicate ICmpInst::getSignedPredicate(Predicate pred) {
  3170. switch (pred) {
  3171. default: llvm_unreachable("Unknown icmp predicate!");
  3172. case ICMP_EQ: case ICMP_NE:
  3173. case ICMP_SGT: case ICMP_SLT: case ICMP_SGE: case ICMP_SLE:
  3174. return pred;
  3175. case ICMP_UGT: return ICMP_SGT;
  3176. case ICMP_ULT: return ICMP_SLT;
  3177. case ICMP_UGE: return ICMP_SGE;
  3178. case ICMP_ULE: return ICMP_SLE;
  3179. }
  3180. }
  3181. ICmpInst::Predicate ICmpInst::getUnsignedPredicate(Predicate pred) {
  3182. switch (pred) {
  3183. default: llvm_unreachable("Unknown icmp predicate!");
  3184. case ICMP_EQ: case ICMP_NE:
  3185. case ICMP_UGT: case ICMP_ULT: case ICMP_UGE: case ICMP_ULE:
  3186. return pred;
  3187. case ICMP_SGT: return ICMP_UGT;
  3188. case ICMP_SLT: return ICMP_ULT;
  3189. case ICMP_SGE: return ICMP_UGE;
  3190. case ICMP_SLE: return ICMP_ULE;
  3191. }
  3192. }
  3193. CmpInst::Predicate CmpInst::getFlippedStrictnessPredicate(Predicate pred) {
  3194. switch (pred) {
  3195. default: llvm_unreachable("Unknown or unsupported cmp predicate!");
  3196. case ICMP_SGT: return ICMP_SGE;
  3197. case ICMP_SLT: return ICMP_SLE;
  3198. case ICMP_SGE: return ICMP_SGT;
  3199. case ICMP_SLE: return ICMP_SLT;
  3200. case ICMP_UGT: return ICMP_UGE;
  3201. case ICMP_ULT: return ICMP_ULE;
  3202. case ICMP_UGE: return ICMP_UGT;
  3203. case ICMP_ULE: return ICMP_ULT;
  3204. case FCMP_OGT: return FCMP_OGE;
  3205. case FCMP_OLT: return FCMP_OLE;
  3206. case FCMP_OGE: return FCMP_OGT;
  3207. case FCMP_OLE: return FCMP_OLT;
  3208. case FCMP_UGT: return FCMP_UGE;
  3209. case FCMP_ULT: return FCMP_ULE;
  3210. case FCMP_UGE: return FCMP_UGT;
  3211. case FCMP_ULE: return FCMP_ULT;
  3212. }
  3213. }
  3214. CmpInst::Predicate CmpInst::getSwappedPredicate(Predicate pred) {
  3215. switch (pred) {
  3216. default: llvm_unreachable("Unknown cmp predicate!");
  3217. case ICMP_EQ: case ICMP_NE:
  3218. return pred;
  3219. case ICMP_SGT: return ICMP_SLT;
  3220. case ICMP_SLT: return ICMP_SGT;
  3221. case ICMP_SGE: return ICMP_SLE;
  3222. case ICMP_SLE: return ICMP_SGE;
  3223. case ICMP_UGT: return ICMP_ULT;
  3224. case ICMP_ULT: return ICMP_UGT;
  3225. case ICMP_UGE: return ICMP_ULE;
  3226. case ICMP_ULE: return ICMP_UGE;
  3227. case FCMP_FALSE: case FCMP_TRUE:
  3228. case FCMP_OEQ: case FCMP_ONE:
  3229. case FCMP_UEQ: case FCMP_UNE:
  3230. case FCMP_ORD: case FCMP_UNO:
  3231. return pred;
  3232. case FCMP_OGT: return FCMP_OLT;
  3233. case FCMP_OLT: return FCMP_OGT;
  3234. case FCMP_OGE: return FCMP_OLE;
  3235. case FCMP_OLE: return FCMP_OGE;
  3236. case FCMP_UGT: return FCMP_ULT;
  3237. case FCMP_ULT: return FCMP_UGT;
  3238. case FCMP_UGE: return FCMP_ULE;
  3239. case FCMP_ULE: return FCMP_UGE;
  3240. }
  3241. }
  3242. CmpInst::Predicate CmpInst::getNonStrictPredicate(Predicate pred) {
  3243. switch (pred) {
  3244. case ICMP_SGT: return ICMP_SGE;
  3245. case ICMP_SLT: return ICMP_SLE;
  3246. case ICMP_UGT: return ICMP_UGE;
  3247. case ICMP_ULT: return ICMP_ULE;
  3248. case FCMP_OGT: return FCMP_OGE;
  3249. case FCMP_OLT: return FCMP_OLE;
  3250. case FCMP_UGT: return FCMP_UGE;
  3251. case FCMP_ULT: return FCMP_ULE;
  3252. default: return pred;
  3253. }
  3254. }
  3255. CmpInst::Predicate CmpInst::getSignedPredicate(Predicate pred) {
  3256. assert(CmpInst::isUnsigned(pred) && "Call only with signed predicates!");
  3257. switch (pred) {
  3258. default:
  3259. llvm_unreachable("Unknown predicate!");
  3260. case CmpInst::ICMP_ULT:
  3261. return CmpInst::ICMP_SLT;
  3262. case CmpInst::ICMP_ULE:
  3263. return CmpInst::ICMP_SLE;
  3264. case CmpInst::ICMP_UGT:
  3265. return CmpInst::ICMP_SGT;
  3266. case CmpInst::ICMP_UGE:
  3267. return CmpInst::ICMP_SGE;
  3268. }
  3269. }
  3270. bool CmpInst::isUnsigned(Predicate predicate) {
  3271. switch (predicate) {
  3272. default: return false;
  3273. case ICmpInst::ICMP_ULT: case ICmpInst::ICMP_ULE: case ICmpInst::ICMP_UGT:
  3274. case ICmpInst::ICMP_UGE: return true;
  3275. }
  3276. }
  3277. bool CmpInst::isSigned(Predicate predicate) {
  3278. switch (predicate) {
  3279. default: return false;
  3280. case ICmpInst::ICMP_SLT: case ICmpInst::ICMP_SLE: case ICmpInst::ICMP_SGT:
  3281. case ICmpInst::ICMP_SGE: return true;
  3282. }
  3283. }
  3284. bool CmpInst::isOrdered(Predicate predicate) {
  3285. switch (predicate) {
  3286. default: return false;
  3287. case FCmpInst::FCMP_OEQ: case FCmpInst::FCMP_ONE: case FCmpInst::FCMP_OGT:
  3288. case FCmpInst::FCMP_OLT: case FCmpInst::FCMP_OGE: case FCmpInst::FCMP_OLE:
  3289. case FCmpInst::FCMP_ORD: return true;
  3290. }
  3291. }
  3292. bool CmpInst::isUnordered(Predicate predicate) {
  3293. switch (predicate) {
  3294. default: return false;
  3295. case FCmpInst::FCMP_UEQ: case FCmpInst::FCMP_UNE: case FCmpInst::FCMP_UGT:
  3296. case FCmpInst::FCMP_ULT: case FCmpInst::FCMP_UGE: case FCmpInst::FCMP_ULE:
  3297. case FCmpInst::FCMP_UNO: return true;
  3298. }
  3299. }
  3300. bool CmpInst::isTrueWhenEqual(Predicate predicate) {
  3301. switch(predicate) {
  3302. default: return false;
  3303. case ICMP_EQ: case ICMP_UGE: case ICMP_ULE: case ICMP_SGE: case ICMP_SLE:
  3304. case FCMP_TRUE: case FCMP_UEQ: case FCMP_UGE: case FCMP_ULE: return true;
  3305. }
  3306. }
  3307. bool CmpInst::isFalseWhenEqual(Predicate predicate) {
  3308. switch(predicate) {
  3309. case ICMP_NE: case ICMP_UGT: case ICMP_ULT: case ICMP_SGT: case ICMP_SLT:
  3310. case FCMP_FALSE: case FCMP_ONE: case FCMP_OGT: case FCMP_OLT: return true;
  3311. default: return false;
  3312. }
  3313. }
  3314. bool CmpInst::isImpliedTrueByMatchingCmp(Predicate Pred1, Predicate Pred2) {
  3315. // If the predicates match, then we know the first condition implies the
  3316. // second is true.
  3317. if (Pred1 == Pred2)
  3318. return true;
  3319. switch (Pred1) {
  3320. default:
  3321. break;
  3322. case ICMP_EQ:
  3323. // A == B implies A >=u B, A <=u B, A >=s B, and A <=s B are true.
  3324. return Pred2 == ICMP_UGE || Pred2 == ICMP_ULE || Pred2 == ICMP_SGE ||
  3325. Pred2 == ICMP_SLE;
  3326. case ICMP_UGT: // A >u B implies A != B and A >=u B are true.
  3327. return Pred2 == ICMP_NE || Pred2 == ICMP_UGE;
  3328. case ICMP_ULT: // A <u B implies A != B and A <=u B are true.
  3329. return Pred2 == ICMP_NE || Pred2 == ICMP_ULE;
  3330. case ICMP_SGT: // A >s B implies A != B and A >=s B are true.
  3331. return Pred2 == ICMP_NE || Pred2 == ICMP_SGE;
  3332. case ICMP_SLT: // A <s B implies A != B and A <=s B are true.
  3333. return Pred2 == ICMP_NE || Pred2 == ICMP_SLE;
  3334. }
  3335. return false;
  3336. }
  3337. bool CmpInst::isImpliedFalseByMatchingCmp(Predicate Pred1, Predicate Pred2) {
  3338. return isImpliedTrueByMatchingCmp(Pred1, getInversePredicate(Pred2));
  3339. }
  3340. //===----------------------------------------------------------------------===//
  3341. // SwitchInst Implementation
  3342. //===----------------------------------------------------------------------===//
  3343. void SwitchInst::init(Value *Value, BasicBlock *Default, unsigned NumReserved) {
  3344. assert(Value && Default && NumReserved);
  3345. ReservedSpace = NumReserved;
  3346. setNumHungOffUseOperands(2);
  3347. allocHungoffUses(ReservedSpace);
  3348. Op<0>() = Value;
  3349. Op<1>() = Default;
  3350. }
  3351. /// SwitchInst ctor - Create a new switch instruction, specifying a value to
  3352. /// switch on and a default destination. The number of additional cases can
  3353. /// be specified here to make memory allocation more efficient. This
  3354. /// constructor can also autoinsert before another instruction.
  3355. SwitchInst::SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
  3356. Instruction *InsertBefore)
  3357. : Instruction(Type::getVoidTy(Value->getContext()), Instruction::Switch,
  3358. nullptr, 0, InsertBefore) {
  3359. init(Value, Default, 2+NumCases*2);
  3360. }

/// SwitchInst ctor - Create a new switch instruction, specifying a value to
/// switch on and a default destination. The number of additional cases can
/// be specified here to make memory allocation more efficient. This
/// constructor also autoinserts at the end of the specified BasicBlock.
SwitchInst::SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
                       BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(Value->getContext()), Instruction::Switch,
                  nullptr, 0, InsertAtEnd) {
  init(Value, Default, 2+NumCases*2);
}

SwitchInst::SwitchInst(const SwitchInst &SI)
    : Instruction(SI.getType(), Instruction::Switch, nullptr, 0) {
  init(SI.getCondition(), SI.getDefaultDest(), SI.getNumOperands());
  setNumHungOffUseOperands(SI.getNumOperands());
  Use *OL = getOperandList();
  const Use *InOL = SI.getOperandList();
  for (unsigned i = 2, E = SI.getNumOperands(); i != E; i += 2) {
    OL[i] = InOL[i];
    OL[i+1] = InOL[i+1];
  }
  SubclassOptionalData = SI.SubclassOptionalData;
}

/// addCase - Add an entry to the switch instruction...
///
void SwitchInst::addCase(ConstantInt *OnVal, BasicBlock *Dest) {
  unsigned NewCaseIdx = getNumCases();
  unsigned OpNo = getNumOperands();
  if (OpNo+2 > ReservedSpace)
    growOperands();  // Get more space!
  // Initialize some new operands.
  assert(OpNo+1 < ReservedSpace && "Growing didn't work!");
  setNumHungOffUseOperands(OpNo+2);
  CaseHandle Case(this, NewCaseIdx);
  Case.setValue(OnVal);
  Case.setSuccessor(Dest);
}
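
// Operand layout (descriptive): operand 0 is the condition and operand 1 the
// default destination; case N then occupies operands 2+N*2 (value) and
// 2+N*2+1 (successor). A sketch with assumed Int32Ty/CaseNBB names:
//   SI->addCase(ConstantInt::get(Int32Ty, 0), Case0BB);
//   SI->addCase(ConstantInt::get(Int32Ty, 1), Case1BB);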

/// removeCase - This method removes the specified case and its successor
/// from the switch instruction.
SwitchInst::CaseIt SwitchInst::removeCase(CaseIt I) {
  unsigned idx = I->getCaseIndex();

  assert(2 + idx*2 < getNumOperands() && "Case index out of range!!!");

  unsigned NumOps = getNumOperands();
  Use *OL = getOperandList();

  // Overwrite this case with the end of the list.
  if (2 + (idx + 1) * 2 != NumOps) {
    OL[2 + idx * 2] = OL[NumOps - 2];
    OL[2 + idx * 2 + 1] = OL[NumOps - 1];
  }

  // Nuke the last value.
  OL[NumOps-2].set(nullptr);
  OL[NumOps-2+1].set(nullptr);
  setNumHungOffUseOperands(NumOps-2);

  return CaseIt(this, idx);
}
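
// Removal sketch (illustrative; shouldDrop is a hypothetical predicate): since
// the last case is swapped into the removed slot, callers keep re-checking
// case_end() rather than caching it:
//   for (auto CI = SI->case_begin(); CI != SI->case_end();) {
//     if (shouldDrop(*CI))
//       CI = SI->removeCase(CI);
//     else
//       ++CI;
//   }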

/// growOperands - This grows the operand list in response to a push_back
/// style of operation. This grows the number of ops by 3 times.
///
void SwitchInst::growOperands() {
  unsigned e = getNumOperands();
  unsigned NumOps = e*3;

  ReservedSpace = NumOps;
  growHungoffUses(ReservedSpace);
}

MDNode *
SwitchInstProfUpdateWrapper::getProfBranchWeightsMD(const SwitchInst &SI) {
  if (MDNode *ProfileData = SI.getMetadata(LLVMContext::MD_prof))
    if (auto *MDName = dyn_cast<MDString>(ProfileData->getOperand(0)))
      if (MDName->getString() == "branch_weights")
        return ProfileData;
  return nullptr;
}
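
// Expected metadata shape (descriptive): a !prof node of the form
//   !{!"branch_weights", i32 W_default, i32 W_case0, i32 W_case1, ...}
// i.e. the "branch_weights" tag followed by one weight per successor, with the
// default destination's weight first.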

MDNode *SwitchInstProfUpdateWrapper::buildProfBranchWeightsMD() {
  assert(Changed && "called only if metadata has changed");

  if (!Weights)
    return nullptr;

  assert(SI.getNumSuccessors() == Weights->size() &&
         "num of prof branch_weights must accord with num of successors");

  bool AllZeroes =
      all_of(Weights.getValue(), [](uint32_t W) { return W == 0; });

  if (AllZeroes || Weights.getValue().size() < 2)
    return nullptr;

  return MDBuilder(SI.getParent()->getContext()).createBranchWeights(*Weights);
}

void SwitchInstProfUpdateWrapper::init() {
  MDNode *ProfileData = getProfBranchWeightsMD(SI);
  if (!ProfileData)
    return;

  if (ProfileData->getNumOperands() != SI.getNumSuccessors() + 1) {
    llvm_unreachable("number of prof branch_weights metadata operands does "
                     "not correspond to number of successors");
  }

  SmallVector<uint32_t, 8> Weights;
  for (unsigned CI = 1, CE = SI.getNumSuccessors(); CI <= CE; ++CI) {
    ConstantInt *C = mdconst::extract<ConstantInt>(ProfileData->getOperand(CI));
    uint32_t CW = C->getValue().getZExtValue();
    Weights.push_back(CW);
  }
  this->Weights = std::move(Weights);
}

SwitchInst::CaseIt
SwitchInstProfUpdateWrapper::removeCase(SwitchInst::CaseIt I) {
  if (Weights) {
    assert(SI.getNumSuccessors() == Weights->size() &&
           "num of prof branch_weights must accord with num of successors");
    Changed = true;
    // Copy the last case to the place of the removed one and shrink.
    // This is tightly coupled with how SwitchInst::removeCase(CaseIt) swaps
    // the last case into the removed slot.
    Weights.getValue()[I->getCaseIndex() + 1] = Weights.getValue().back();
    Weights.getValue().pop_back();
  }
  return SI.removeCase(I);
}

void SwitchInstProfUpdateWrapper::addCase(
    ConstantInt *OnVal, BasicBlock *Dest,
    SwitchInstProfUpdateWrapper::CaseWeightOpt W) {
  SI.addCase(OnVal, Dest);
  if (!Weights && W && *W) {
    Changed = true;
    Weights = SmallVector<uint32_t, 8>(SI.getNumSuccessors(), 0);
    Weights.getValue()[SI.getNumSuccessors() - 1] = *W;
  } else if (Weights) {
    Changed = true;
    Weights.getValue().push_back(W ? *W : 0);
  }
  if (Weights)
    assert(SI.getNumSuccessors() == Weights->size() &&
           "num of prof branch_weights must accord with num of successors");
}
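
// Usage sketch (illustrative; SI, Int32Ty and TargetBB are assumed to exist):
//   SwitchInstProfUpdateWrapper SW(*SI);
//   SW.addCase(ConstantInt::get(Int32Ty, 7), TargetBB, /*W=*/100);
//   // When SW is destroyed, the updated branch_weights metadata is written
//   // back to SI if anything changed.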

SymbolTableList<Instruction>::iterator
SwitchInstProfUpdateWrapper::eraseFromParent() {
  // Instruction is erased. Mark as unchanged to not touch it in the destructor.
  Changed = false;
  if (Weights)
    Weights->resize(0);
  return SI.eraseFromParent();
}

SwitchInstProfUpdateWrapper::CaseWeightOpt
SwitchInstProfUpdateWrapper::getSuccessorWeight(unsigned idx) {
  if (!Weights)
    return None;
  return Weights.getValue()[idx];
}

void SwitchInstProfUpdateWrapper::setSuccessorWeight(
    unsigned idx, SwitchInstProfUpdateWrapper::CaseWeightOpt W) {
  if (!W)
    return;

  if (!Weights && *W)
    Weights = SmallVector<uint32_t, 8>(SI.getNumSuccessors(), 0);

  if (Weights) {
    auto &OldW = Weights.getValue()[idx];
    if (*W != OldW) {
      Changed = true;
      OldW = *W;
    }
  }
}

SwitchInstProfUpdateWrapper::CaseWeightOpt
SwitchInstProfUpdateWrapper::getSuccessorWeight(const SwitchInst &SI,
                                                unsigned idx) {
  if (MDNode *ProfileData = getProfBranchWeightsMD(SI))
    if (ProfileData->getNumOperands() == SI.getNumSuccessors() + 1)
      return mdconst::extract<ConstantInt>(ProfileData->getOperand(idx + 1))
          ->getValue()
          .getZExtValue();

  return None;
}

//===----------------------------------------------------------------------===//
//                        IndirectBrInst Implementation
//===----------------------------------------------------------------------===//

void IndirectBrInst::init(Value *Address, unsigned NumDests) {
  assert(Address && Address->getType()->isPointerTy() &&
         "Address of indirectbr must be a pointer");
  ReservedSpace = 1+NumDests;
  setNumHungOffUseOperands(1);
  allocHungoffUses(ReservedSpace);

  Op<0>() = Address;
}

/// growOperands - This grows the operand list in response to a push_back
/// style of operation. This grows the number of ops by 2 times.
///
void IndirectBrInst::growOperands() {
  unsigned e = getNumOperands();
  unsigned NumOps = e*2;

  ReservedSpace = NumOps;
  growHungoffUses(ReservedSpace);
}

IndirectBrInst::IndirectBrInst(Value *Address, unsigned NumCases,
                               Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(Address->getContext()),
                  Instruction::IndirectBr, nullptr, 0, InsertBefore) {
  init(Address, NumCases);
}

IndirectBrInst::IndirectBrInst(Value *Address, unsigned NumCases,
                               BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(Address->getContext()),
                  Instruction::IndirectBr, nullptr, 0, InsertAtEnd) {
  init(Address, NumCases);
}

IndirectBrInst::IndirectBrInst(const IndirectBrInst &IBI)
    : Instruction(Type::getVoidTy(IBI.getContext()), Instruction::IndirectBr,
                  nullptr, IBI.getNumOperands()) {
  allocHungoffUses(IBI.getNumOperands());
  Use *OL = getOperandList();
  const Use *InOL = IBI.getOperandList();
  for (unsigned i = 0, E = IBI.getNumOperands(); i != E; ++i)
    OL[i] = InOL[i];
  SubclassOptionalData = IBI.SubclassOptionalData;
}

/// addDestination - Add a destination.
///
void IndirectBrInst::addDestination(BasicBlock *DestBB) {
  unsigned OpNo = getNumOperands();
  if (OpNo+1 > ReservedSpace)
    growOperands();  // Get more space!
  // Initialize some new operands.
  assert(OpNo < ReservedSpace && "Growing didn't work!");
  setNumHungOffUseOperands(OpNo+1);
  getOperandList()[OpNo] = DestBB;
}
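
// Usage sketch (illustrative; SomeBB, OtherBB and InsertAtEndBB are assumed
// blocks in the same function):
//   IndirectBrInst *IBr =
//       IndirectBrInst::Create(BlockAddress::get(SomeBB), /*NumDests=*/2,
//                              InsertAtEndBB);
//   IBr->addDestination(SomeBB);
//   IBr->addDestination(OtherBB);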

/// removeDestination - This method removes the specified successor from the
/// indirectbr instruction.
void IndirectBrInst::removeDestination(unsigned idx) {
  assert(idx < getNumOperands()-1 && "Successor index out of range!");

  unsigned NumOps = getNumOperands();
  Use *OL = getOperandList();

  // Replace this value with the last one.
  OL[idx+1] = OL[NumOps-1];

  // Nuke the last value.
  OL[NumOps-1].set(nullptr);
  setNumHungOffUseOperands(NumOps-1);
}
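
// Operand layout note (descriptive): operand 0 holds the target address, so
// successor i lives at operand i+1; removeDestination(i) swaps the last
// operand into slot i+1, mirroring SwitchInst::removeCase above.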

//===----------------------------------------------------------------------===//
//                           cloneImpl() implementations
//===----------------------------------------------------------------------===//

// Define these methods here so vtables don't get emitted into every translation
// unit that uses these classes.

GetElementPtrInst *GetElementPtrInst::cloneImpl() const {
  return new (getNumOperands()) GetElementPtrInst(*this);
}
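
// Call-site sketch (illustrative; OrigInst is a hypothetical Instruction*):
// these hooks back Instruction::clone(), which produces a copy with no parent
// and no name, e.g.
//   Instruction *Copy = OrigInst->clone();
//   Copy->insertBefore(OrigInst);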

UnaryOperator *UnaryOperator::cloneImpl() const {
  return Create(getOpcode(), Op<0>());
}

BinaryOperator *BinaryOperator::cloneImpl() const {
  return Create(getOpcode(), Op<0>(), Op<1>());
}

FCmpInst *FCmpInst::cloneImpl() const {
  return new FCmpInst(getPredicate(), Op<0>(), Op<1>());
}

ICmpInst *ICmpInst::cloneImpl() const {
  return new ICmpInst(getPredicate(), Op<0>(), Op<1>());
}

ExtractValueInst *ExtractValueInst::cloneImpl() const {
  return new ExtractValueInst(*this);
}

InsertValueInst *InsertValueInst::cloneImpl() const {
  return new InsertValueInst(*this);
}

AllocaInst *AllocaInst::cloneImpl() const {
  AllocaInst *Result = new AllocaInst(getAllocatedType(),
                                      getType()->getAddressSpace(),
                                      (Value *)getOperand(0), getAlignment());
  Result->setUsedWithInAlloca(isUsedWithInAlloca());
  Result->setSwiftError(isSwiftError());
  return Result;
}

LoadInst *LoadInst::cloneImpl() const {
  return new LoadInst(getType(), getOperand(0), Twine(), isVolatile(),
                      getAlignment(), getOrdering(), getSyncScopeID());
}

StoreInst *StoreInst::cloneImpl() const {
  return new StoreInst(getOperand(0), getOperand(1), isVolatile(),
                       getAlignment(), getOrdering(), getSyncScopeID());
}

AtomicCmpXchgInst *AtomicCmpXchgInst::cloneImpl() const {
  AtomicCmpXchgInst *Result =
      new AtomicCmpXchgInst(getOperand(0), getOperand(1), getOperand(2),
                            getSuccessOrdering(), getFailureOrdering(),
                            getSyncScopeID());
  Result->setVolatile(isVolatile());
  Result->setWeak(isWeak());
  return Result;
}

AtomicRMWInst *AtomicRMWInst::cloneImpl() const {
  AtomicRMWInst *Result =
      new AtomicRMWInst(getOperation(), getOperand(0), getOperand(1),
                        getOrdering(), getSyncScopeID());
  Result->setVolatile(isVolatile());
  return Result;
}

FenceInst *FenceInst::cloneImpl() const {
  return new FenceInst(getContext(), getOrdering(), getSyncScopeID());
}

TruncInst *TruncInst::cloneImpl() const {
  return new TruncInst(getOperand(0), getType());
}

ZExtInst *ZExtInst::cloneImpl() const {
  return new ZExtInst(getOperand(0), getType());
}

SExtInst *SExtInst::cloneImpl() const {
  return new SExtInst(getOperand(0), getType());
}

FPTruncInst *FPTruncInst::cloneImpl() const {
  return new FPTruncInst(getOperand(0), getType());
}

FPExtInst *FPExtInst::cloneImpl() const {
  return new FPExtInst(getOperand(0), getType());
}

UIToFPInst *UIToFPInst::cloneImpl() const {
  return new UIToFPInst(getOperand(0), getType());
}

SIToFPInst *SIToFPInst::cloneImpl() const {
  return new SIToFPInst(getOperand(0), getType());
}

FPToUIInst *FPToUIInst::cloneImpl() const {
  return new FPToUIInst(getOperand(0), getType());
}

FPToSIInst *FPToSIInst::cloneImpl() const {
  return new FPToSIInst(getOperand(0), getType());
}

PtrToIntInst *PtrToIntInst::cloneImpl() const {
  return new PtrToIntInst(getOperand(0), getType());
}

IntToPtrInst *IntToPtrInst::cloneImpl() const {
  return new IntToPtrInst(getOperand(0), getType());
}

BitCastInst *BitCastInst::cloneImpl() const {
  return new BitCastInst(getOperand(0), getType());
}

AddrSpaceCastInst *AddrSpaceCastInst::cloneImpl() const {
  return new AddrSpaceCastInst(getOperand(0), getType());
}

CallInst *CallInst::cloneImpl() const {
  if (hasOperandBundles()) {
    unsigned DescriptorBytes = getNumOperandBundles() * sizeof(BundleOpInfo);
    return new(getNumOperands(), DescriptorBytes) CallInst(*this);
  }
  return new(getNumOperands()) CallInst(*this);
}

SelectInst *SelectInst::cloneImpl() const {
  return SelectInst::Create(getOperand(0), getOperand(1), getOperand(2));
}

VAArgInst *VAArgInst::cloneImpl() const {
  return new VAArgInst(getOperand(0), getType());
}

ExtractElementInst *ExtractElementInst::cloneImpl() const {
  return ExtractElementInst::Create(getOperand(0), getOperand(1));
}

InsertElementInst *InsertElementInst::cloneImpl() const {
  return InsertElementInst::Create(getOperand(0), getOperand(1), getOperand(2));
}

ShuffleVectorInst *ShuffleVectorInst::cloneImpl() const {
  return new ShuffleVectorInst(getOperand(0), getOperand(1), getOperand(2));
}

PHINode *PHINode::cloneImpl() const { return new PHINode(*this); }

LandingPadInst *LandingPadInst::cloneImpl() const {
  return new LandingPadInst(*this);
}

ReturnInst *ReturnInst::cloneImpl() const {
  return new(getNumOperands()) ReturnInst(*this);
}

BranchInst *BranchInst::cloneImpl() const {
  return new(getNumOperands()) BranchInst(*this);
}

SwitchInst *SwitchInst::cloneImpl() const { return new SwitchInst(*this); }

IndirectBrInst *IndirectBrInst::cloneImpl() const {
  return new IndirectBrInst(*this);
}

InvokeInst *InvokeInst::cloneImpl() const {
  if (hasOperandBundles()) {
    unsigned DescriptorBytes = getNumOperandBundles() * sizeof(BundleOpInfo);
    return new(getNumOperands(), DescriptorBytes) InvokeInst(*this);
  }
  return new(getNumOperands()) InvokeInst(*this);
}

CallBrInst *CallBrInst::cloneImpl() const {
  if (hasOperandBundles()) {
    unsigned DescriptorBytes = getNumOperandBundles() * sizeof(BundleOpInfo);
    return new (getNumOperands(), DescriptorBytes) CallBrInst(*this);
  }
  return new (getNumOperands()) CallBrInst(*this);
}

ResumeInst *ResumeInst::cloneImpl() const { return new (1) ResumeInst(*this); }

CleanupReturnInst *CleanupReturnInst::cloneImpl() const {
  return new (getNumOperands()) CleanupReturnInst(*this);
}

CatchReturnInst *CatchReturnInst::cloneImpl() const {
  return new (getNumOperands()) CatchReturnInst(*this);
}

CatchSwitchInst *CatchSwitchInst::cloneImpl() const {
  return new CatchSwitchInst(*this);
}

FuncletPadInst *FuncletPadInst::cloneImpl() const {
  return new (getNumOperands()) FuncletPadInst(*this);
}

UnreachableInst *UnreachableInst::cloneImpl() const {
  LLVMContext &Context = getContext();
  return new UnreachableInst(Context);
}