//===- CodeGenPrepare.cpp - Prepare a function for code generation --------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass munges the code in the input function to better prepare it for
// SelectionDAG-based code generation. This works around limitations in its
// basic-block-at-a-time approach. It should eventually be removed.
//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/Passes.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/BranchProbabilityInfo.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Statepoint.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/IR/ValueMap.h"
#include "llvm/Pass.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/BuildLibCalls.h"
#include "llvm/Transforms/Utils/BypassSlowDivision.h"
#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/SimplifyLibCalls.h"
#include "llvm/Transforms/Utils/ValueMapper.h"

using namespace llvm;
using namespace llvm::PatternMatch;

#define DEBUG_TYPE "codegenprepare"

STATISTIC(NumBlocksElim, "Number of blocks eliminated");
STATISTIC(NumPHIsElim,   "Number of trivial PHIs eliminated");
STATISTIC(NumGEPsElim,   "Number of GEPs converted to casts");
STATISTIC(NumCmpUses, "Number of uses of Cmp expressions replaced with uses of "
                      "sunken Cmps");
STATISTIC(NumCastUses, "Number of uses of Cast expressions replaced with uses "
                       "of sunken Casts");
STATISTIC(NumMemoryInsts, "Number of memory instructions whose address "
                          "computations were sunk");
STATISTIC(NumExtsMoved,  "Number of [s|z]ext instructions combined with loads");
STATISTIC(NumExtUses,    "Number of uses of [s|z]ext instructions optimized");
STATISTIC(NumAndsAdded,
          "Number of and mask instructions added to form ext loads");
STATISTIC(NumAndUses, "Number of uses of and mask instructions optimized");
STATISTIC(NumRetsDup,    "Number of return instructions duplicated");
STATISTIC(NumDbgValueMoved, "Number of debug value instructions moved");
STATISTIC(NumSelectsExpanded, "Number of selects turned into branches");
STATISTIC(NumStoreExtractExposed, "Number of store(extractelement) exposed");

static cl::opt<bool> DisableBranchOpts(
    "disable-cgp-branch-opts", cl::Hidden, cl::init(false),
    cl::desc("Disable branch optimizations in CodeGenPrepare"));

static cl::opt<bool>
    DisableGCOpts("disable-cgp-gc-opts", cl::Hidden, cl::init(false),
                  cl::desc("Disable GC optimizations in CodeGenPrepare"));

static cl::opt<bool> DisableSelectToBranch(
    "disable-cgp-select2branch", cl::Hidden, cl::init(false),
    cl::desc("Disable select to branch conversion."));

static cl::opt<bool> AddrSinkUsingGEPs(
    "addr-sink-using-gep", cl::Hidden, cl::init(true),
    cl::desc("Address sinking in CGP using GEPs."));

static cl::opt<bool> EnableAndCmpSinking(
    "enable-andcmp-sinking", cl::Hidden, cl::init(true),
    cl::desc("Enable sinking and/cmp into branches."));

static cl::opt<bool> DisableStoreExtract(
    "disable-cgp-store-extract", cl::Hidden, cl::init(false),
    cl::desc("Disable store(extract) optimizations in CodeGenPrepare"));

static cl::opt<bool> StressStoreExtract(
    "stress-cgp-store-extract", cl::Hidden, cl::init(false),
    cl::desc("Stress test store(extract) optimizations in CodeGenPrepare"));

static cl::opt<bool> DisableExtLdPromotion(
    "disable-cgp-ext-ld-promotion", cl::Hidden, cl::init(false),
    cl::desc("Disable ext(promotable(ld)) -> promoted(ext(ld)) optimization in "
             "CodeGenPrepare"));

static cl::opt<bool> StressExtLdPromotion(
    "stress-cgp-ext-ld-promotion", cl::Hidden, cl::init(false),
    cl::desc("Stress test ext(promotable(ld)) -> promoted(ext(ld)) "
             "optimization in CodeGenPrepare"));

static cl::opt<bool> DisablePreheaderProtect(
    "disable-preheader-prot", cl::Hidden, cl::init(false),
    cl::desc("Disable protection against removing loop preheaders"));

static cl::opt<bool> ProfileGuidedSectionPrefix(
    "profile-guided-section-prefix", cl::Hidden, cl::init(true),
    cl::desc("Use profile info to add section prefix for hot/cold functions"));

static cl::opt<unsigned> FreqRatioToSkipMerge(
    "cgp-freq-ratio-to-skip-merge", cl::Hidden, cl::init(2),
    cl::desc("Skip merging empty blocks if (frequency of empty block) / "
             "(frequency of destination block) is greater than this ratio"));

static cl::opt<bool> ForceSplitStore(
    "force-split-store", cl::Hidden, cl::init(false),
    cl::desc("Force store splitting no matter what the target query says."));

static cl::opt<bool>
    EnableTypePromotionMerge("cgp-type-promotion-merge", cl::Hidden,
        cl::desc("Enable merging of redundant sexts when one is dominating"
                 " the other."), cl::init(true));
namespace {
typedef SmallPtrSet<Instruction *, 16> SetOfInstrs;
typedef PointerIntPair<Type *, 1, bool> TypeIsSExt;
typedef DenseMap<Instruction *, TypeIsSExt> InstrToOrigTy;
typedef SmallVector<Instruction *, 16> SExts;
typedef DenseMap<Value *, SExts> ValueToSExts;
class TypePromotionTransaction;

class CodeGenPrepare : public FunctionPass {
  const TargetMachine *TM;
  const TargetSubtargetInfo *SubtargetInfo;
  const TargetLowering *TLI;
  const TargetRegisterInfo *TRI;
  const TargetTransformInfo *TTI;
  const TargetLibraryInfo *TLInfo;
  const LoopInfo *LI;
  std::unique_ptr<BlockFrequencyInfo> BFI;
  std::unique_ptr<BranchProbabilityInfo> BPI;

  /// As we scan instructions optimizing them, this is the next instruction
  /// to optimize. Transforms that can invalidate this should update it.
  BasicBlock::iterator CurInstIterator;

  /// Keeps track of non-local addresses that have been sunk into a block.
  /// This allows us to avoid inserting duplicate code for blocks with
  /// multiple load/stores of the same address.
  ValueMap<Value*, Value*> SunkAddrs;

  /// Keeps track of all instructions inserted for the current function.
  SetOfInstrs InsertedInsts;

  /// Keeps track of the type of the related instruction before their
  /// promotion for the current function.
  InstrToOrigTy PromotedInsts;

  /// Keep track of instructions removed during promotion.
  SetOfInstrs RemovedInsts;

  /// Keep track of sext chains based on their initial value.
  DenseMap<Value *, Instruction *> SeenChainsForSExt;

  /// Keep track of SExt promoted.
  ValueToSExts ValToSExtendedUses;

  /// True if CFG is modified in any way.
  bool ModifiedDT;

  /// True if optimizing for size.
  bool OptSize;

  /// DataLayout for the Function being processed.
  const DataLayout *DL;

public:
  static char ID; // Pass identification, replacement for typeid
  explicit CodeGenPrepare(const TargetMachine *TM = nullptr)
      : FunctionPass(ID), TM(TM), TLI(nullptr), TTI(nullptr), DL(nullptr) {
    initializeCodeGenPreparePass(*PassRegistry::getPassRegistry());
  }
  bool runOnFunction(Function &F) override;

  StringRef getPassName() const override { return "CodeGen Prepare"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    // FIXME: When we can selectively preserve passes, preserve the domtree.
    AU.addRequired<ProfileSummaryInfoWrapperPass>();
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    AU.addRequired<TargetTransformInfoWrapperPass>();
    AU.addRequired<LoopInfoWrapperPass>();
  }

private:
  bool eliminateFallThrough(Function &F);
  bool eliminateMostlyEmptyBlocks(Function &F);
  BasicBlock *findDestBlockOfMergeableEmptyBlock(BasicBlock *BB);
  bool canMergeBlocks(const BasicBlock *BB, const BasicBlock *DestBB) const;
  void eliminateMostlyEmptyBlock(BasicBlock *BB);
  bool isMergingEmptyBlockProfitable(BasicBlock *BB, BasicBlock *DestBB,
                                     bool isPreheader);
  bool optimizeBlock(BasicBlock &BB, bool& ModifiedDT);
  bool optimizeInst(Instruction *I, bool& ModifiedDT);
  bool optimizeMemoryInst(Instruction *I, Value *Addr,
                          Type *AccessTy, unsigned AS);
  bool optimizeInlineAsmInst(CallInst *CS);
  bool optimizeCallInst(CallInst *CI, bool& ModifiedDT);
  bool optimizeExt(Instruction *&I);
  bool optimizeExtUses(Instruction *I);
  bool optimizeLoadExt(LoadInst *I);
  bool optimizeSelectInst(SelectInst *SI);
  bool optimizeShuffleVectorInst(ShuffleVectorInst *SI);
  bool optimizeSwitchInst(SwitchInst *CI);
  bool optimizeExtractElementInst(Instruction *Inst);
  bool dupRetToEnableTailCallOpts(BasicBlock *BB);
  bool placeDbgValues(Function &F);
  bool canFormExtLd(const SmallVectorImpl<Instruction *> &MovedExts,
                    LoadInst *&LI, Instruction *&Inst, bool HasPromoted);
  bool tryToPromoteExts(TypePromotionTransaction &TPT,
                        const SmallVectorImpl<Instruction *> &Exts,
                        SmallVectorImpl<Instruction *> &ProfitablyMovedExts,
                        unsigned CreatedInstsCost = 0);
  bool mergeSExts(Function &F);
  bool performAddressTypePromotion(
      Instruction *&Inst,
      bool AllowPromotionWithoutCommonHeader,
      bool HasPromoted, TypePromotionTransaction &TPT,
      SmallVectorImpl<Instruction *> &SpeculativelyMovedExts);
  bool splitBranchCondition(Function &F);
  bool simplifyOffsetableRelocate(Instruction &I);
  bool splitIndirectCriticalEdges(Function &F);
};
}
char CodeGenPrepare::ID = 0;
INITIALIZE_TM_PASS_BEGIN(CodeGenPrepare, "codegenprepare",
                         "Optimize for code generation", false, false)
INITIALIZE_PASS_DEPENDENCY(ProfileSummaryInfoWrapperPass)
INITIALIZE_TM_PASS_END(CodeGenPrepare, "codegenprepare",
                       "Optimize for code generation", false, false)

FunctionPass *llvm::createCodeGenPreparePass(const TargetMachine *TM) {
  return new CodeGenPrepare(TM);
}

bool CodeGenPrepare::runOnFunction(Function &F) {
  if (skipFunction(F))
    return false;

  DL = &F.getParent()->getDataLayout();

  bool EverMadeChange = false;
  // Clear per function information.
  InsertedInsts.clear();
  PromotedInsts.clear();
  BFI.reset();
  BPI.reset();

  ModifiedDT = false;
  if (TM) {
    SubtargetInfo = TM->getSubtargetImpl(F);
    TLI = SubtargetInfo->getTargetLowering();
    TRI = SubtargetInfo->getRegisterInfo();
  }
  TLInfo = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
  TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
  LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
  OptSize = F.optForSize();

  if (ProfileGuidedSectionPrefix) {
    ProfileSummaryInfo *PSI =
        getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI();
    if (PSI->isFunctionHotInCallGraph(&F))
      F.setSectionPrefix(".hot");
    else if (PSI->isFunctionColdInCallGraph(&F))
      F.setSectionPrefix(".cold");
  }

  /// This optimization identifies DIV instructions that can be
  /// profitably bypassed and carried out with a shorter, faster divide.
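  //
  // Illustrative sketch (hypothetical example, not taken from the LLVM
  // sources): on a target that reports 64-bit division as slow,
  // bypassSlowDivision guards each divide with a runtime width check along
  // these lines; the exact IR depends on getBypassSlowDivWidths().
  //
  //   %ops = or i64 %a, %b                 ; do both operands fit in 32 bits?
  //   %big = icmp ugt i64 %ops, 4294967295
  //   br i1 %big, label %slow, label %fast
  // slow:                                  ; full-width divide
  //   %q.slow = udiv i64 %a, %b
  //   br label %join
  // fast:                                  ; narrow divide, then widen
  //   %q.32 = udiv i32 %a.trunc, %b.trunc
  //   %q.fast = zext i32 %q.32 to i64
  //   br label %join
  // join:
  //   %q = phi i64 [ %q.slow, %slow ], [ %q.fast, %fast ]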
  if (!OptSize && TLI && TLI->isSlowDivBypassed()) {
    const DenseMap<unsigned int, unsigned int> &BypassWidths =
        TLI->getBypassSlowDivWidths();
    BasicBlock *BB = &*F.begin();
    while (BB != nullptr) {
      // bypassSlowDivision may create new BBs, but we don't want to reapply
      // the optimization to those blocks.
      BasicBlock *Next = BB->getNextNode();
      EverMadeChange |= bypassSlowDivision(BB, BypassWidths);
      BB = Next;
    }
  }

  // Eliminate blocks that contain only PHI nodes and an
  // unconditional branch.
  EverMadeChange |= eliminateMostlyEmptyBlocks(F);

  // If llvm.dbg.value is far away from the value then iSel may not be able to
  // handle it properly. iSel will drop llvm.dbg.value if it cannot
  // find a node corresponding to the value.
  EverMadeChange |= placeDbgValues(F);

  if (!DisableBranchOpts)
    EverMadeChange |= splitBranchCondition(F);

  // Split some critical edges where one of the sources is an indirect branch,
  // to help generate sane code for PHIs involving such edges.
  EverMadeChange |= splitIndirectCriticalEdges(F);

  bool MadeChange = true;
  while (MadeChange) {
    MadeChange = false;
    SeenChainsForSExt.clear();
    ValToSExtendedUses.clear();
    RemovedInsts.clear();
    for (Function::iterator I = F.begin(); I != F.end(); ) {
      BasicBlock *BB = &*I++;
      bool ModifiedDTOnIteration = false;
      MadeChange |= optimizeBlock(*BB, ModifiedDTOnIteration);

      // Restart BB iteration if the dominator tree of the Function was changed
      if (ModifiedDTOnIteration)
        break;
    }
    if (EnableTypePromotionMerge && !ValToSExtendedUses.empty())
      MadeChange |= mergeSExts(F);

    // Really free removed instructions during promotion.
    for (Instruction *I : RemovedInsts)
      delete I;

    EverMadeChange |= MadeChange;
  }

  SunkAddrs.clear();

  if (!DisableBranchOpts) {
    MadeChange = false;
    SmallPtrSet<BasicBlock*, 8> WorkList;
    for (BasicBlock &BB : F) {
      SmallVector<BasicBlock *, 2> Successors(succ_begin(&BB), succ_end(&BB));
      MadeChange |= ConstantFoldTerminator(&BB, true);
      if (!MadeChange) continue;

      for (SmallVectorImpl<BasicBlock*>::iterator
             II = Successors.begin(), IE = Successors.end(); II != IE; ++II)
        if (pred_begin(*II) == pred_end(*II))
          WorkList.insert(*II);
    }

    // Delete the dead blocks and any of their dead successors.
    MadeChange |= !WorkList.empty();
    while (!WorkList.empty()) {
      BasicBlock *BB = *WorkList.begin();
      WorkList.erase(BB);
      SmallVector<BasicBlock*, 2> Successors(succ_begin(BB), succ_end(BB));

      DeleteDeadBlock(BB);

      for (SmallVectorImpl<BasicBlock*>::iterator
             II = Successors.begin(), IE = Successors.end(); II != IE; ++II)
        if (pred_begin(*II) == pred_end(*II))
          WorkList.insert(*II);
    }

    // Merge pairs of basic blocks with unconditional branches, connected by
    // a single edge.
    if (EverMadeChange || MadeChange)
      MadeChange |= eliminateFallThrough(F);

    EverMadeChange |= MadeChange;
  }

  if (!DisableGCOpts) {
    SmallVector<Instruction *, 2> Statepoints;
    for (BasicBlock &BB : F)
      for (Instruction &I : BB)
        if (isStatepoint(I))
          Statepoints.push_back(&I);
    for (auto &I : Statepoints)
      EverMadeChange |= simplifyOffsetableRelocate(*I);
  }

  return EverMadeChange;
}

/// Merge basic blocks which are connected by a single edge, where one of the
/// basic blocks has a single successor pointing to the other basic block,
/// which has a single predecessor.
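///
/// Illustrative sketch (hypothetical example, not from the original file):
///
///   bb1:                           bb1:
///     br label %bb2          =>      %x = add i32 %a, 1
///   bb2:                             ret i32 %x
///     %x = add i32 %a, 1
///     ret i32 %x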
bool CodeGenPrepare::eliminateFallThrough(Function &F) {
  bool Changed = false;
  // Scan all of the blocks in the function, except for the entry block.
  for (Function::iterator I = std::next(F.begin()), E = F.end(); I != E;) {
    BasicBlock *BB = &*I++;
    // If the destination block has a single pred, then this is a trivial
    // edge, just collapse it.
    BasicBlock *SinglePred = BB->getSinglePredecessor();

    // Don't merge if BB's address is taken.
    if (!SinglePred || SinglePred == BB || BB->hasAddressTaken()) continue;

    BranchInst *Term = dyn_cast<BranchInst>(SinglePred->getTerminator());
    if (Term && !Term->isConditional()) {
      Changed = true;
      DEBUG(dbgs() << "To merge:\n" << *SinglePred << "\n\n\n");
      // Remember if SinglePred was the entry block of the function.
      // If so, we will need to move BB back to the entry position.
      bool isEntry = SinglePred == &SinglePred->getParent()->getEntryBlock();
      MergeBasicBlockIntoOnlyPred(BB, nullptr);

      if (isEntry && BB != &BB->getParent()->getEntryBlock())
        BB->moveBefore(&BB->getParent()->getEntryBlock());

      // We have erased a block. Update the iterator.
      I = BB->getIterator();
    }
  }
  return Changed;
}
/// Find a destination block from BB if BB is a mergeable empty block.
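///
/// Illustrative sketch (hypothetical example, not from the original file):
/// a mergeable empty block contains only PHIs (and possibly debug
/// intrinsics) followed by an unconditional branch:
///
///   bb:
///     %p = phi i32 [ %a, %pred1 ], [ %b, %pred2 ]
///     br label %dest
///
/// Here the returned destination would be %dest, provided canMergeBlocks
/// finds no conflicting PHI incoming values.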
BasicBlock *CodeGenPrepare::findDestBlockOfMergeableEmptyBlock(BasicBlock *BB) {
  // If this block doesn't end with an uncond branch, ignore it.
  BranchInst *BI = dyn_cast<BranchInst>(BB->getTerminator());
  if (!BI || !BI->isUnconditional())
    return nullptr;

  // If the instruction before the branch (skipping debug info) isn't a phi
  // node, then other stuff is happening here.
  BasicBlock::iterator BBI = BI->getIterator();
  if (BBI != BB->begin()) {
    --BBI;
    while (isa<DbgInfoIntrinsic>(BBI)) {
      if (BBI == BB->begin())
        break;
      --BBI;
    }
    if (!isa<DbgInfoIntrinsic>(BBI) && !isa<PHINode>(BBI))
      return nullptr;
  }

  // Do not break infinite loops.
  BasicBlock *DestBB = BI->getSuccessor(0);
  if (DestBB == BB)
    return nullptr;

  if (!canMergeBlocks(BB, DestBB))
    DestBB = nullptr;

  return DestBB;
}

// Return the unique indirectbr predecessor of a block. This may return null
// even if such a predecessor exists, if it's not useful for splitting.
// If a predecessor is found, OtherPreds will contain all other (non-indirectbr)
// predecessors of BB.
static BasicBlock *
findIBRPredecessor(BasicBlock *BB, SmallVectorImpl<BasicBlock *> &OtherPreds) {
  // If the block doesn't have any PHIs, we don't care about it, since there's
  // no point in splitting it.
  PHINode *PN = dyn_cast<PHINode>(BB->begin());
  if (!PN)
    return nullptr;

  // Verify we have exactly one IBR predecessor.
  // Conservatively bail out if one of the other predecessors is not a "regular"
  // terminator (that is, not a switch or a br).
  BasicBlock *IBB = nullptr;
  for (unsigned Pred = 0, E = PN->getNumIncomingValues(); Pred != E; ++Pred) {
    BasicBlock *PredBB = PN->getIncomingBlock(Pred);
    TerminatorInst *PredTerm = PredBB->getTerminator();
    switch (PredTerm->getOpcode()) {
    case Instruction::IndirectBr:
      if (IBB)
        return nullptr;
      IBB = PredBB;
      break;
    case Instruction::Br:
    case Instruction::Switch:
      OtherPreds.push_back(PredBB);
      continue;
    default:
      return nullptr;
    }
  }

  return IBB;
}

// Split critical edges where the source of the edge is an indirectbr
// instruction. This isn't always possible, but we can handle some easy cases.
// This is useful because MI is unable to split such critical edges,
// which means it will not be able to sink instructions along those edges.
// This is especially painful for indirect branches with many successors, where
// we end up having to prepare all outgoing values in the origin block.
//
// Our normal algorithm for splitting critical edges requires us to update
// the outgoing edges of the edge origin block, but for an indirectbr this
// is hard, since it would require finding and updating the block addresses
// the indirect branch uses. But if a block only has a single indirectbr
// predecessor, with the others being regular branches, we can do it in a
// different way.
// Say we have A -> D, B -> D, I -> D where only I -> D is an indirectbr.
// We can split D into D0 and D1, where D0 contains only the PHIs from D,
// and D1 is the D block body. We can then duplicate D0 as D0A and D0B, and
// create the following structure:
// A -> D0A, B -> D0A, I -> D0B, D0A -> D1, D0B -> D1
bool CodeGenPrepare::splitIndirectCriticalEdges(Function &F) {
  // Check whether the function has any indirectbrs, and collect which blocks
  // they may jump to. Since most functions don't have indirect branches,
  // this lowers the common case's overhead to O(Blocks) instead of O(Edges).
  SmallSetVector<BasicBlock *, 16> Targets;
  for (auto &BB : F) {
    auto *IBI = dyn_cast<IndirectBrInst>(BB.getTerminator());
    if (!IBI)
      continue;

    for (unsigned Succ = 0, E = IBI->getNumSuccessors(); Succ != E; ++Succ)
      Targets.insert(IBI->getSuccessor(Succ));
  }

  if (Targets.empty())
    return false;

  bool Changed = false;
  for (BasicBlock *Target : Targets) {
    SmallVector<BasicBlock *, 16> OtherPreds;
    BasicBlock *IBRPred = findIBRPredecessor(Target, OtherPreds);
    // If we did not find an indirectbr, or the indirectbr is the only
    // incoming edge, this isn't the kind of edge we're looking for.
    if (!IBRPred || OtherPreds.empty())
      continue;

    // Don't even think about ehpads/landingpads.
    Instruction *FirstNonPHI = Target->getFirstNonPHI();
    if (FirstNonPHI->isEHPad() || Target->isLandingPad())
      continue;

    BasicBlock *BodyBlock = Target->splitBasicBlock(FirstNonPHI, ".split");
    // It's possible Target was its own successor through an indirectbr.
    // In this case, the indirectbr now comes from BodyBlock.
    if (IBRPred == Target)
      IBRPred = BodyBlock;

    // At this point Target only has PHIs, and BodyBlock has the rest of the
    // block's body. Create a copy of Target that will be used by the "direct"
    // preds.
    ValueToValueMapTy VMap;
    BasicBlock *DirectSucc = CloneBasicBlock(Target, VMap, ".clone", &F);

    for (BasicBlock *Pred : OtherPreds)
      Pred->getTerminator()->replaceUsesOfWith(Target, DirectSucc);

    // Ok, now fix up the PHIs. We know the two blocks only have PHIs, and that
    // they are clones, so the number of PHIs is the same.
    // (a) Remove the edge coming from IBRPred from the "Direct" PHI
    // (b) Leave that as the only edge in the "Indirect" PHI.
    // (c) Merge the two in the body block.
    BasicBlock::iterator Indirect = Target->begin(),
                         End = Target->getFirstNonPHI()->getIterator();
    BasicBlock::iterator Direct = DirectSucc->begin();
    BasicBlock::iterator MergeInsert = BodyBlock->getFirstInsertionPt();

    assert(&*End == Target->getTerminator() &&
           "Block was expected to only contain PHIs");

    while (Indirect != End) {
      PHINode *DirPHI = cast<PHINode>(Direct);
      PHINode *IndPHI = cast<PHINode>(Indirect);

      // Now, clean up - the direct block shouldn't get the indirect value,
      // and vice versa.
      DirPHI->removeIncomingValue(IBRPred);
      Direct++;

      // Advance the pointer here, to avoid invalidation issues when the old
      // PHI is erased.
      Indirect++;

      PHINode *NewIndPHI = PHINode::Create(IndPHI->getType(), 1, "ind", IndPHI);
      NewIndPHI->addIncoming(IndPHI->getIncomingValueForBlock(IBRPred),
                             IBRPred);

      // Create a PHI in the body block, to merge the direct and indirect
      // predecessors.
      PHINode *MergePHI =
          PHINode::Create(IndPHI->getType(), 2, "merge", &*MergeInsert);
      MergePHI->addIncoming(NewIndPHI, Target);
      MergePHI->addIncoming(DirPHI, DirectSucc);

      IndPHI->replaceAllUsesWith(MergePHI);
      IndPHI->eraseFromParent();
    }

    Changed = true;
  }

  return Changed;
}

/// Eliminate blocks that contain only PHI nodes, debug info directives, and an
/// unconditional branch. Passes before isel (e.g. LSR/loopsimplify) often split
/// edges in ways that are non-optimal for isel. Start by eliminating these
/// blocks so we can split them the way we want them.
bool CodeGenPrepare::eliminateMostlyEmptyBlocks(Function &F) {
  SmallPtrSet<BasicBlock *, 16> Preheaders;
  SmallVector<Loop *, 16> LoopList(LI->begin(), LI->end());
  while (!LoopList.empty()) {
    Loop *L = LoopList.pop_back_val();
    LoopList.insert(LoopList.end(), L->begin(), L->end());
    if (BasicBlock *Preheader = L->getLoopPreheader())
      Preheaders.insert(Preheader);
  }

  bool MadeChange = false;
  // Note that this intentionally skips the entry block.
  for (Function::iterator I = std::next(F.begin()), E = F.end(); I != E;) {
    BasicBlock *BB = &*I++;
    BasicBlock *DestBB = findDestBlockOfMergeableEmptyBlock(BB);

    if (!DestBB ||
        !isMergingEmptyBlockProfitable(BB, DestBB, Preheaders.count(BB)))
      continue;

    eliminateMostlyEmptyBlock(BB);
    MadeChange = true;
  }
  return MadeChange;
}

bool CodeGenPrepare::isMergingEmptyBlockProfitable(BasicBlock *BB,
                                                   BasicBlock *DestBB,
                                                   bool isPreheader) {
  // Do not delete loop preheaders if doing so would create a critical edge.
  // Loop preheaders can be good locations to spill registers. If the
  // preheader is deleted and we create a critical edge, registers may be
  // spilled in the loop body instead.
  if (!DisablePreheaderProtect && isPreheader &&
      !(BB->getSinglePredecessor() &&
        BB->getSinglePredecessor()->getSingleSuccessor()))
    return false;

  // Try to skip merging if the unique predecessor of BB is terminated by a
  // switch or indirect branch instruction, and BB is used as an incoming block
  // of PHIs in DestBB. In such case, merging BB and DestBB would cause ISel to
  // add COPY instructions in the predecessor of BB instead of BB (if it is not
  // merged). Note that the critical edge created by merging such blocks won't
  // be split in MachineSink because the jump table is not analyzable. By
  // keeping such an empty block (BB), ISel will place COPY instructions in BB,
  // not in the predecessor of BB.
  BasicBlock *Pred = BB->getUniquePredecessor();
  if (!Pred ||
      !(isa<SwitchInst>(Pred->getTerminator()) ||
        isa<IndirectBrInst>(Pred->getTerminator())))
    return true;

  if (BB->getTerminator() != BB->getFirstNonPHI())
    return true;

  // We use a simple cost heuristic which determines that skipping merging is
  // profitable if the cost of skipping merging is less than the cost of
  // merging : Cost(skipping merging) < Cost(merging BB), where the
  // Cost(skipping merging) is Freq(BB) * (Cost(Copy) + Cost(Branch)), and
  // the Cost(merging BB) is Freq(Pred) * Cost(Copy).
  // Assuming Cost(Copy) == Cost(Branch), we could simplify it to :
  //   Freq(Pred) / Freq(BB) > 2.
  // Note that if there are multiple empty blocks sharing the same incoming
  // value for the PHIs in the DestBB, we consider them together. In such
  // case, Cost(merging BB) will be the sum of their frequencies.
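  //
  // A worked instance of the heuristic with assumed numbers (hypothetical,
  // not from the original file): with the default FreqRatioToSkipMerge of 2,
  // if Freq(Pred) = 12 and Freq(BB) = 4, then 12 > 4 * 2, so skipping the
  // merge is profitable and BB is kept as a placement point for COPYs; if
  // instead Freq(BB) = 8, then 12 <= 8 * 2 and the blocks are merged.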
  if (!isa<PHINode>(DestBB->begin()))
    return true;

  SmallPtrSet<BasicBlock *, 16> SameIncomingValueBBs;

  // Find all other incoming blocks from which incoming values of all PHIs in
  // DestBB are the same as the ones from BB.
  for (pred_iterator PI = pred_begin(DestBB), E = pred_end(DestBB); PI != E;
       ++PI) {
    BasicBlock *DestBBPred = *PI;
    if (DestBBPred == BB)
      continue;

    bool HasAllSameValue = true;
    BasicBlock::const_iterator DestBBI = DestBB->begin();
    while (const PHINode *DestPN = dyn_cast<PHINode>(DestBBI++)) {
      if (DestPN->getIncomingValueForBlock(BB) !=
          DestPN->getIncomingValueForBlock(DestBBPred)) {
        HasAllSameValue = false;
        break;
      }
    }
    if (HasAllSameValue)
      SameIncomingValueBBs.insert(DestBBPred);
  }

  // See if all BB's incoming values are same as the value from Pred. In this
  // case, no reason to skip merging because COPYs are expected to be placed
  // in Pred already.
  if (SameIncomingValueBBs.count(Pred))
    return true;

  if (!BFI) {
    Function &F = *BB->getParent();
    LoopInfo LI{DominatorTree(F)};
    BPI.reset(new BranchProbabilityInfo(F, LI));
    BFI.reset(new BlockFrequencyInfo(F, *BPI, LI));
  }

  BlockFrequency PredFreq = BFI->getBlockFreq(Pred);
  BlockFrequency BBFreq = BFI->getBlockFreq(BB);

  for (auto SameValueBB : SameIncomingValueBBs)
    if (SameValueBB->getUniquePredecessor() == Pred &&
        DestBB == findDestBlockOfMergeableEmptyBlock(SameValueBB))
      BBFreq += BFI->getBlockFreq(SameValueBB);

  return PredFreq.getFrequency() <=
         BBFreq.getFrequency() * FreqRatioToSkipMerge;
}

/// Return true if we can merge BB into DestBB if there is a single
/// unconditional branch between them, and BB contains no other non-phi
/// instructions.
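///
/// Illustrative sketch (hypothetical example, not from the original file):
/// merging is rejected when BB and DestBB share a predecessor but would feed
/// DestBB's PHI different values for it, e.g.
///
///   destbb:
///     %p = phi i32 [ %x, %common ], [ %y, %bb ]
///
/// If %bb's own PHI maps %common to a value other than %x, the merged PHI
/// would need two different incoming values for %common, so we bail out.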
  640. bool CodeGenPrepare::canMergeBlocks(const BasicBlock *BB,
  641. const BasicBlock *DestBB) const {
  642. // We only want to eliminate blocks whose phi nodes are used by phi nodes in
  643. // the successor. If there are more complex condition (e.g. preheaders),
  644. // don't mess around with them.
  645. BasicBlock::const_iterator BBI = BB->begin();
  646. while (const PHINode *PN = dyn_cast<PHINode>(BBI++)) {
  647. for (const User *U : PN->users()) {
  648. const Instruction *UI = cast<Instruction>(U);
  649. if (UI->getParent() != DestBB || !isa<PHINode>(UI))
  650. return false;
  651. // If User is inside DestBB block and it is a PHINode then check
  652. // incoming value. If incoming value is not from BB then this is
  653. // a complex condition (e.g. preheaders) we want to avoid here.
  654. if (UI->getParent() == DestBB) {
  655. if (const PHINode *UPN = dyn_cast<PHINode>(UI))
  656. for (unsigned I = 0, E = UPN->getNumIncomingValues(); I != E; ++I) {
  657. Instruction *Insn = dyn_cast<Instruction>(UPN->getIncomingValue(I));
  658. if (Insn && Insn->getParent() == BB &&
  659. Insn->getParent() != UPN->getIncomingBlock(I))
  660. return false;
  661. }
  662. }
  663. }
  664. }
  665. // If BB and DestBB contain any common predecessors, then the phi nodes in BB
  666. // and DestBB may have conflicting incoming values for the block. If so, we
  667. // can't merge the block.
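// For example (hypothetical IR): merging %bb into %dest below is rejected,
// because the common predecessor %pred would have to supply both 0 and 1
// to %y at once:
//   pred:
//     br i1 %c, label %bb, label %dest
//   bb:
//     %x = phi i32 [ 0, %pred ]
//     br label %dest
//   dest:
//     %y = phi i32 [ 1, %pred ], [ %x, %bb ]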
  668. const PHINode *DestBBPN = dyn_cast<PHINode>(DestBB->begin());
  669. if (!DestBBPN) return true; // no conflict.
  670. // Collect the preds of BB.
  671. SmallPtrSet<const BasicBlock*, 16> BBPreds;
  672. if (const PHINode *BBPN = dyn_cast<PHINode>(BB->begin())) {
  673. // It is faster to get preds from a PHI than with pred_iterator.
  674. for (unsigned i = 0, e = BBPN->getNumIncomingValues(); i != e; ++i)
  675. BBPreds.insert(BBPN->getIncomingBlock(i));
  676. } else {
  677. BBPreds.insert(pred_begin(BB), pred_end(BB));
  678. }
  679. // Walk the preds of DestBB.
  680. for (unsigned i = 0, e = DestBBPN->getNumIncomingValues(); i != e; ++i) {
  681. BasicBlock *Pred = DestBBPN->getIncomingBlock(i);
  682. if (BBPreds.count(Pred)) { // Common predecessor?
  683. BBI = DestBB->begin();
  684. while (const PHINode *PN = dyn_cast<PHINode>(BBI++)) {
  685. const Value *V1 = PN->getIncomingValueForBlock(Pred);
  686. const Value *V2 = PN->getIncomingValueForBlock(BB);
  687. // If V2 is a phi node in BB, look up what the mapped value will be.
  688. if (const PHINode *V2PN = dyn_cast<PHINode>(V2))
  689. if (V2PN->getParent() == BB)
  690. V2 = V2PN->getIncomingValueForBlock(Pred);
  691. // If there is a conflict, bail out.
  692. if (V1 != V2) return false;
  693. }
  694. }
  695. }
  696. return true;
  697. }
  698. /// Eliminate a basic block that has only phi's and an unconditional branch in
  699. /// it.
  700. void CodeGenPrepare::eliminateMostlyEmptyBlock(BasicBlock *BB) {
  701. BranchInst *BI = cast<BranchInst>(BB->getTerminator());
  702. BasicBlock *DestBB = BI->getSuccessor(0);
  703. DEBUG(dbgs() << "MERGING MOSTLY EMPTY BLOCKS - BEFORE:\n" << *BB << *DestBB);
  704. // If the destination block has a single pred, then this is a trivial edge,
  705. // just collapse it.
  706. if (BasicBlock *SinglePred = DestBB->getSinglePredecessor()) {
  707. if (SinglePred != DestBB) {
  708. // Remember if SinglePred was the entry block of the function. If so, we
  709. // will need to move BB back to the entry position.
  710. bool isEntry = SinglePred == &SinglePred->getParent()->getEntryBlock();
  711. MergeBasicBlockIntoOnlyPred(DestBB, nullptr);
  712. if (isEntry && BB != &BB->getParent()->getEntryBlock())
  713. BB->moveBefore(&BB->getParent()->getEntryBlock());
  714. DEBUG(dbgs() << "AFTER:\n" << *DestBB << "\n\n\n");
  715. return;
  716. }
  717. }
  718. // Otherwise, we have multiple predecessors of BB. Update the PHIs in DestBB
  719. // to handle the new incoming edges it is about to have.
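// As a sketch (hypothetical IR), when the removed incoming value is itself a
// PHI defined in BB, its operands are spliced directly into DestBB's PHI:
//   bb:                                   ; preds = %a, %b
//     %p = phi i32 [ 0, %a ], [ 1, %b ]
//     br label %dest
//   dest:
//     %q = phi i32 [ %p, %bb ], [ 2, %c ]
// becomes
//   dest:
//     %q = phi i32 [ 0, %a ], [ 1, %b ], [ 2, %c ]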
  720. PHINode *PN;
  721. for (BasicBlock::iterator BBI = DestBB->begin();
  722. (PN = dyn_cast<PHINode>(BBI)); ++BBI) {
  723. // Remove the incoming value for BB, and remember it.
  724. Value *InVal = PN->removeIncomingValue(BB, false);
  725. // Two options: either the InVal is a phi node defined in BB or it is some
  726. // value that dominates BB.
  727. PHINode *InValPhi = dyn_cast<PHINode>(InVal);
  728. if (InValPhi && InValPhi->getParent() == BB) {
  729. // Add all of the input values of the input PHI as inputs of this phi.
  730. for (unsigned i = 0, e = InValPhi->getNumIncomingValues(); i != e; ++i)
  731. PN->addIncoming(InValPhi->getIncomingValue(i),
  732. InValPhi->getIncomingBlock(i));
  733. } else {
  734. // Otherwise, add one instance of the dominating value for each edge that
  735. // we will be adding.
  736. if (PHINode *BBPN = dyn_cast<PHINode>(BB->begin())) {
  737. for (unsigned i = 0, e = BBPN->getNumIncomingValues(); i != e; ++i)
  738. PN->addIncoming(InVal, BBPN->getIncomingBlock(i));
  739. } else {
  740. for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI)
  741. PN->addIncoming(InVal, *PI);
  742. }
  743. }
  744. }
  745. // The PHIs are now updated, change everything that refers to BB to use
  746. // DestBB and remove BB.
  747. BB->replaceAllUsesWith(DestBB);
  748. BB->eraseFromParent();
  749. ++NumBlocksElim;
  750. DEBUG(dbgs() << "AFTER:\n" << *DestBB << "\n\n\n");
  751. }
  752. // Computes a map of base pointer relocation instructions to corresponding
  753. // derived pointer relocation instructions given a vector of all relocate calls
  754. static void computeBaseDerivedRelocateMap(
  755. const SmallVectorImpl<GCRelocateInst *> &AllRelocateCalls,
  756. DenseMap<GCRelocateInst *, SmallVector<GCRelocateInst *, 2>>
  757. &RelocateInstMap) {
  758. // Collect information in two maps: one primarily for locating the base object
  759. // while filling the second map; the second map is the final structure holding
  760. // a mapping between Base and corresponding Derived relocate calls
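// For example (hypothetical statepoint operand indices): given
//   %b  = relocate(%tok, i32 4, i32 4)   ; base,    keyed as (4, 4)
//   %d1 = relocate(%tok, i32 4, i32 5)   ; derived, keyed as (4, 5)
//   %d2 = relocate(%tok, i32 4, i32 6)   ; derived, keyed as (4, 6)
// the final RelocateInstMap is { %b -> [ %d1, %d2 ] }.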
  761. DenseMap<std::pair<unsigned, unsigned>, GCRelocateInst *> RelocateIdxMap;
  762. for (auto *ThisRelocate : AllRelocateCalls) {
  763. auto K = std::make_pair(ThisRelocate->getBasePtrIndex(),
  764. ThisRelocate->getDerivedPtrIndex());
  765. RelocateIdxMap.insert(std::make_pair(K, ThisRelocate));
  766. }
  767. for (auto &Item : RelocateIdxMap) {
  768. std::pair<unsigned, unsigned> Key = Item.first;
  769. if (Key.first == Key.second)
  770. // Base relocation: nothing to insert
  771. continue;
  772. GCRelocateInst *I = Item.second;
  773. auto BaseKey = std::make_pair(Key.first, Key.first);
  774. // We're iterating over RelocateIdxMap so we cannot modify it.
  775. auto MaybeBase = RelocateIdxMap.find(BaseKey);
  776. if (MaybeBase == RelocateIdxMap.end())
  777. // TODO: We might want to insert a new base object relocate and gep off
  778. // that, if there are enough derived object relocates.
  779. continue;
  780. RelocateInstMap[MaybeBase->second].push_back(I);
  781. }
  782. }
  783. // Accepts a GEP and extracts the operands into a vector provided they're all
  784. // small integer constants
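// (e.g. 'gep %p, i32 0, i32 3' is accepted since every index is a constant
// no larger than 20, while 'gep %p, i32 %i' or 'gep %p, i32 100' is not)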
  785. static bool getGEPSmallConstantIntOffsetV(GetElementPtrInst *GEP,
  786. SmallVectorImpl<Value *> &OffsetV) {
  787. for (unsigned i = 1; i < GEP->getNumOperands(); i++) {
  788. // Only accept small constant integer operands
  789. auto Op = dyn_cast<ConstantInt>(GEP->getOperand(i));
  790. if (!Op || Op->getZExtValue() > 20)
  791. return false;
  792. }
  793. for (unsigned i = 1; i < GEP->getNumOperands(); i++)
  794. OffsetV.push_back(GEP->getOperand(i));
  795. return true;
  796. }
  797. // Takes a RelocatedBase (base pointer relocation instruction) and Targets to
798. // replace, computes a replacement, and applies it.
  799. static bool
  800. simplifyRelocatesOffABase(GCRelocateInst *RelocatedBase,
  801. const SmallVectorImpl<GCRelocateInst *> &Targets) {
  802. bool MadeChange = false;
  803. for (GCRelocateInst *ToReplace : Targets) {
  804. assert(ToReplace->getBasePtrIndex() == RelocatedBase->getBasePtrIndex() &&
  805. "Not relocating a derived object of the original base object");
  806. if (ToReplace->getBasePtrIndex() == ToReplace->getDerivedPtrIndex()) {
  807. // A duplicate relocate call. TODO: coalesce duplicates.
  808. continue;
  809. }
  810. if (RelocatedBase->getParent() != ToReplace->getParent()) {
  811. // Base and derived relocates are in different basic blocks.
812. // In this case the transform is only valid when the base dominates the
813. // derived relocate. However, it would be too expensive to check dominance
814. // for each such relocate, so we skip the whole transformation.
  815. continue;
  816. }
  817. Value *Base = ToReplace->getBasePtr();
  818. auto Derived = dyn_cast<GetElementPtrInst>(ToReplace->getDerivedPtr());
  819. if (!Derived || Derived->getPointerOperand() != Base)
  820. continue;
  821. SmallVector<Value *, 2> OffsetV;
  822. if (!getGEPSmallConstantIntOffsetV(Derived, OffsetV))
  823. continue;
  824. // Create a Builder and replace the target callsite with a gep
  825. assert(RelocatedBase->getNextNode() &&
  826. "Should always have one since it's not a terminator");
  827. // Insert after RelocatedBase
  828. IRBuilder<> Builder(RelocatedBase->getNextNode());
  829. Builder.SetCurrentDebugLocation(ToReplace->getDebugLoc());
  830. // If gc_relocate does not match the actual type, cast it to the right type.
  831. // In theory, there must be a bitcast after gc_relocate if the type does not
  832. // match, and we should reuse it to get the derived pointer. But it could be
  833. // cases like this:
  834. // bb1:
  835. // ...
  836. // %g1 = call coldcc i8 addrspace(1)* @llvm.experimental.gc.relocate.p1i8(...)
  837. // br label %merge
  838. //
  839. // bb2:
  840. // ...
  841. // %g2 = call coldcc i8 addrspace(1)* @llvm.experimental.gc.relocate.p1i8(...)
  842. // br label %merge
  843. //
  844. // merge:
  845. // %p1 = phi i8 addrspace(1)* [ %g1, %bb1 ], [ %g2, %bb2 ]
846. // %cast = bitcast i8 addrspace(1)* %p1 to i32 addrspace(1)*
  847. //
848. // In this case, we can no longer find the bitcast. So we insert a new bitcast
849. // whether or not one already exists. This way we handle all cases, and
850. // the extra bitcast should be optimized away by later passes.
  851. Value *ActualRelocatedBase = RelocatedBase;
  852. if (RelocatedBase->getType() != Base->getType()) {
  853. ActualRelocatedBase =
  854. Builder.CreateBitCast(RelocatedBase, Base->getType());
  855. }
  856. Value *Replacement = Builder.CreateGEP(
  857. Derived->getSourceElementType(), ActualRelocatedBase, makeArrayRef(OffsetV));
  858. Replacement->takeName(ToReplace);
  859. // If the newly generated derived pointer's type does not match the original derived
  860. // pointer's type, cast the new derived pointer to match it. Same reasoning as above.
  861. Value *ActualReplacement = Replacement;
  862. if (Replacement->getType() != ToReplace->getType()) {
  863. ActualReplacement =
  864. Builder.CreateBitCast(Replacement, ToReplace->getType());
  865. }
  866. ToReplace->replaceAllUsesWith(ActualReplacement);
  867. ToReplace->eraseFromParent();
  868. MadeChange = true;
  869. }
  870. return MadeChange;
  871. }
  872. // Turns this:
  873. //
  874. // %base = ...
  875. // %ptr = gep %base + 15
  876. // %tok = statepoint (%fun, i32 0, i32 0, i32 0, %base, %ptr)
  877. // %base' = relocate(%tok, i32 4, i32 4)
  878. // %ptr' = relocate(%tok, i32 4, i32 5)
  879. // %val = load %ptr'
  880. //
  881. // into this:
  882. //
  883. // %base = ...
  884. // %ptr = gep %base + 15
  885. // %tok = statepoint (%fun, i32 0, i32 0, i32 0, %base, %ptr)
  886. // %base' = gc.relocate(%tok, i32 4, i32 4)
  887. // %ptr' = gep %base' + 15
  888. // %val = load %ptr'
  889. bool CodeGenPrepare::simplifyOffsetableRelocate(Instruction &I) {
  890. bool MadeChange = false;
  891. SmallVector<GCRelocateInst *, 2> AllRelocateCalls;
  892. for (auto *U : I.users())
  893. if (GCRelocateInst *Relocate = dyn_cast<GCRelocateInst>(U))
  894. // Collect all the relocate calls associated with a statepoint
  895. AllRelocateCalls.push_back(Relocate);
896. // We need at least one base pointer relocation + one derived pointer
897. // relocation to mangle
  898. if (AllRelocateCalls.size() < 2)
  899. return false;
  900. // RelocateInstMap is a mapping from the base relocate instruction to the
  901. // corresponding derived relocate instructions
  902. DenseMap<GCRelocateInst *, SmallVector<GCRelocateInst *, 2>> RelocateInstMap;
  903. computeBaseDerivedRelocateMap(AllRelocateCalls, RelocateInstMap);
  904. if (RelocateInstMap.empty())
  905. return false;
  906. for (auto &Item : RelocateInstMap)
  907. // Item.first is the RelocatedBase to offset against
  908. // Item.second is the vector of Targets to replace
909. MadeChange |= simplifyRelocatesOffABase(Item.first, Item.second);
  910. return MadeChange;
  911. }
  912. /// SinkCast - Sink the specified cast instruction into its user blocks
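/// A minimal sketch with hypothetical IR (the sunk cast's name is
/// illustrative only): a cast defined in %def but used only in %use is
/// re-created in %use, so isel sees the def and the use together:
///   def:
///     %c = bitcast i32* %p to i8*
///     br label %use
///   use:
///     call void @f(i8* %c)
/// ==>
///   use:
///     %c.sunk = bitcast i32* %p to i8*
///     call void @f(i8* %c.sunk)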
  913. static bool SinkCast(CastInst *CI) {
  914. BasicBlock *DefBB = CI->getParent();
  915. /// InsertedCasts - Only insert a cast in each block once.
  916. DenseMap<BasicBlock*, CastInst*> InsertedCasts;
  917. bool MadeChange = false;
  918. for (Value::user_iterator UI = CI->user_begin(), E = CI->user_end();
  919. UI != E; ) {
  920. Use &TheUse = UI.getUse();
  921. Instruction *User = cast<Instruction>(*UI);
  922. // Figure out which BB this cast is used in. For PHI's this is the
  923. // appropriate predecessor block.
  924. BasicBlock *UserBB = User->getParent();
  925. if (PHINode *PN = dyn_cast<PHINode>(User)) {
  926. UserBB = PN->getIncomingBlock(TheUse);
  927. }
  928. // Preincrement use iterator so we don't invalidate it.
  929. ++UI;
  930. // The first insertion point of a block containing an EH pad is after the
  931. // pad. If the pad is the user, we cannot sink the cast past the pad.
  932. if (User->isEHPad())
  933. continue;
  934. // If the block selected to receive the cast is an EH pad that does not
  935. // allow non-PHI instructions before the terminator, we can't sink the
  936. // cast.
  937. if (UserBB->getTerminator()->isEHPad())
  938. continue;
  939. // If this user is in the same block as the cast, don't change the cast.
  940. if (UserBB == DefBB) continue;
  941. // If we have already inserted a cast into this block, use it.
  942. CastInst *&InsertedCast = InsertedCasts[UserBB];
  943. if (!InsertedCast) {
  944. BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt();
  945. assert(InsertPt != UserBB->end());
  946. InsertedCast = CastInst::Create(CI->getOpcode(), CI->getOperand(0),
  947. CI->getType(), "", &*InsertPt);
  948. }
  949. // Replace a use of the cast with a use of the new cast.
  950. TheUse = InsertedCast;
  951. MadeChange = true;
  952. ++NumCastUses;
  953. }
  954. // If we removed all uses, nuke the cast.
  955. if (CI->use_empty()) {
  956. CI->eraseFromParent();
  957. MadeChange = true;
  958. }
  959. return MadeChange;
  960. }
  961. /// If the specified cast instruction is a noop copy (e.g. it's casting from
  962. /// one pointer type to another, i32->i8 on PPC), sink it into user blocks to
  963. /// reduce the number of virtual registers that must be created and coalesced.
  964. ///
  965. /// Return true if any changes are made.
  966. ///
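/// For example, on a target that promotes i8/i16 to i32, a
/// 'trunc i32 %x to i16' maps to the same promoted type on both sides and is
/// treated as a noop copy that can be sunk. (Illustrative; the exact set of
/// noop casts is target-dependent.)
///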
  967. static bool OptimizeNoopCopyExpression(CastInst *CI, const TargetLowering &TLI,
  968. const DataLayout &DL) {
  969. // Sink only "cheap" (or nop) address-space casts. This is a weaker condition
  970. // than sinking only nop casts, but is helpful on some platforms.
  971. if (auto *ASC = dyn_cast<AddrSpaceCastInst>(CI)) {
  972. if (!TLI.isCheapAddrSpaceCast(ASC->getSrcAddressSpace(),
  973. ASC->getDestAddressSpace()))
  974. return false;
  975. }
  976. // If this is a noop copy,
  977. EVT SrcVT = TLI.getValueType(DL, CI->getOperand(0)->getType());
  978. EVT DstVT = TLI.getValueType(DL, CI->getType());
979. // If this is an fp<->int conversion, it is not a noop copy.
  980. if (SrcVT.isInteger() != DstVT.isInteger())
  981. return false;
  982. // If this is an extension, it will be a zero or sign extension, which
  983. // isn't a noop.
  984. if (SrcVT.bitsLT(DstVT)) return false;
  985. // If these values will be promoted, find out what they will be promoted
  986. // to. This helps us consider truncates on PPC as noop copies when they
  987. // are.
  988. if (TLI.getTypeAction(CI->getContext(), SrcVT) ==
  989. TargetLowering::TypePromoteInteger)
  990. SrcVT = TLI.getTypeToTransformTo(CI->getContext(), SrcVT);
  991. if (TLI.getTypeAction(CI->getContext(), DstVT) ==
  992. TargetLowering::TypePromoteInteger)
  993. DstVT = TLI.getTypeToTransformTo(CI->getContext(), DstVT);
  994. // If, after promotion, these are the same types, this is a noop copy.
  995. if (SrcVT != DstVT)
  996. return false;
  997. return SinkCast(CI);
  998. }
  999. /// Try to combine CI into a call to the llvm.uadd.with.overflow intrinsic if
  1000. /// possible.
  1001. ///
  1002. /// Return true if any changes were made.
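///
/// A sketch of the rewrite (hypothetical IR):
///   %add = add i32 %a, %b
///   %cmp = icmp ult i32 %add, %a        ; the overflow check matched by
///                                       ; m_UAddWithOverflow
/// becomes:
///   %res      = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %a, i32 %b)
///   %uadd     = extractvalue { i32, i1 } %res, 0
///   %overflow = extractvalue { i32, i1 } %res, 1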
  1003. static bool CombineUAddWithOverflow(CmpInst *CI) {
  1004. Value *A, *B;
  1005. Instruction *AddI;
  1006. if (!match(CI,
  1007. m_UAddWithOverflow(m_Value(A), m_Value(B), m_Instruction(AddI))))
  1008. return false;
  1009. Type *Ty = AddI->getType();
  1010. if (!isa<IntegerType>(Ty))
  1011. return false;
1012. // We don't want to move around uses of condition values this late, so we
  1013. // check if it is legal to create the call to the intrinsic in the basic
  1014. // block containing the icmp:
  1015. if (AddI->getParent() != CI->getParent() && !AddI->hasOneUse())
  1016. return false;
  1017. #ifndef NDEBUG
  1018. // Someday m_UAddWithOverflow may get smarter, but this is a safe assumption
  1019. // for now:
  1020. if (AddI->hasOneUse())
  1021. assert(*AddI->user_begin() == CI && "expected!");
  1022. #endif
  1023. Module *M = CI->getModule();
  1024. Value *F = Intrinsic::getDeclaration(M, Intrinsic::uadd_with_overflow, Ty);
  1025. auto *InsertPt = AddI->hasOneUse() ? CI : AddI;
  1026. auto *UAddWithOverflow =
  1027. CallInst::Create(F, {A, B}, "uadd.overflow", InsertPt);
  1028. auto *UAdd = ExtractValueInst::Create(UAddWithOverflow, 0, "uadd", InsertPt);
  1029. auto *Overflow =
  1030. ExtractValueInst::Create(UAddWithOverflow, 1, "overflow", InsertPt);
  1031. CI->replaceAllUsesWith(Overflow);
  1032. AddI->replaceAllUsesWith(UAdd);
  1033. CI->eraseFromParent();
  1034. AddI->eraseFromParent();
  1035. return true;
  1036. }
  1037. /// Sink the given CmpInst into user blocks to reduce the number of virtual
  1038. /// registers that must be created and coalesced. This is a clear win except on
  1039. /// targets with multiple condition code registers (PowerPC), where it might
  1040. /// lose; some adjustment may be wanted there.
  1041. ///
  1042. /// Return true if any changes are made.
  1043. static bool SinkCmpExpression(CmpInst *CI, const TargetLowering *TLI) {
  1044. BasicBlock *DefBB = CI->getParent();
  1045. // Avoid sinking soft-FP comparisons, since this can move them into a loop.
  1046. if (TLI && TLI->useSoftFloat() && isa<FCmpInst>(CI))
  1047. return false;
  1048. // Only insert a cmp in each block once.
  1049. DenseMap<BasicBlock*, CmpInst*> InsertedCmps;
  1050. bool MadeChange = false;
  1051. for (Value::user_iterator UI = CI->user_begin(), E = CI->user_end();
  1052. UI != E; ) {
  1053. Use &TheUse = UI.getUse();
  1054. Instruction *User = cast<Instruction>(*UI);
  1055. // Preincrement use iterator so we don't invalidate it.
  1056. ++UI;
  1057. // Don't bother for PHI nodes.
  1058. if (isa<PHINode>(User))
  1059. continue;
  1060. // Figure out which BB this cmp is used in.
  1061. BasicBlock *UserBB = User->getParent();
  1062. // If this user is in the same block as the cmp, don't change the cmp.
  1063. if (UserBB == DefBB) continue;
  1064. // If we have already inserted a cmp into this block, use it.
  1065. CmpInst *&InsertedCmp = InsertedCmps[UserBB];
  1066. if (!InsertedCmp) {
  1067. BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt();
  1068. assert(InsertPt != UserBB->end());
  1069. InsertedCmp =
  1070. CmpInst::Create(CI->getOpcode(), CI->getPredicate(),
  1071. CI->getOperand(0), CI->getOperand(1), "", &*InsertPt);
  1072. // Propagate the debug info.
  1073. InsertedCmp->setDebugLoc(CI->getDebugLoc());
  1074. }
  1075. // Replace a use of the cmp with a use of the new cmp.
  1076. TheUse = InsertedCmp;
  1077. MadeChange = true;
  1078. ++NumCmpUses;
  1079. }
  1080. // If we removed all uses, nuke the cmp.
  1081. if (CI->use_empty()) {
  1082. CI->eraseFromParent();
  1083. MadeChange = true;
  1084. }
  1085. return MadeChange;
  1086. }
  1087. static bool OptimizeCmpExpression(CmpInst *CI, const TargetLowering *TLI) {
  1088. if (SinkCmpExpression(CI, TLI))
  1089. return true;
  1090. if (CombineUAddWithOverflow(CI))
  1091. return true;
  1092. return false;
  1093. }
  1094. /// Duplicate and sink the given 'and' instruction into user blocks where it is
  1095. /// used in a compare to allow isel to generate better code for targets where
  1096. /// this operation can be combined.
  1097. ///
  1098. /// Return true if any changes are made.
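///
/// Sketch (hypothetical IR): the single 'and' below is re-created next to
/// each zero-compare so the mask-and-test can fold into one instruction:
///   %m = and i64 %x, 255
///   ...
///   bbA:  %c1 = icmp eq i64 %m, 0
///   bbB:  %c2 = icmp eq i64 %m, 0
/// ==> a separate 'and i64 %x, 255' is emitted in bbA and in bbB, and the
/// original 'and' is erased.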
  1099. static bool sinkAndCmp0Expression(Instruction *AndI,
  1100. const TargetLowering &TLI,
  1101. SetOfInstrs &InsertedInsts) {
  1102. // Double-check that we're not trying to optimize an instruction that was
  1103. // already optimized by some other part of this pass.
  1104. assert(!InsertedInsts.count(AndI) &&
  1105. "Attempting to optimize already optimized and instruction");
  1106. (void) InsertedInsts;
  1107. // Nothing to do for single use in same basic block.
  1108. if (AndI->hasOneUse() &&
  1109. AndI->getParent() == cast<Instruction>(*AndI->user_begin())->getParent())
  1110. return false;
  1111. // Try to avoid cases where sinking/duplicating is likely to increase register
  1112. // pressure.
  1113. if (!isa<ConstantInt>(AndI->getOperand(0)) &&
  1114. !isa<ConstantInt>(AndI->getOperand(1)) &&
  1115. AndI->getOperand(0)->hasOneUse() && AndI->getOperand(1)->hasOneUse())
  1116. return false;
  1117. for (auto *U : AndI->users()) {
  1118. Instruction *User = cast<Instruction>(U);
1119. // Only sink an 'and' mask that feeds an icmp with 0.
  1120. if (!isa<ICmpInst>(User))
  1121. return false;
  1122. auto *CmpC = dyn_cast<ConstantInt>(User->getOperand(1));
  1123. if (!CmpC || !CmpC->isZero())
  1124. return false;
  1125. }
  1126. if (!TLI.isMaskAndCmp0FoldingBeneficial(*AndI))
  1127. return false;
  1128. DEBUG(dbgs() << "found 'and' feeding only icmp 0;\n");
  1129. DEBUG(AndI->getParent()->dump());
  1130. // Push the 'and' into the same block as the icmp 0. There should only be
  1131. // one (icmp (and, 0)) in each block, since CSE/GVN should have removed any
  1132. // others, so we don't need to keep track of which BBs we insert into.
  1133. for (Value::user_iterator UI = AndI->user_begin(), E = AndI->user_end();
  1134. UI != E; ) {
  1135. Use &TheUse = UI.getUse();
  1136. Instruction *User = cast<Instruction>(*UI);
  1137. // Preincrement use iterator so we don't invalidate it.
  1138. ++UI;
  1139. DEBUG(dbgs() << "sinking 'and' use: " << *User << "\n");
  1140. // Keep the 'and' in the same place if the use is already in the same block.
  1141. Instruction *InsertPt =
  1142. User->getParent() == AndI->getParent() ? AndI : User;
  1143. Instruction *InsertedAnd =
  1144. BinaryOperator::Create(Instruction::And, AndI->getOperand(0),
  1145. AndI->getOperand(1), "", InsertPt);
  1146. // Propagate the debug info.
  1147. InsertedAnd->setDebugLoc(AndI->getDebugLoc());
  1148. // Replace a use of the 'and' with a use of the new 'and'.
  1149. TheUse = InsertedAnd;
  1150. ++NumAndUses;
  1151. DEBUG(User->getParent()->dump());
  1152. }
  1153. // We removed all uses, nuke the and.
  1154. AndI->eraseFromParent();
  1155. return true;
  1156. }
  1157. /// Check if the candidates could be combined with a shift instruction, which
  1158. /// includes:
  1159. /// 1. Truncate instruction
  1160. /// 2. And instruction and the imm is a mask of the low bits:
  1161. /// imm & (imm+1) == 0
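/// (e.g. imm = 0xFFFF qualifies since 0xFFFF & 0x10000 == 0, i.e. it masks
/// the low 16 bits, while imm = 0xFF00 does not: 0xFF00 & 0xFF01 != 0)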
  1162. static bool isExtractBitsCandidateUse(Instruction *User) {
  1163. if (!isa<TruncInst>(User)) {
  1164. if (User->getOpcode() != Instruction::And ||
  1165. !isa<ConstantInt>(User->getOperand(1)))
  1166. return false;
  1167. const APInt &Cimm = cast<ConstantInt>(User->getOperand(1))->getValue();
  1168. if ((Cimm & (Cimm + 1)).getBoolValue())
  1169. return false;
  1170. }
  1171. return true;
  1172. }
1173. /// Sink both the shift and truncate instructions into the BB of the truncate's user.
  1174. static bool
  1175. SinkShiftAndTruncate(BinaryOperator *ShiftI, Instruction *User, ConstantInt *CI,
  1176. DenseMap<BasicBlock *, BinaryOperator *> &InsertedShifts,
  1177. const TargetLowering &TLI, const DataLayout &DL) {
  1178. BasicBlock *UserBB = User->getParent();
  1179. DenseMap<BasicBlock *, CastInst *> InsertedTruncs;
1180. TruncInst *TruncI = cast<TruncInst>(User); // Caller guarantees a TruncInst.
  1181. bool MadeChange = false;
  1182. for (Value::user_iterator TruncUI = TruncI->user_begin(),
  1183. TruncE = TruncI->user_end();
  1184. TruncUI != TruncE;) {
  1185. Use &TruncTheUse = TruncUI.getUse();
  1186. Instruction *TruncUser = cast<Instruction>(*TruncUI);
  1187. // Preincrement use iterator so we don't invalidate it.
  1188. ++TruncUI;
  1189. int ISDOpcode = TLI.InstructionOpcodeToISD(TruncUser->getOpcode());
  1190. if (!ISDOpcode)
  1191. continue;
  1192. // If the use is actually a legal node, there will not be an
  1193. // implicit truncate.
  1194. // FIXME: always querying the result type is just an
  1195. // approximation; some nodes' legality is determined by the
  1196. // operand or other means. There's no good way to find out though.
  1197. if (TLI.isOperationLegalOrCustom(
  1198. ISDOpcode, TLI.getValueType(DL, TruncUser->getType(), true)))
  1199. continue;
  1200. // Don't bother for PHI nodes.
  1201. if (isa<PHINode>(TruncUser))
  1202. continue;
  1203. BasicBlock *TruncUserBB = TruncUser->getParent();
  1204. if (UserBB == TruncUserBB)
  1205. continue;
  1206. BinaryOperator *&InsertedShift = InsertedShifts[TruncUserBB];
  1207. CastInst *&InsertedTrunc = InsertedTruncs[TruncUserBB];
  1208. if (!InsertedShift && !InsertedTrunc) {
  1209. BasicBlock::iterator InsertPt = TruncUserBB->getFirstInsertionPt();
  1210. assert(InsertPt != TruncUserBB->end());
  1211. // Sink the shift
  1212. if (ShiftI->getOpcode() == Instruction::AShr)
  1213. InsertedShift = BinaryOperator::CreateAShr(ShiftI->getOperand(0), CI,
  1214. "", &*InsertPt);
  1215. else
  1216. InsertedShift = BinaryOperator::CreateLShr(ShiftI->getOperand(0), CI,
  1217. "", &*InsertPt);
  1218. // Sink the trunc
  1219. BasicBlock::iterator TruncInsertPt = TruncUserBB->getFirstInsertionPt();
  1220. TruncInsertPt++;
  1221. assert(TruncInsertPt != TruncUserBB->end());
  1222. InsertedTrunc = CastInst::Create(TruncI->getOpcode(), InsertedShift,
  1223. TruncI->getType(), "", &*TruncInsertPt);
  1224. MadeChange = true;
  1225. TruncTheUse = InsertedTrunc;
  1226. }
  1227. }
  1228. return MadeChange;
  1229. }
1230. /// Sink the shift *right* instruction into user blocks if the uses could
1231. /// potentially be combined with this shift instruction to generate a
1232. /// BitExtract instruction. This is only applied if the architecture supports
1233. /// a BitExtract instruction. Here is an example:
  1234. /// BB1:
  1235. /// %x.extract.shift = lshr i64 %arg1, 32
  1236. /// BB2:
  1237. /// %x.extract.trunc = trunc i64 %x.extract.shift to i16
  1238. /// ==>
  1239. ///
  1240. /// BB2:
  1241. /// %x.extract.shift.1 = lshr i64 %arg1, 32
  1242. /// %x.extract.trunc = trunc i64 %x.extract.shift.1 to i16
  1243. ///
1244. /// CodeGen will recognize the pattern in BB2 and generate a BitExtract
1245. /// instruction.
  1246. /// Return true if any changes are made.
  1247. static bool OptimizeExtractBits(BinaryOperator *ShiftI, ConstantInt *CI,
  1248. const TargetLowering &TLI,
  1249. const DataLayout &DL) {
  1250. BasicBlock *DefBB = ShiftI->getParent();
  1251. /// Only insert instructions in each block once.
  1252. DenseMap<BasicBlock *, BinaryOperator *> InsertedShifts;
  1253. bool shiftIsLegal = TLI.isTypeLegal(TLI.getValueType(DL, ShiftI->getType()));
  1254. bool MadeChange = false;
  1255. for (Value::user_iterator UI = ShiftI->user_begin(), E = ShiftI->user_end();
  1256. UI != E;) {
  1257. Use &TheUse = UI.getUse();
  1258. Instruction *User = cast<Instruction>(*UI);
  1259. // Preincrement use iterator so we don't invalidate it.
  1260. ++UI;
  1261. // Don't bother for PHI nodes.
  1262. if (isa<PHINode>(User))
  1263. continue;
  1264. if (!isExtractBitsCandidateUse(User))
  1265. continue;
  1266. BasicBlock *UserBB = User->getParent();
  1267. if (UserBB == DefBB) {
1268. // If the shift and truncate instructions are in the same BB, the use of
1269. // the truncate (TruncUse) may still introduce another implicit truncate
1270. // if its type is not legal. In this case, we would like to sink both the
1271. // shift and the truncate into the BB of TruncUse.
  1272. // for example:
  1273. // BB1:
  1274. // i64 shift.result = lshr i64 opnd, imm
  1275. // trunc.result = trunc shift.result to i16
  1276. //
  1277. // BB2:
  1278. // ----> We will have an implicit truncate here if the architecture does
  1279. // not have i16 compare.
  1280. // cmp i16 trunc.result, opnd2
  1281. //
1282. // If the type of the truncate is legal, no truncate will be
1283. // introduced in other basic blocks.
1284. if (isa<TruncInst>(User) && shiftIsLegal &&
1285. !TLI.isTypeLegal(TLI.getValueType(DL, User->getType())))
1286. MadeChange |=
1287. SinkShiftAndTruncate(ShiftI, User, CI, InsertedShifts, TLI, DL);
  1289. continue;
  1290. }
  1291. // If we have already inserted a shift into this block, use it.
  1292. BinaryOperator *&InsertedShift = InsertedShifts[UserBB];
  1293. if (!InsertedShift) {
  1294. BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt();
  1295. assert(InsertPt != UserBB->end());
  1296. if (ShiftI->getOpcode() == Instruction::AShr)
  1297. InsertedShift = BinaryOperator::CreateAShr(ShiftI->getOperand(0), CI,
  1298. "", &*InsertPt);
  1299. else
  1300. InsertedShift = BinaryOperator::CreateLShr(ShiftI->getOperand(0), CI,
  1301. "", &*InsertPt);
  1302. MadeChange = true;
  1303. }
  1304. // Replace a use of the shift with a use of the new shift.
  1305. TheUse = InsertedShift;
  1306. }
  1307. // If we removed all uses, nuke the shift.
  1308. if (ShiftI->use_empty())
  1309. ShiftI->eraseFromParent();
  1310. return MadeChange;
  1311. }
  1312. // Translate a masked load intrinsic like
1313. // <16 x i32> @llvm.masked.load(<16 x i32>* %addr, i32 align,
1314. // <16 x i1> %mask, <16 x i32> %passthru)
1315. // to a chain of basic blocks, loading the elements one-by-one if
1316. // the appropriate mask bit is set
  1317. //
  1318. // %1 = bitcast i8* %addr to i32*
  1319. // %2 = extractelement <16 x i1> %mask, i32 0
  1320. // %3 = icmp eq i1 %2, true
  1321. // br i1 %3, label %cond.load, label %else
  1322. //
  1323. //cond.load: ; preds = %0
  1324. // %4 = getelementptr i32* %1, i32 0
  1325. // %5 = load i32* %4
  1326. // %6 = insertelement <16 x i32> undef, i32 %5, i32 0
  1327. // br label %else
  1328. //
  1329. //else: ; preds = %0, %cond.load
  1330. // %res.phi.else = phi <16 x i32> [ %6, %cond.load ], [ undef, %0 ]
  1331. // %7 = extractelement <16 x i1> %mask, i32 1
  1332. // %8 = icmp eq i1 %7, true
  1333. // br i1 %8, label %cond.load1, label %else2
  1334. //
  1335. //cond.load1: ; preds = %else
  1336. // %9 = getelementptr i32* %1, i32 1
  1337. // %10 = load i32* %9
  1338. // %11 = insertelement <16 x i32> %res.phi.else, i32 %10, i32 1
  1339. // br label %else2
  1340. //
  1341. //else2: ; preds = %else, %cond.load1
  1342. // %res.phi.else3 = phi <16 x i32> [ %11, %cond.load1 ], [ %res.phi.else, %else ]
  1343. // %12 = extractelement <16 x i1> %mask, i32 2
  1344. // %13 = icmp eq i1 %12, true
  1345. // br i1 %13, label %cond.load4, label %else5
  1346. //
  1347. static void scalarizeMaskedLoad(CallInst *CI) {
  1348. Value *Ptr = CI->getArgOperand(0);
  1349. Value *Alignment = CI->getArgOperand(1);
  1350. Value *Mask = CI->getArgOperand(2);
  1351. Value *Src0 = CI->getArgOperand(3);
  1352. unsigned AlignVal = cast<ConstantInt>(Alignment)->getZExtValue();
  1353. VectorType *VecType = dyn_cast<VectorType>(CI->getType());
  1354. assert(VecType && "Unexpected return type of masked load intrinsic");
  1355. Type *EltTy = CI->getType()->getVectorElementType();
  1356. IRBuilder<> Builder(CI->getContext());
  1357. Instruction *InsertPt = CI;
  1358. BasicBlock *IfBlock = CI->getParent();
  1359. BasicBlock *CondBlock = nullptr;
  1360. BasicBlock *PrevIfBlock = CI->getParent();
  1361. Builder.SetInsertPoint(InsertPt);
  1362. Builder.SetCurrentDebugLocation(CI->getDebugLoc());
  1363. // Short-cut if the mask is all-true.
  1364. bool IsAllOnesMask = isa<Constant>(Mask) &&
  1365. cast<Constant>(Mask)->isAllOnesValue();
  1366. if (IsAllOnesMask) {
  1367. Value *NewI = Builder.CreateAlignedLoad(Ptr, AlignVal);
  1368. CI->replaceAllUsesWith(NewI);
  1369. CI->eraseFromParent();
  1370. return;
  1371. }
  1372. // Adjust alignment for the scalar instruction.
  1373. AlignVal = std::min(AlignVal, VecType->getScalarSizeInBits()/8);
1374. // Bitcast %addr from i8* to EltTy*
  1375. Type *NewPtrType =
  1376. EltTy->getPointerTo(cast<PointerType>(Ptr->getType())->getAddressSpace());
  1377. Value *FirstEltPtr = Builder.CreateBitCast(Ptr, NewPtrType);
  1378. unsigned VectorWidth = VecType->getNumElements();
  1379. Value *UndefVal = UndefValue::get(VecType);
  1380. // The result vector
  1381. Value *VResult = UndefVal;
  1382. if (isa<ConstantVector>(Mask)) {
  1383. for (unsigned Idx = 0; Idx < VectorWidth; ++Idx) {
  1384. if (cast<ConstantVector>(Mask)->getOperand(Idx)->isNullValue())
  1385. continue;
  1386. Value *Gep =
  1387. Builder.CreateInBoundsGEP(EltTy, FirstEltPtr, Builder.getInt32(Idx));
  1388. LoadInst* Load = Builder.CreateAlignedLoad(Gep, AlignVal);
  1389. VResult = Builder.CreateInsertElement(VResult, Load,
  1390. Builder.getInt32(Idx));
  1391. }
  1392. Value *NewI = Builder.CreateSelect(Mask, VResult, Src0);
  1393. CI->replaceAllUsesWith(NewI);
  1394. CI->eraseFromParent();
  1395. return;
  1396. }
  1397. PHINode *Phi = nullptr;
  1398. Value *PrevPhi = UndefVal;
  1399. for (unsigned Idx = 0; Idx < VectorWidth; ++Idx) {
  1400. // Fill the "else" block, created in the previous iteration
  1401. //
  1402. // %res.phi.else3 = phi <16 x i32> [ %11, %cond.load1 ], [ %res.phi.else, %else ]
  1403. // %mask_1 = extractelement <16 x i1> %mask, i32 Idx
  1404. // %to_load = icmp eq i1 %mask_1, true
  1405. // br i1 %to_load, label %cond.load, label %else
  1406. //
  1407. if (Idx > 0) {
  1408. Phi = Builder.CreatePHI(VecType, 2, "res.phi.else");
  1409. Phi->addIncoming(VResult, CondBlock);
  1410. Phi->addIncoming(PrevPhi, PrevIfBlock);
  1411. PrevPhi = Phi;
  1412. VResult = Phi;
  1413. }
  1414. Value *Predicate = Builder.CreateExtractElement(Mask, Builder.getInt32(Idx));
  1415. Value *Cmp = Builder.CreateICmp(ICmpInst::ICMP_EQ, Predicate,
  1416. ConstantInt::get(Predicate->getType(), 1));
  1417. // Create "cond" block
  1418. //
  1419. // %EltAddr = getelementptr i32* %1, i32 0
  1420. // %Elt = load i32* %EltAddr
  1421. // VResult = insertelement <16 x i32> VResult, i32 %Elt, i32 Idx
  1422. //
  1423. CondBlock = IfBlock->splitBasicBlock(InsertPt->getIterator(), "cond.load");
  1424. Builder.SetInsertPoint(InsertPt);
  1425. Value *Gep =
  1426. Builder.CreateInBoundsGEP(EltTy, FirstEltPtr, Builder.getInt32(Idx));
  1427. LoadInst *Load = Builder.CreateAlignedLoad(Gep, AlignVal);
  1428. VResult = Builder.CreateInsertElement(VResult, Load, Builder.getInt32(Idx));
  1429. // Create "else" block, fill it in the next iteration
  1430. BasicBlock *NewIfBlock =
  1431. CondBlock->splitBasicBlock(InsertPt->getIterator(), "else");
  1432. Builder.SetInsertPoint(InsertPt);
  1433. Instruction *OldBr = IfBlock->getTerminator();
  1434. BranchInst::Create(CondBlock, NewIfBlock, Cmp, OldBr);
  1435. OldBr->eraseFromParent();
  1436. PrevIfBlock = IfBlock;
  1437. IfBlock = NewIfBlock;
  1438. }
  1439. Phi = Builder.CreatePHI(VecType, 2, "res.phi.select");
  1440. Phi->addIncoming(VResult, CondBlock);
  1441. Phi->addIncoming(PrevPhi, PrevIfBlock);
  1442. Value *NewI = Builder.CreateSelect(Mask, Phi, Src0);
  1443. CI->replaceAllUsesWith(NewI);
  1444. CI->eraseFromParent();
  1445. }
  1446. // Translate a masked store intrinsic, like
  1447. // void @llvm.masked.store(<16 x i32> %src, <16 x i32>* %addr, i32 align,
  1448. // <16 x i1> %mask)
1449. // to a chain of basic blocks that store the elements one-by-one if
1450. // the appropriate mask bit is set
  1451. //
  1452. // %1 = bitcast i8* %addr to i32*
  1453. // %2 = extractelement <16 x i1> %mask, i32 0
  1454. // %3 = icmp eq i1 %2, true
  1455. // br i1 %3, label %cond.store, label %else
  1456. //
  1457. // cond.store: ; preds = %0
  1458. // %4 = extractelement <16 x i32> %val, i32 0
  1459. // %5 = getelementptr i32* %1, i32 0
  1460. // store i32 %4, i32* %5
  1461. // br label %else
  1462. //
  1463. // else: ; preds = %0, %cond.store
  1464. // %6 = extractelement <16 x i1> %mask, i32 1
  1465. // %7 = icmp eq i1 %6, true
  1466. // br i1 %7, label %cond.store1, label %else2
  1467. //
  1468. // cond.store1: ; preds = %else
  1469. // %8 = extractelement <16 x i32> %val, i32 1
  1470. // %9 = getelementptr i32* %1, i32 1
  1471. // store i32 %8, i32* %9
  1472. // br label %else2
  1473. // . . .
  1474. static void scalarizeMaskedStore(CallInst *CI) {
  1475. Value *Src = CI->getArgOperand(0);
  1476. Value *Ptr = CI->getArgOperand(1);
  1477. Value *Alignment = CI->getArgOperand(2);
  1478. Value *Mask = CI->getArgOperand(3);
  1479. unsigned AlignVal = cast<ConstantInt>(Alignment)->getZExtValue();
  1480. VectorType *VecType = dyn_cast<VectorType>(Src->getType());
  1481. assert(VecType && "Unexpected data type in masked store intrinsic");
  1482. Type *EltTy = VecType->getElementType();
  1483. IRBuilder<> Builder(CI->getContext());
  1484. Instruction *InsertPt = CI;
  1485. BasicBlock *IfBlock = CI->getParent();
  1486. Builder.SetInsertPoint(InsertPt);
  1487. Builder.SetCurrentDebugLocation(CI->getDebugLoc());
  1488. // Short-cut if the mask is all-true.
  1489. bool IsAllOnesMask = isa<Constant>(Mask) &&
  1490. cast<Constant>(Mask)->isAllOnesValue();
  1491. if (IsAllOnesMask) {
  1492. Builder.CreateAlignedStore(Src, Ptr, AlignVal);
  1493. CI->eraseFromParent();
  1494. return;
  1495. }
1496. // Adjust alignment for the scalar instruction.
1497. AlignVal = std::min(AlignVal, VecType->getScalarSizeInBits()/8);
1498. // Bitcast %addr from i8* to EltTy*
  1499. Type *NewPtrType =
  1500. EltTy->getPointerTo(cast<PointerType>(Ptr->getType())->getAddressSpace());
  1501. Value *FirstEltPtr = Builder.CreateBitCast(Ptr, NewPtrType);
  1502. unsigned VectorWidth = VecType->getNumElements();
  1503. if (isa<ConstantVector>(Mask)) {
  1504. for (unsigned Idx = 0; Idx < VectorWidth; ++Idx) {
  1505. if (cast<ConstantVector>(Mask)->getOperand(Idx)->isNullValue())
  1506. continue;
  1507. Value *OneElt = Builder.CreateExtractElement(Src, Builder.getInt32(Idx));
  1508. Value *Gep =
  1509. Builder.CreateInBoundsGEP(EltTy, FirstEltPtr, Builder.getInt32(Idx));
  1510. Builder.CreateAlignedStore(OneElt, Gep, AlignVal);
  1511. }
  1512. CI->eraseFromParent();
  1513. return;
  1514. }
  1515. for (unsigned Idx = 0; Idx < VectorWidth; ++Idx) {
  1516. // Fill the "else" block, created in the previous iteration
  1517. //
  1518. // %mask_1 = extractelement <16 x i1> %mask, i32 Idx
  1519. // %to_store = icmp eq i1 %mask_1, true
  1520. // br i1 %to_store, label %cond.store, label %else
  1521. //
  1522. Value *Predicate = Builder.CreateExtractElement(Mask, Builder.getInt32(Idx));
  1523. Value *Cmp = Builder.CreateICmp(ICmpInst::ICMP_EQ, Predicate,
  1524. ConstantInt::get(Predicate->getType(), 1));
  1525. // Create "cond" block
  1526. //
  1527. // %OneElt = extractelement <16 x i32> %Src, i32 Idx
  1528. // %EltAddr = getelementptr i32* %1, i32 0
1529. // store i32 %OneElt, i32* %EltAddr
  1530. //
  1531. BasicBlock *CondBlock =
  1532. IfBlock->splitBasicBlock(InsertPt->getIterator(), "cond.store");
  1533. Builder.SetInsertPoint(InsertPt);
  1534. Value *OneElt = Builder.CreateExtractElement(Src, Builder.getInt32(Idx));
  1535. Value *Gep =
  1536. Builder.CreateInBoundsGEP(EltTy, FirstEltPtr, Builder.getInt32(Idx));
  1537. Builder.CreateAlignedStore(OneElt, Gep, AlignVal);
  1538. // Create "else" block, fill it in the next iteration
  1539. BasicBlock *NewIfBlock =
  1540. CondBlock->splitBasicBlock(InsertPt->getIterator(), "else");
  1541. Builder.SetInsertPoint(InsertPt);
  1542. Instruction *OldBr = IfBlock->getTerminator();
  1543. BranchInst::Create(CondBlock, NewIfBlock, Cmp, OldBr);
  1544. OldBr->eraseFromParent();
  1545. IfBlock = NewIfBlock;
  1546. }
  1547. CI->eraseFromParent();
  1548. }
  1549. // Translate a masked gather intrinsic like
1550. // <16 x i32> @llvm.masked.gather.v16i32(<16 x i32*> %Ptrs, i32 4,
1551. // <16 x i1> %Mask, <16 x i32> %Src)
1552. // to a chain of basic blocks, loading the elements one-by-one if
1553. // the appropriate mask bit is set
  1554. //
1555. // %Ptrs = getelementptr i32, i32* %base, <16 x i64> %ind
1556. // %Mask0 = extractelement <16 x i1> %Mask, i32 0
1557. // %ToLoad0 = icmp eq i1 %Mask0, true
1558. // br i1 %ToLoad0, label %cond.load, label %else
1559. //
1560. // cond.load:
1561. // %Ptr0 = extractelement <16 x i32*> %Ptrs, i32 0
1562. // %Load0 = load i32, i32* %Ptr0, align 4
1563. // %Res0 = insertelement <16 x i32> undef, i32 %Load0, i32 0
1564. // br label %else
1565. //
1566. // else:
1567. // %res.phi.else = phi <16 x i32> [ %Res0, %cond.load ], [ undef, %0 ]
1568. // %Mask1 = extractelement <16 x i1> %Mask, i32 1
1569. // %ToLoad1 = icmp eq i1 %Mask1, true
1570. // br i1 %ToLoad1, label %cond.load1, label %else2
1571. //
1572. // cond.load1:
1573. // %Ptr1 = extractelement <16 x i32*> %Ptrs, i32 1
1574. // %Load1 = load i32, i32* %Ptr1, align 4
1575. // %Res1 = insertelement <16 x i32> %res.phi.else, i32 %Load1, i32 1
1576. // br label %else2
1577. // . . .
1578. // %Result = select <16 x i1> %Mask, <16 x i32> %res.phi.select, <16 x i32> %Src
1579. // ret <16 x i32> %Result
  1580. static void scalarizeMaskedGather(CallInst *CI) {
  1581. Value *Ptrs = CI->getArgOperand(0);
  1582. Value *Alignment = CI->getArgOperand(1);
  1583. Value *Mask = CI->getArgOperand(2);
  1584. Value *Src0 = CI->getArgOperand(3);
  1585. VectorType *VecType = dyn_cast<VectorType>(CI->getType());
1586. assert(VecType && "Unexpected return type of masked gather intrinsic");
  1587. IRBuilder<> Builder(CI->getContext());
  1588. Instruction *InsertPt = CI;
  1589. BasicBlock *IfBlock = CI->getParent();
  1590. BasicBlock *CondBlock = nullptr;
  1591. BasicBlock *PrevIfBlock = CI->getParent();
  1592. Builder.SetInsertPoint(InsertPt);
  1593. unsigned AlignVal = cast<ConstantInt>(Alignment)->getZExtValue();
  1594. Builder.SetCurrentDebugLocation(CI->getDebugLoc());
  1595. Value *UndefVal = UndefValue::get(VecType);
  1596. // The result vector
  1597. Value *VResult = UndefVal;
  1598. unsigned VectorWidth = VecType->getNumElements();
1599. // Take the fast path if the mask is a vector of constants.
  1600. bool IsConstMask = isa<ConstantVector>(Mask);
  1601. if (IsConstMask) {
  1602. for (unsigned Idx = 0; Idx < VectorWidth; ++Idx) {
  1603. if (cast<ConstantVector>(Mask)->getOperand(Idx)->isNullValue())
  1604. continue;
  1605. Value *Ptr = Builder.CreateExtractElement(Ptrs, Builder.getInt32(Idx),
  1606. "Ptr" + Twine(Idx));
  1607. LoadInst *Load = Builder.CreateAlignedLoad(Ptr, AlignVal,
  1608. "Load" + Twine(Idx));
  1609. VResult = Builder.CreateInsertElement(VResult, Load,
  1610. Builder.getInt32(Idx),
  1611. "Res" + Twine(Idx));
  1612. }
  1613. Value *NewI = Builder.CreateSelect(Mask, VResult, Src0);
  1614. CI->replaceAllUsesWith(NewI);
  1615. CI->eraseFromParent();
  1616. return;
  1617. }
  1618. PHINode *Phi = nullptr;
  1619. Value *PrevPhi = UndefVal;
  1620. for (unsigned Idx = 0; Idx < VectorWidth; ++Idx) {
  1621. // Fill the "else" block, created in the previous iteration
  1622. //
  1623. // %Mask1 = extractelement <16 x i1> %Mask, i32 1
  1624. // %ToLoad1 = icmp eq i1 %Mask1, true
  1625. // br i1 %ToLoad1, label %cond.load, label %else
  1626. //
  1627. if (Idx > 0) {
  1628. Phi = Builder.CreatePHI(VecType, 2, "res.phi.else");
  1629. Phi->addIncoming(VResult, CondBlock);
  1630. Phi->addIncoming(PrevPhi, PrevIfBlock);
  1631. PrevPhi = Phi;
  1632. VResult = Phi;
  1633. }
  1634. Value *Predicate = Builder.CreateExtractElement(Mask,
  1635. Builder.getInt32(Idx),
  1636. "Mask" + Twine(Idx));
  1637. Value *Cmp = Builder.CreateICmp(ICmpInst::ICMP_EQ, Predicate,
  1638. ConstantInt::get(Predicate->getType(), 1),
  1639. "ToLoad" + Twine(Idx));
  1640. // Create "cond" block
  1641. //
  1642. // %EltAddr = getelementptr i32* %1, i32 0
  1643. // %Elt = load i32* %EltAddr
  1644. // VResult = insertelement <16 x i32> VResult, i32 %Elt, i32 Idx
  1645. //
  1646. CondBlock = IfBlock->splitBasicBlock(InsertPt, "cond.load");
  1647. Builder.SetInsertPoint(InsertPt);
  1648. Value *Ptr = Builder.CreateExtractElement(Ptrs, Builder.getInt32(Idx),
  1649. "Ptr" + Twine(Idx));
  1650. LoadInst *Load = Builder.CreateAlignedLoad(Ptr, AlignVal,
  1651. "Load" + Twine(Idx));
  1652. VResult = Builder.CreateInsertElement(VResult, Load, Builder.getInt32(Idx),
  1653. "Res" + Twine(Idx));
  1654. // Create "else" block, fill it in the next iteration
  1655. BasicBlock *NewIfBlock = CondBlock->splitBasicBlock(InsertPt, "else");
  1656. Builder.SetInsertPoint(InsertPt);
  1657. Instruction *OldBr = IfBlock->getTerminator();
  1658. BranchInst::Create(CondBlock, NewIfBlock, Cmp, OldBr);
  1659. OldBr->eraseFromParent();
  1660. PrevIfBlock = IfBlock;
  1661. IfBlock = NewIfBlock;
  1662. }
  1663. Phi = Builder.CreatePHI(VecType, 2, "res.phi.select");
  1664. Phi->addIncoming(VResult, CondBlock);
  1665. Phi->addIncoming(PrevPhi, PrevIfBlock);
  1666. Value *NewI = Builder.CreateSelect(Mask, Phi, Src0);
  1667. CI->replaceAllUsesWith(NewI);
  1668. CI->eraseFromParent();
  1669. }
  1670. // Translate a masked scatter intrinsic, like
  1671. // void @llvm.masked.scatter.v16i32(<16 x i32> %Src, <16 x i32*>* %Ptrs, i32 4,
  1672. // <16 x i1> %Mask)
1673. // to a chain of basic blocks that store the elements one-by-one if
1674. // the appropriate mask bit is set.
  1675. //
1676. // %Ptrs = getelementptr i32, i32* %ptr, <16 x i64> %ind
1677. // %Mask0 = extractelement <16 x i1> %Mask, i32 0
1678. // %ToStore0 = icmp eq i1 %Mask0, true
1679. // br i1 %ToStore0, label %cond.store, label %else
1680. //
1681. // cond.store:
1682. // %Elt0 = extractelement <16 x i32> %Src, i32 0
1683. // %Ptr0 = extractelement <16 x i32*> %Ptrs, i32 0
1684. // store i32 %Elt0, i32* %Ptr0, align 4
1685. // br label %else
1686. //
1687. // else:
1688. // %Mask1 = extractelement <16 x i1> %Mask, i32 1
1689. // %ToStore1 = icmp eq i1 %Mask1, true
1690. // br i1 %ToStore1, label %cond.store1, label %else2
1691. //
1692. // cond.store1:
1693. // %Elt1 = extractelement <16 x i32> %Src, i32 1
1694. // %Ptr1 = extractelement <16 x i32*> %Ptrs, i32 1
1695. // store i32 %Elt1, i32* %Ptr1, align 4
1696. // br label %else2
  1697. // . . .
  1698. static void scalarizeMaskedScatter(CallInst *CI) {
  1699. Value *Src = CI->getArgOperand(0);
  1700. Value *Ptrs = CI->getArgOperand(1);
  1701. Value *Alignment = CI->getArgOperand(2);
  1702. Value *Mask = CI->getArgOperand(3);
  1703. assert(isa<VectorType>(Src->getType()) &&
  1704. "Unexpected data type in masked scatter intrinsic");
  1705. assert(isa<VectorType>(Ptrs->getType()) &&
  1706. isa<PointerType>(Ptrs->getType()->getVectorElementType()) &&
  1707. "Vector of pointers is expected in masked scatter intrinsic");
  1708. IRBuilder<> Builder(CI->getContext());
  1709. Instruction *InsertPt = CI;
  1710. BasicBlock *IfBlock = CI->getParent();
  1711. Builder.SetInsertPoint(InsertPt);
  1712. Builder.SetCurrentDebugLocation(CI->getDebugLoc());
  1713. unsigned AlignVal = cast<ConstantInt>(Alignment)->getZExtValue();
  1714. unsigned VectorWidth = Src->getType()->getVectorNumElements();
1715. // Take the fast path if the mask is a vector of constants.
  1716. bool IsConstMask = isa<ConstantVector>(Mask);
  1717. if (IsConstMask) {
  1718. for (unsigned Idx = 0; Idx < VectorWidth; ++Idx) {
  1719. if (cast<ConstantVector>(Mask)->getOperand(Idx)->isNullValue())
  1720. continue;
  1721. Value *OneElt = Builder.CreateExtractElement(Src, Builder.getInt32(Idx),
  1722. "Elt" + Twine(Idx));
  1723. Value *Ptr = Builder.CreateExtractElement(Ptrs, Builder.getInt32(Idx),
  1724. "Ptr" + Twine(Idx));
  1725. Builder.CreateAlignedStore(OneElt, Ptr, AlignVal);
  1726. }
  1727. CI->eraseFromParent();
  1728. return;
  1729. }
  1730. for (unsigned Idx = 0; Idx < VectorWidth; ++Idx) {
  1731. // Fill the "else" block, created in the previous iteration
  1732. //
1733. // %Mask1 = extractelement <16 x i1> %Mask, i32 Idx
1734. // %ToStore = icmp eq i1 %Mask1, true
1735. // br i1 %ToStore, label %cond.store, label %else
  1736. //
  1737. Value *Predicate = Builder.CreateExtractElement(Mask,
  1738. Builder.getInt32(Idx),
  1739. "Mask" + Twine(Idx));
  1740. Value *Cmp =
  1741. Builder.CreateICmp(ICmpInst::ICMP_EQ, Predicate,
  1742. ConstantInt::get(Predicate->getType(), 1),
  1743. "ToStore" + Twine(Idx));
  1744. // Create "cond" block
  1745. //
1746. // %Elt1 = extractelement <16 x i32> %Src, i32 1
1747. // %Ptr1 = extractelement <16 x i32*> %Ptrs, i32 1
1748. // store i32 %Elt1, i32* %Ptr1
  1749. //
  1750. BasicBlock *CondBlock = IfBlock->splitBasicBlock(InsertPt, "cond.store");
  1751. Builder.SetInsertPoint(InsertPt);
  1752. Value *OneElt = Builder.CreateExtractElement(Src, Builder.getInt32(Idx),
  1753. "Elt" + Twine(Idx));
  1754. Value *Ptr = Builder.CreateExtractElement(Ptrs, Builder.getInt32(Idx),
  1755. "Ptr" + Twine(Idx));
  1756. Builder.CreateAlignedStore(OneElt, Ptr, AlignVal);
  1757. // Create "else" block, fill it in the next iteration
  1758. BasicBlock *NewIfBlock = CondBlock->splitBasicBlock(InsertPt, "else");
  1759. Builder.SetInsertPoint(InsertPt);
  1760. Instruction *OldBr = IfBlock->getTerminator();
  1761. BranchInst::Create(CondBlock, NewIfBlock, Cmp, OldBr);
  1762. OldBr->eraseFromParent();
  1763. IfBlock = NewIfBlock;
  1764. }
  1765. CI->eraseFromParent();
  1766. }
  1767. /// If counting leading or trailing zeros is an expensive operation and a zero
  1768. /// input is defined, add a check for zero to avoid calling the intrinsic.
  1769. ///
  1770. /// We want to transform:
  1771. /// %z = call i64 @llvm.cttz.i64(i64 %A, i1 false)
  1772. ///
  1773. /// into:
  1774. /// entry:
  1775. /// %cmpz = icmp eq i64 %A, 0
  1776. /// br i1 %cmpz, label %cond.end, label %cond.false
  1777. /// cond.false:
  1778. /// %z = call i64 @llvm.cttz.i64(i64 %A, i1 true)
  1779. /// br label %cond.end
  1780. /// cond.end:
  1781. /// %ctz = phi i64 [ 64, %entry ], [ %z, %cond.false ]
  1782. ///
  1783. /// If the transform is performed, return true and set ModifiedDT to true.
  1784. static bool despeculateCountZeros(IntrinsicInst *CountZeros,
  1785. const TargetLowering *TLI,
  1786. const DataLayout *DL,
  1787. bool &ModifiedDT) {
  1788. if (!TLI || !DL)
  1789. return false;
  1790. // If a zero input is undefined, it doesn't make sense to despeculate that.
  1791. if (match(CountZeros->getOperand(1), m_One()))
  1792. return false;
  1793. // If it's cheap to speculate, there's nothing to do.
  1794. auto IntrinsicID = CountZeros->getIntrinsicID();
  1795. if ((IntrinsicID == Intrinsic::cttz && TLI->isCheapToSpeculateCttz()) ||
  1796. (IntrinsicID == Intrinsic::ctlz && TLI->isCheapToSpeculateCtlz()))
  1797. return false;
  1798. // Only handle legal scalar cases. Anything else requires too much work.
  1799. Type *Ty = CountZeros->getType();
  1800. unsigned SizeInBits = Ty->getPrimitiveSizeInBits();
  1801. if (Ty->isVectorTy() || SizeInBits > DL->getLargestLegalIntTypeSizeInBits())
  1802. return false;
  1803. // The intrinsic will be sunk behind a compare against zero and branch.
  1804. BasicBlock *StartBlock = CountZeros->getParent();
  1805. BasicBlock *CallBlock = StartBlock->splitBasicBlock(CountZeros, "cond.false");
  1806. // Create another block after the count zero intrinsic. A PHI will be added
  1807. // in this block to select the result of the intrinsic or the bit-width
  1808. // constant if the input to the intrinsic is zero.
  1809. BasicBlock::iterator SplitPt = ++(BasicBlock::iterator(CountZeros));
  1810. BasicBlock *EndBlock = CallBlock->splitBasicBlock(SplitPt, "cond.end");
  1811. // Set up a builder to create a compare, conditional branch, and PHI.
  1812. IRBuilder<> Builder(CountZeros->getContext());
  1813. Builder.SetInsertPoint(StartBlock->getTerminator());
  1814. Builder.SetCurrentDebugLocation(CountZeros->getDebugLoc());
  1815. // Replace the unconditional branch that was created by the first split with
  1816. // a compare against zero and a conditional branch.
  1817. Value *Zero = Constant::getNullValue(Ty);
  1818. Value *Cmp = Builder.CreateICmpEQ(CountZeros->getOperand(0), Zero, "cmpz");
  1819. Builder.CreateCondBr(Cmp, EndBlock, CallBlock);
  1820. StartBlock->getTerminator()->eraseFromParent();
  1821. // Create a PHI in the end block to select either the output of the intrinsic
  1822. // or the bit width of the operand.
  1823. Builder.SetInsertPoint(&EndBlock->front());
  1824. PHINode *PN = Builder.CreatePHI(Ty, 2, "ctz");
  1825. CountZeros->replaceAllUsesWith(PN);
  1826. Value *BitWidth = Builder.getInt(APInt(SizeInBits, SizeInBits));
  1827. PN->addIncoming(BitWidth, StartBlock);
  1828. PN->addIncoming(CountZeros, CallBlock);
  1829. // We are explicitly handling the zero case, so we can set the intrinsic's
  1830. // undefined zero argument to 'true'. This will also prevent reprocessing the
  1831. // intrinsic; we only despeculate when a zero input is defined.
  1832. CountZeros->setArgOperand(1, Builder.getTrue());
  1833. ModifiedDT = true;
  1834. return true;
  1835. }
  1836. bool CodeGenPrepare::optimizeCallInst(CallInst *CI, bool& ModifiedDT) {
  1837. BasicBlock *BB = CI->getParent();
  1838. // Lower inline assembly if we can.
1839. // If we found an inline asm expression, and if the target knows how to
  1840. // lower it to normal LLVM code, do so now.
  1841. if (TLI && isa<InlineAsm>(CI->getCalledValue())) {
  1842. if (TLI->ExpandInlineAsm(CI)) {
  1843. // Avoid invalidating the iterator.
  1844. CurInstIterator = BB->begin();
  1845. // Avoid processing instructions out of order, which could cause
  1846. // reuse before a value is defined.
  1847. SunkAddrs.clear();
  1848. return true;
  1849. }
  1850. // Sink address computing for memory operands into the block.
  1851. if (optimizeInlineAsmInst(CI))
  1852. return true;
  1853. }
  1854. // Align the pointer arguments to this call if the target thinks it's a good
  1855. // idea
  1856. unsigned MinSize, PrefAlign;
  1857. if (TLI && TLI->shouldAlignPointerArgs(CI, MinSize, PrefAlign)) {
  1858. for (auto &Arg : CI->arg_operands()) {
  1859. // We want to align both objects whose address is used directly and
  1860. // objects whose address is used in casts and GEPs, though it only makes
  1861. // sense for GEPs if the offset is a multiple of the desired alignment and
  1862. // if size - offset meets the size threshold.
  1863. if (!Arg->getType()->isPointerTy())
  1864. continue;
  1865. APInt Offset(DL->getPointerSizeInBits(
  1866. cast<PointerType>(Arg->getType())->getAddressSpace()),
  1867. 0);
  1868. Value *Val = Arg->stripAndAccumulateInBoundsConstantOffsets(*DL, Offset);
  1869. uint64_t Offset2 = Offset.getLimitedValue();
  1870. if ((Offset2 & (PrefAlign-1)) != 0)
  1871. continue;
  1872. AllocaInst *AI;
  1873. if ((AI = dyn_cast<AllocaInst>(Val)) && AI->getAlignment() < PrefAlign &&
  1874. DL->getTypeAllocSize(AI->getAllocatedType()) >= MinSize + Offset2)
  1875. AI->setAlignment(PrefAlign);
  1876. // Global variables can only be aligned if they are defined in this
  1877. // object (i.e. they are uniquely initialized in this object), and
  1878. // over-aligning global variables that have an explicit section is
  1879. // forbidden.
  1880. GlobalVariable *GV;
  1881. if ((GV = dyn_cast<GlobalVariable>(Val)) && GV->canIncreaseAlignment() &&
  1882. GV->getPointerAlignment(*DL) < PrefAlign &&
  1883. DL->getTypeAllocSize(GV->getValueType()) >=
  1884. MinSize + Offset2)
  1885. GV->setAlignment(PrefAlign);
  1886. }
  1887. // If this is a memcpy (or similar) then we may be able to improve the
  1888. // alignment
  1889. if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(CI)) {
  1890. unsigned Align = getKnownAlignment(MI->getDest(), *DL);
  1891. if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI))
  1892. Align = std::min(Align, getKnownAlignment(MTI->getSource(), *DL));
  1893. if (Align > MI->getAlignment())
  1894. MI->setAlignment(ConstantInt::get(MI->getAlignmentType(), Align));
  1895. }
  1896. }
  1897. // If we have a cold call site, try to sink addressing computation into the
  1898. // cold block. This interacts with our handling for loads and stores to
  1899. // ensure that we can fold all uses of a potential addressing computation
  1900. // into their uses. TODO: generalize this to work over profiling data
  1901. if (!OptSize && CI->hasFnAttr(Attribute::Cold))
  1902. for (auto &Arg : CI->arg_operands()) {
  1903. if (!Arg->getType()->isPointerTy())
  1904. continue;
  1905. unsigned AS = Arg->getType()->getPointerAddressSpace();
  1906. return optimizeMemoryInst(CI, Arg, Arg->getType(), AS);
  1907. }
  1908. IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI);
  1909. if (II) {
  1910. switch (II->getIntrinsicID()) {
  1911. default: break;
  1912. case Intrinsic::objectsize: {
  1913. // Lower all uses of llvm.objectsize.*
  1914. ConstantInt *RetVal =
  1915. lowerObjectSizeCall(II, *DL, TLInfo, /*MustSucceed=*/true);
  1916. // Substituting this can cause recursive simplifications, which can
  1917. // invalidate our iterator. Use a WeakVH to hold onto it in case this
  1918. // happens.
  1919. Value *CurValue = &*CurInstIterator;
  1920. WeakVH IterHandle(CurValue);
  1921. replaceAndRecursivelySimplify(CI, RetVal, TLInfo, nullptr);
  1922. // If the iterator instruction was recursively deleted, start over at the
  1923. // start of the block.
  1924. if (IterHandle != CurValue) {
  1925. CurInstIterator = BB->begin();
  1926. SunkAddrs.clear();
  1927. }
  1928. return true;
  1929. }
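// As an illustration (a sketch; the argument list is elided), a call such as
//   %sz = call i64 @llvm.objectsize.i64.p0i8(i8* %p, ...)
// is folded here to a constant: the allocation size when it can be computed,
// or the conservative "unknown" answer otherwise, since MustSucceed is set.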
  1930. case Intrinsic::masked_load: {
  1931. // Scalarize unsupported vector masked load
  1932. if (!TTI->isLegalMaskedLoad(CI->getType())) {
  1933. scalarizeMaskedLoad(CI);
  1934. ModifiedDT = true;
  1935. return true;
  1936. }
  1937. return false;
  1938. }
  1939. case Intrinsic::masked_store: {
  1940. if (!TTI->isLegalMaskedStore(CI->getArgOperand(0)->getType())) {
  1941. scalarizeMaskedStore(CI);
  1942. ModifiedDT = true;
  1943. return true;
  1944. }
  1945. return false;
  1946. }
  1947. case Intrinsic::masked_gather: {
  1948. if (!TTI->isLegalMaskedGather(CI->getType())) {
  1949. scalarizeMaskedGather(CI);
  1950. ModifiedDT = true;
  1951. return true;
  1952. }
  1953. return false;
  1954. }
  1955. case Intrinsic::masked_scatter: {
  1956. if (!TTI->isLegalMaskedScatter(CI->getArgOperand(0)->getType())) {
  1957. scalarizeMaskedScatter(CI);
  1958. ModifiedDT = true;
  1959. return true;
  1960. }
  1961. return false;
  1962. }
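// As an illustration (a sketch), scalarizing an unsupported masked load like
//   %r = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %p, i32 4,
//                                       <4 x i1> %mask, <4 x i32> %passthru)
// emits, for each lane, a test of the mask bit guarding a scalar load whose
// result is merged back with insertelement; stores, gathers and scatters are
// scalarized analogously, one lane at a time.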
  1963. case Intrinsic::aarch64_stlxr:
  1964. case Intrinsic::aarch64_stxr: {
  1965. ZExtInst *ExtVal = dyn_cast<ZExtInst>(CI->getArgOperand(0));
  1966. if (!ExtVal || !ExtVal->hasOneUse() ||
  1967. ExtVal->getParent() == CI->getParent())
  1968. return false;
  1969. // Sink a zext feeding stlxr/stxr before it, so it can be folded into it.
  1970. ExtVal->moveBefore(CI);
  1971. // Mark this instruction as "inserted by CGP", so that other
  1972. // optimizations don't touch it.
  1973. InsertedInsts.insert(ExtVal);
  1974. return true;
  1975. }
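// As an illustration (a sketch; the exact intrinsic mangling is elided),
// given
//   %ext = zext i32 %v to i64   ; defined in a predecessor block
//   ...
//   %st = call i32 @llvm.aarch64.stxr...(i64 %ext, i32* %p)
// moving the zext down next to the call lets instruction selection fold the
// extension into the store-exclusive instead of keeping it as a separate
// instruction.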
  1976. case Intrinsic::invariant_group_barrier:
  1977. II->replaceAllUsesWith(II->getArgOperand(0));
  1978. II->eraseFromParent();
  1979. return true;
  1980. case Intrinsic::cttz:
  1981. case Intrinsic::ctlz:
  1982. // If counting zeros is expensive, try to avoid it.
  1983. return despeculateCountZeros(II, TLI, DL, ModifiedDT);
  1984. }
  1985. if (TLI) {
  1986. SmallVector<Value*, 2> PtrOps;
  1987. Type *AccessTy;
  1988. if (TLI->getAddrModeArguments(II, PtrOps, AccessTy))
  1989. while (!PtrOps.empty()) {
  1990. Value *PtrVal = PtrOps.pop_back_val();
  1991. unsigned AS = PtrVal->getType()->getPointerAddressSpace();
  1992. if (optimizeMemoryInst(II, PtrVal, AccessTy, AS))
  1993. return true;
  1994. }
  1995. }
  1996. }
  1997. // From here on out we're working with named functions.
  1998. if (!CI->getCalledFunction()) return false;
  1999. // Lower all default uses of _chk calls. This is very similar
  2000. // to what InstCombineCalls does, but here we are only lowering calls
  2001. // to fortified library functions (e.g. __memcpy_chk) that have the default
  2002. // "don't know" as the objectsize. Anything else should be left alone.
  2003. FortifiedLibCallSimplifier Simplifier(TLInfo, true);
  2004. if (Value *V = Simplifier.optimizeCall(CI)) {
  2005. CI->replaceAllUsesWith(V);
  2006. CI->eraseFromParent();
  2007. return true;
  2008. }
  2009. return false;
  2010. }
  2011. /// Look for opportunities to duplicate return instructions to the predecessor
  2012. /// to enable tail call optimizations. The case it is currently looking for is:
  2013. /// @code
  2014. /// bb0:
  2015. /// %tmp0 = tail call i32 @f0()
  2016. /// br label %return
  2017. /// bb1:
  2018. /// %tmp1 = tail call i32 @f1()
  2019. /// br label %return
  2020. /// bb2:
  2021. /// %tmp2 = tail call i32 @f2()
  2022. /// br label %return
  2023. /// return:
  2024. /// %retval = phi i32 [ %tmp0, %bb0 ], [ %tmp1, %bb1 ], [ %tmp2, %bb2 ]
  2025. /// ret i32 %retval
  2026. /// @endcode
  2027. ///
  2028. /// =>
  2029. ///
  2030. /// @code
  2031. /// bb0:
  2032. /// %tmp0 = tail call i32 @f0()
  2033. /// ret i32 %tmp0
  2034. /// bb1:
  2035. /// %tmp1 = tail call i32 @f1()
  2036. /// ret i32 %tmp1
  2037. /// bb2:
  2038. /// %tmp2 = tail call i32 @f2()
  2039. /// ret i32 %tmp2
  2040. /// @endcode
  2041. bool CodeGenPrepare::dupRetToEnableTailCallOpts(BasicBlock *BB) {
  2042. if (!TLI)
  2043. return false;
  2044. ReturnInst *RetI = dyn_cast<ReturnInst>(BB->getTerminator());
  2045. if (!RetI)
  2046. return false;
  2047. PHINode *PN = nullptr;
  2048. BitCastInst *BCI = nullptr;
  2049. Value *V = RetI->getReturnValue();
  2050. if (V) {
  2051. BCI = dyn_cast<BitCastInst>(V);
  2052. if (BCI)
  2053. V = BCI->getOperand(0);
  2054. PN = dyn_cast<PHINode>(V);
  2055. if (!PN)
  2056. return false;
  2057. }
  2058. if (PN && PN->getParent() != BB)
  2059. return false;
  2060. // Make sure there are no instructions between the PHI and return, or that the
  2061. // return is the first instruction in the block.
  2062. if (PN) {
  2063. BasicBlock::iterator BI = BB->begin();
  2064. do { ++BI; } while (isa<DbgInfoIntrinsic>(BI));
  2065. if (&*BI == BCI)
  2066. // Also skip over the bitcast.
  2067. ++BI;
  2068. if (&*BI != RetI)
  2069. return false;
  2070. } else {
  2071. BasicBlock::iterator BI = BB->begin();
  2072. while (isa<DbgInfoIntrinsic>(BI)) ++BI;
  2073. if (&*BI != RetI)
  2074. return false;
  2075. }
2076. // Only dup the ReturnInst if the CallInst is likely to be emitted as a tail
2077. // call.
  2078. const Function *F = BB->getParent();
  2079. SmallVector<CallInst*, 4> TailCalls;
  2080. if (PN) {
  2081. for (unsigned I = 0, E = PN->getNumIncomingValues(); I != E; ++I) {
  2082. CallInst *CI = dyn_cast<CallInst>(PN->getIncomingValue(I));
  2083. // Make sure the phi value is indeed produced by the tail call.
  2084. if (CI && CI->hasOneUse() && CI->getParent() == PN->getIncomingBlock(I) &&
  2085. TLI->mayBeEmittedAsTailCall(CI) &&
  2086. attributesPermitTailCall(F, CI, RetI, *TLI))
  2087. TailCalls.push_back(CI);
  2088. }
  2089. } else {
  2090. SmallPtrSet<BasicBlock*, 4> VisitedBBs;
  2091. for (pred_iterator PI = pred_begin(BB), PE = pred_end(BB); PI != PE; ++PI) {
  2092. if (!VisitedBBs.insert(*PI).second)
  2093. continue;
  2094. BasicBlock::InstListType &InstList = (*PI)->getInstList();
  2095. BasicBlock::InstListType::reverse_iterator RI = InstList.rbegin();
  2096. BasicBlock::InstListType::reverse_iterator RE = InstList.rend();
  2097. do { ++RI; } while (RI != RE && isa<DbgInfoIntrinsic>(&*RI));
  2098. if (RI == RE)
  2099. continue;
  2100. CallInst *CI = dyn_cast<CallInst>(&*RI);
  2101. if (CI && CI->use_empty() && TLI->mayBeEmittedAsTailCall(CI) &&
  2102. attributesPermitTailCall(F, CI, RetI, *TLI))
  2103. TailCalls.push_back(CI);
  2104. }
  2105. }
  2106. bool Changed = false;
  2107. for (unsigned i = 0, e = TailCalls.size(); i != e; ++i) {
  2108. CallInst *CI = TailCalls[i];
  2109. CallSite CS(CI);
  2110. // Conservatively require the attributes of the call to match those of the
  2111. // return. Ignore noalias because it doesn't affect the call sequence.
  2112. AttributeList CalleeAttrs = CS.getAttributes();
  2113. if (AttrBuilder(CalleeAttrs, AttributeList::ReturnIndex)
  2114. .removeAttribute(Attribute::NoAlias) !=
  2115. AttrBuilder(CalleeAttrs, AttributeList::ReturnIndex)
  2116. .removeAttribute(Attribute::NoAlias))
  2117. continue;
  2118. // Make sure the call instruction is followed by an unconditional branch to
  2119. // the return block.
  2120. BasicBlock *CallBB = CI->getParent();
  2121. BranchInst *BI = dyn_cast<BranchInst>(CallBB->getTerminator());
  2122. if (!BI || !BI->isUnconditional() || BI->getSuccessor(0) != BB)
  2123. continue;
  2124. // Duplicate the return into CallBB.
  2125. (void)FoldReturnIntoUncondBranch(RetI, BB, CallBB);
  2126. ModifiedDT = Changed = true;
  2127. ++NumRetsDup;
  2128. }
  2129. // If we eliminated all predecessors of the block, delete the block now.
  2130. if (Changed && !BB->hasAddressTaken() && pred_begin(BB) == pred_end(BB))
  2131. BB->eraseFromParent();
  2132. return Changed;
  2133. }
  2134. //===----------------------------------------------------------------------===//
  2135. // Memory Optimization
  2136. //===----------------------------------------------------------------------===//
  2137. namespace {
  2138. /// This is an extended version of TargetLowering::AddrMode
  2139. /// which holds actual Value*'s for register values.
  2140. struct ExtAddrMode : public TargetLowering::AddrMode {
  2141. Value *BaseReg;
  2142. Value *ScaledReg;
  2143. ExtAddrMode() : BaseReg(nullptr), ScaledReg(nullptr) {}
  2144. void print(raw_ostream &OS) const;
  2145. void dump() const;
  2146. bool operator==(const ExtAddrMode& O) const {
  2147. return (BaseReg == O.BaseReg) && (ScaledReg == O.ScaledReg) &&
  2148. (BaseGV == O.BaseGV) && (BaseOffs == O.BaseOffs) &&
  2149. (HasBaseReg == O.HasBaseReg) && (Scale == O.Scale);
  2150. }
  2151. };
  2152. #ifndef NDEBUG
  2153. static inline raw_ostream &operator<<(raw_ostream &OS, const ExtAddrMode &AM) {
  2154. AM.print(OS);
  2155. return OS;
  2156. }
  2157. #endif
  2158. void ExtAddrMode::print(raw_ostream &OS) const {
  2159. bool NeedPlus = false;
  2160. OS << "[";
  2161. if (BaseGV) {
  2162. OS << (NeedPlus ? " + " : "")
  2163. << "GV:";
  2164. BaseGV->printAsOperand(OS, /*PrintType=*/false);
  2165. NeedPlus = true;
  2166. }
  2167. if (BaseOffs) {
  2168. OS << (NeedPlus ? " + " : "")
  2169. << BaseOffs;
  2170. NeedPlus = true;
  2171. }
  2172. if (BaseReg) {
  2173. OS << (NeedPlus ? " + " : "")
  2174. << "Base:";
  2175. BaseReg->printAsOperand(OS, /*PrintType=*/false);
  2176. NeedPlus = true;
  2177. }
  2178. if (Scale) {
  2179. OS << (NeedPlus ? " + " : "")
  2180. << Scale << "*";
  2181. ScaledReg->printAsOperand(OS, /*PrintType=*/false);
  2182. }
  2183. OS << ']';
  2184. }
  2185. #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  2186. LLVM_DUMP_METHOD void ExtAddrMode::dump() const {
  2187. print(dbgs());
  2188. dbgs() << '\n';
  2189. }
  2190. #endif
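// As an illustration, an ExtAddrMode describing the address computation
// '@g + 16 + %base + 4*%idx' prints as:
//   [GV:@g + 16 + Base:%base + 4*%idx]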
2191. /// \brief This class provides transaction-based operations on the IR.
  2192. /// Every change made through this class is recorded in the internal state and
  2193. /// can be undone (rollback) until commit is called.
  2194. class TypePromotionTransaction {
  2195. /// \brief This represents the common interface of the individual transaction.
  2196. /// Each class implements the logic for doing one specific modification on
  2197. /// the IR via the TypePromotionTransaction.
  2198. class TypePromotionAction {
  2199. protected:
  2200. /// The Instruction modified.
  2201. Instruction *Inst;
  2202. public:
  2203. /// \brief Constructor of the action.
  2204. /// The constructor performs the related action on the IR.
  2205. TypePromotionAction(Instruction *Inst) : Inst(Inst) {}
  2206. virtual ~TypePromotionAction() {}
  2207. /// \brief Undo the modification done by this action.
  2208. /// When this method is called, the IR must be in the same state as it was
  2209. /// before this action was applied.
  2210. /// \pre Undoing the action works if and only if the IR is in the exact same
  2211. /// state as it was directly after this action was applied.
  2212. virtual void undo() = 0;
2213. /// \brief Commit every change made by this action.
2214. /// When the action's effects on the IR are to be kept, it is important
2215. /// to call this function; otherwise hidden information may be kept forever.
  2216. virtual void commit() {
  2217. // Nothing to be done, this action is not doing anything.
  2218. }
  2219. };
  2220. /// \brief Utility to remember the position of an instruction.
  2221. class InsertionHandler {
  2222. /// Position of an instruction.
2223. /// An instruction either:
2224. /// - Is the first in a basic block: BB is used.
2225. /// - Has a previous instruction: PrevInst is used.
  2226. union {
  2227. Instruction *PrevInst;
  2228. BasicBlock *BB;
  2229. } Point;
  2230. /// Remember whether or not the instruction had a previous instruction.
  2231. bool HasPrevInstruction;
  2232. public:
  2233. /// \brief Record the position of \p Inst.
  2234. InsertionHandler(Instruction *Inst) {
  2235. BasicBlock::iterator It = Inst->getIterator();
  2236. HasPrevInstruction = (It != (Inst->getParent()->begin()));
  2237. if (HasPrevInstruction)
  2238. Point.PrevInst = &*--It;
  2239. else
  2240. Point.BB = Inst->getParent();
  2241. }
  2242. /// \brief Insert \p Inst at the recorded position.
  2243. void insert(Instruction *Inst) {
  2244. if (HasPrevInstruction) {
  2245. if (Inst->getParent())
  2246. Inst->removeFromParent();
  2247. Inst->insertAfter(Point.PrevInst);
  2248. } else {
  2249. Instruction *Position = &*Point.BB->getFirstInsertionPt();
  2250. if (Inst->getParent())
  2251. Inst->moveBefore(Position);
  2252. else
  2253. Inst->insertBefore(Position);
  2254. }
  2255. }
  2256. };
  2257. /// \brief Move an instruction before another.
  2258. class InstructionMoveBefore : public TypePromotionAction {
  2259. /// Original position of the instruction.
  2260. InsertionHandler Position;
  2261. public:
  2262. /// \brief Move \p Inst before \p Before.
  2263. InstructionMoveBefore(Instruction *Inst, Instruction *Before)
  2264. : TypePromotionAction(Inst), Position(Inst) {
  2265. DEBUG(dbgs() << "Do: move: " << *Inst << "\nbefore: " << *Before << "\n");
  2266. Inst->moveBefore(Before);
  2267. }
  2268. /// \brief Move the instruction back to its original position.
  2269. void undo() override {
  2270. DEBUG(dbgs() << "Undo: moveBefore: " << *Inst << "\n");
  2271. Position.insert(Inst);
  2272. }
  2273. };
  2274. /// \brief Set the operand of an instruction with a new value.
  2275. class OperandSetter : public TypePromotionAction {
  2276. /// Original operand of the instruction.
  2277. Value *Origin;
  2278. /// Index of the modified instruction.
  2279. unsigned Idx;
  2280. public:
  2281. /// \brief Set \p Idx operand of \p Inst with \p NewVal.
  2282. OperandSetter(Instruction *Inst, unsigned Idx, Value *NewVal)
  2283. : TypePromotionAction(Inst), Idx(Idx) {
  2284. DEBUG(dbgs() << "Do: setOperand: " << Idx << "\n"
  2285. << "for:" << *Inst << "\n"
  2286. << "with:" << *NewVal << "\n");
  2287. Origin = Inst->getOperand(Idx);
  2288. Inst->setOperand(Idx, NewVal);
  2289. }
  2290. /// \brief Restore the original value of the instruction.
  2291. void undo() override {
  2292. DEBUG(dbgs() << "Undo: setOperand:" << Idx << "\n"
  2293. << "for: " << *Inst << "\n"
  2294. << "with: " << *Origin << "\n");
  2295. Inst->setOperand(Idx, Origin);
  2296. }
  2297. };
  2298. /// \brief Hide the operands of an instruction.
2299. /// Act as if this instruction were not using any of its operands.
  2300. class OperandsHider : public TypePromotionAction {
  2301. /// The list of original operands.
  2302. SmallVector<Value *, 4> OriginalValues;
  2303. public:
  2304. /// \brief Remove \p Inst from the uses of the operands of \p Inst.
  2305. OperandsHider(Instruction *Inst) : TypePromotionAction(Inst) {
  2306. DEBUG(dbgs() << "Do: OperandsHider: " << *Inst << "\n");
  2307. unsigned NumOpnds = Inst->getNumOperands();
  2308. OriginalValues.reserve(NumOpnds);
  2309. for (unsigned It = 0; It < NumOpnds; ++It) {
  2310. // Save the current operand.
  2311. Value *Val = Inst->getOperand(It);
  2312. OriginalValues.push_back(Val);
  2313. // Set a dummy one.
  2314. // We could use OperandSetter here, but that would imply an overhead
  2315. // that we are not willing to pay.
  2316. Inst->setOperand(It, UndefValue::get(Val->getType()));
  2317. }
  2318. }
  2319. /// \brief Restore the original list of uses.
  2320. void undo() override {
  2321. DEBUG(dbgs() << "Undo: OperandsHider: " << *Inst << "\n");
  2322. for (unsigned It = 0, EndIt = OriginalValues.size(); It != EndIt; ++It)
  2323. Inst->setOperand(It, OriginalValues[It]);
  2324. }
  2325. };
  2326. /// \brief Build a truncate instruction.
  2327. class TruncBuilder : public TypePromotionAction {
  2328. Value *Val;
  2329. public:
  2330. /// \brief Build a truncate instruction of \p Opnd producing a \p Ty
  2331. /// result.
  2332. /// trunc Opnd to Ty.
  2333. TruncBuilder(Instruction *Opnd, Type *Ty) : TypePromotionAction(Opnd) {
  2334. IRBuilder<> Builder(Opnd);
  2335. Val = Builder.CreateTrunc(Opnd, Ty, "promoted");
  2336. DEBUG(dbgs() << "Do: TruncBuilder: " << *Val << "\n");
  2337. }
  2338. /// \brief Get the built value.
  2339. Value *getBuiltValue() { return Val; }
  2340. /// \brief Remove the built instruction.
  2341. void undo() override {
  2342. DEBUG(dbgs() << "Undo: TruncBuilder: " << *Val << "\n");
  2343. if (Instruction *IVal = dyn_cast<Instruction>(Val))
  2344. IVal->eraseFromParent();
  2345. }
  2346. };
  2347. /// \brief Build a sign extension instruction.
  2348. class SExtBuilder : public TypePromotionAction {
  2349. Value *Val;
  2350. public:
  2351. /// \brief Build a sign extension instruction of \p Opnd producing a \p Ty
  2352. /// result.
  2353. /// sext Opnd to Ty.
  2354. SExtBuilder(Instruction *InsertPt, Value *Opnd, Type *Ty)
  2355. : TypePromotionAction(InsertPt) {
  2356. IRBuilder<> Builder(InsertPt);
  2357. Val = Builder.CreateSExt(Opnd, Ty, "promoted");
  2358. DEBUG(dbgs() << "Do: SExtBuilder: " << *Val << "\n");
  2359. }
  2360. /// \brief Get the built value.
  2361. Value *getBuiltValue() { return Val; }
  2362. /// \brief Remove the built instruction.
  2363. void undo() override {
  2364. DEBUG(dbgs() << "Undo: SExtBuilder: " << *Val << "\n");
  2365. if (Instruction *IVal = dyn_cast<Instruction>(Val))
  2366. IVal->eraseFromParent();
  2367. }
  2368. };
  2369. /// \brief Build a zero extension instruction.
  2370. class ZExtBuilder : public TypePromotionAction {
  2371. Value *Val;
  2372. public:
  2373. /// \brief Build a zero extension instruction of \p Opnd producing a \p Ty
  2374. /// result.
  2375. /// zext Opnd to Ty.
  2376. ZExtBuilder(Instruction *InsertPt, Value *Opnd, Type *Ty)
  2377. : TypePromotionAction(InsertPt) {
  2378. IRBuilder<> Builder(InsertPt);
  2379. Val = Builder.CreateZExt(Opnd, Ty, "promoted");
  2380. DEBUG(dbgs() << "Do: ZExtBuilder: " << *Val << "\n");
  2381. }
  2382. /// \brief Get the built value.
  2383. Value *getBuiltValue() { return Val; }
  2384. /// \brief Remove the built instruction.
  2385. void undo() override {
  2386. DEBUG(dbgs() << "Undo: ZExtBuilder: " << *Val << "\n");
  2387. if (Instruction *IVal = dyn_cast<Instruction>(Val))
  2388. IVal->eraseFromParent();
  2389. }
  2390. };
  2391. /// \brief Mutate an instruction to another type.
  2392. class TypeMutator : public TypePromotionAction {
  2393. /// Record the original type.
  2394. Type *OrigTy;
  2395. public:
  2396. /// \brief Mutate the type of \p Inst into \p NewTy.
  2397. TypeMutator(Instruction *Inst, Type *NewTy)
  2398. : TypePromotionAction(Inst), OrigTy(Inst->getType()) {
  2399. DEBUG(dbgs() << "Do: MutateType: " << *Inst << " with " << *NewTy
  2400. << "\n");
  2401. Inst->mutateType(NewTy);
  2402. }
  2403. /// \brief Mutate the instruction back to its original type.
  2404. void undo() override {
  2405. DEBUG(dbgs() << "Undo: MutateType: " << *Inst << " with " << *OrigTy
  2406. << "\n");
  2407. Inst->mutateType(OrigTy);
  2408. }
  2409. };
  2410. /// \brief Replace the uses of an instruction by another instruction.
  2411. class UsesReplacer : public TypePromotionAction {
  2412. /// Helper structure to keep track of the replaced uses.
  2413. struct InstructionAndIdx {
2414. /// The user of the replaced instruction.
2415. Instruction *Inst;
2416. /// The operand index at which the replaced instruction is used by Inst.
2417. unsigned Idx;
  2418. InstructionAndIdx(Instruction *Inst, unsigned Idx)
  2419. : Inst(Inst), Idx(Idx) {}
  2420. };
  2421. /// Keep track of the original uses (pair Instruction, Index).
  2422. SmallVector<InstructionAndIdx, 4> OriginalUses;
  2423. typedef SmallVectorImpl<InstructionAndIdx>::iterator use_iterator;
  2424. public:
  2425. /// \brief Replace all the use of \p Inst by \p New.
  2426. UsesReplacer(Instruction *Inst, Value *New) : TypePromotionAction(Inst) {
  2427. DEBUG(dbgs() << "Do: UsersReplacer: " << *Inst << " with " << *New
  2428. << "\n");
  2429. // Record the original uses.
  2430. for (Use &U : Inst->uses()) {
  2431. Instruction *UserI = cast<Instruction>(U.getUser());
  2432. OriginalUses.push_back(InstructionAndIdx(UserI, U.getOperandNo()));
  2433. }
  2434. // Now, we can replace the uses.
  2435. Inst->replaceAllUsesWith(New);
  2436. }
2437. /// \brief Reassign the original uses of Inst back to Inst.
  2438. void undo() override {
  2439. DEBUG(dbgs() << "Undo: UsersReplacer: " << *Inst << "\n");
  2440. for (use_iterator UseIt = OriginalUses.begin(),
  2441. EndIt = OriginalUses.end();
  2442. UseIt != EndIt; ++UseIt) {
  2443. UseIt->Inst->setOperand(UseIt->Idx, Inst);
  2444. }
  2445. }
  2446. };
  2447. /// \brief Remove an instruction from the IR.
  2448. class InstructionRemover : public TypePromotionAction {
  2449. /// Original position of the instruction.
  2450. InsertionHandler Inserter;
2451. /// Helper structure to hide all the links to the instruction. In other
2452. /// words, this helps to act as if the instruction was removed.
  2453. OperandsHider Hider;
  2454. /// Keep track of the uses replaced, if any.
  2455. UsesReplacer *Replacer;
  2456. /// Keep track of instructions removed.
  2457. SetOfInstrs &RemovedInsts;
  2458. public:
2459. /// \brief Remove all references to \p Inst and optionally replace all its
  2460. /// uses with New.
  2461. /// \p RemovedInsts Keep track of the instructions removed by this Action.
  2462. /// \pre If !Inst->use_empty(), then New != nullptr
  2463. InstructionRemover(Instruction *Inst, SetOfInstrs &RemovedInsts,
  2464. Value *New = nullptr)
  2465. : TypePromotionAction(Inst), Inserter(Inst), Hider(Inst),
  2466. Replacer(nullptr), RemovedInsts(RemovedInsts) {
  2467. if (New)
  2468. Replacer = new UsesReplacer(Inst, New);
  2469. DEBUG(dbgs() << "Do: InstructionRemover: " << *Inst << "\n");
  2470. RemovedInsts.insert(Inst);
2471. // The instructions removed here will be freed after completing
2472. // optimizeBlock() for all blocks as we need to keep track of the
2473. // removed instructions during promotion.
  2474. Inst->removeFromParent();
  2475. }
  2476. ~InstructionRemover() override { delete Replacer; }
2477. /// \brief Resurrect the instruction and reassign it to the proper uses if a
2478. /// new value was provided when this action was built.
  2479. void undo() override {
  2480. DEBUG(dbgs() << "Undo: InstructionRemover: " << *Inst << "\n");
  2481. Inserter.insert(Inst);
  2482. if (Replacer)
  2483. Replacer->undo();
  2484. Hider.undo();
  2485. RemovedInsts.erase(Inst);
  2486. }
  2487. };
  2488. public:
  2489. /// Restoration point.
  2490. /// The restoration point is a pointer to an action instead of an iterator
  2491. /// because the iterator may be invalidated but not the pointer.
  2492. typedef const TypePromotionAction *ConstRestorationPt;
  2493. TypePromotionTransaction(SetOfInstrs &RemovedInsts)
  2494. : RemovedInsts(RemovedInsts) {}
2495. /// Commit all the changes made in this transaction.
  2496. void commit();
  2497. /// Undo all the changes made after the given point.
  2498. void rollback(ConstRestorationPt Point);
  2499. /// Get the current restoration point.
  2500. ConstRestorationPt getRestorationPoint() const;
  2501. /// \name API for IR modification with state keeping to support rollback.
  2502. /// @{
  2503. /// Same as Instruction::setOperand.
  2504. void setOperand(Instruction *Inst, unsigned Idx, Value *NewVal);
  2505. /// Same as Instruction::eraseFromParent.
  2506. void eraseInstruction(Instruction *Inst, Value *NewVal = nullptr);
  2507. /// Same as Value::replaceAllUsesWith.
  2508. void replaceAllUsesWith(Instruction *Inst, Value *New);
  2509. /// Same as Value::mutateType.
  2510. void mutateType(Instruction *Inst, Type *NewTy);
  2511. /// Same as IRBuilder::createTrunc.
  2512. Value *createTrunc(Instruction *Opnd, Type *Ty);
  2513. /// Same as IRBuilder::createSExt.
  2514. Value *createSExt(Instruction *Inst, Value *Opnd, Type *Ty);
  2515. /// Same as IRBuilder::createZExt.
  2516. Value *createZExt(Instruction *Inst, Value *Opnd, Type *Ty);
  2517. /// Same as Instruction::moveBefore.
  2518. void moveBefore(Instruction *Inst, Instruction *Before);
  2519. /// @}
  2520. private:
  2521. /// The ordered list of actions made so far.
  2522. SmallVector<std::unique_ptr<TypePromotionAction>, 16> Actions;
  2523. typedef SmallVectorImpl<std::unique_ptr<TypePromotionAction>>::iterator CommitPt;
  2524. SetOfInstrs &RemovedInsts;
  2525. };
  2526. void TypePromotionTransaction::setOperand(Instruction *Inst, unsigned Idx,
  2527. Value *NewVal) {
  2528. Actions.push_back(
  2529. make_unique<TypePromotionTransaction::OperandSetter>(Inst, Idx, NewVal));
  2530. }
  2531. void TypePromotionTransaction::eraseInstruction(Instruction *Inst,
  2532. Value *NewVal) {
  2533. Actions.push_back(
  2534. make_unique<TypePromotionTransaction::InstructionRemover>(Inst,
  2535. RemovedInsts, NewVal));
  2536. }
  2537. void TypePromotionTransaction::replaceAllUsesWith(Instruction *Inst,
  2538. Value *New) {
  2539. Actions.push_back(make_unique<TypePromotionTransaction::UsesReplacer>(Inst, New));
  2540. }
  2541. void TypePromotionTransaction::mutateType(Instruction *Inst, Type *NewTy) {
  2542. Actions.push_back(make_unique<TypePromotionTransaction::TypeMutator>(Inst, NewTy));
  2543. }
  2544. Value *TypePromotionTransaction::createTrunc(Instruction *Opnd,
  2545. Type *Ty) {
  2546. std::unique_ptr<TruncBuilder> Ptr(new TruncBuilder(Opnd, Ty));
  2547. Value *Val = Ptr->getBuiltValue();
  2548. Actions.push_back(std::move(Ptr));
  2549. return Val;
  2550. }
  2551. Value *TypePromotionTransaction::createSExt(Instruction *Inst,
  2552. Value *Opnd, Type *Ty) {
  2553. std::unique_ptr<SExtBuilder> Ptr(new SExtBuilder(Inst, Opnd, Ty));
  2554. Value *Val = Ptr->getBuiltValue();
  2555. Actions.push_back(std::move(Ptr));
  2556. return Val;
  2557. }
  2558. Value *TypePromotionTransaction::createZExt(Instruction *Inst,
  2559. Value *Opnd, Type *Ty) {
  2560. std::unique_ptr<ZExtBuilder> Ptr(new ZExtBuilder(Inst, Opnd, Ty));
  2561. Value *Val = Ptr->getBuiltValue();
  2562. Actions.push_back(std::move(Ptr));
  2563. return Val;
  2564. }
  2565. void TypePromotionTransaction::moveBefore(Instruction *Inst,
  2566. Instruction *Before) {
  2567. Actions.push_back(
  2568. make_unique<TypePromotionTransaction::InstructionMoveBefore>(Inst, Before));
  2569. }
  2570. TypePromotionTransaction::ConstRestorationPt
  2571. TypePromotionTransaction::getRestorationPoint() const {
  2572. return !Actions.empty() ? Actions.back().get() : nullptr;
  2573. }
  2574. void TypePromotionTransaction::commit() {
  2575. for (CommitPt It = Actions.begin(), EndIt = Actions.end(); It != EndIt;
  2576. ++It)
  2577. (*It)->commit();
  2578. Actions.clear();
  2579. }
  2580. void TypePromotionTransaction::rollback(
  2581. TypePromotionTransaction::ConstRestorationPt Point) {
  2582. while (!Actions.empty() && Point != Actions.back().get()) {
  2583. std::unique_ptr<TypePromotionAction> Curr = Actions.pop_back_val();
  2584. Curr->undo();
  2585. }
  2586. }
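// As an illustration (a sketch of the pattern used by the matcher below),
// a client takes a restoration point, performs speculative rewrites through
// the transaction, and either rolls back or keeps going:
//   TypePromotionTransaction::ConstRestorationPt LastKnownGood =
//       TPT.getRestorationPoint();
//   TPT.setOperand(Inst, 0, NewVal); // recorded, undoable action
//   if (!ProfitableToKeep)           // hypothetical profitability check
//     TPT.rollback(LastKnownGood);   // undoes every action past the point
// Only a final call to TPT.commit() makes the surviving actions permanent.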
  2587. /// \brief A helper class for matching addressing modes.
  2588. ///
  2589. /// This encapsulates the logic for matching the target-legal addressing modes.
  2590. class AddressingModeMatcher {
  2591. SmallVectorImpl<Instruction*> &AddrModeInsts;
  2592. const TargetLowering &TLI;
  2593. const TargetRegisterInfo &TRI;
  2594. const DataLayout &DL;
  2595. /// AccessTy/MemoryInst - This is the type for the access (e.g. double) and
  2596. /// the memory instruction that we're computing this address for.
  2597. Type *AccessTy;
  2598. unsigned AddrSpace;
  2599. Instruction *MemoryInst;
  2600. /// This is the addressing mode that we're building up. This is
  2601. /// part of the return value of this addressing mode matching stuff.
  2602. ExtAddrMode &AddrMode;
  2603. /// The instructions inserted by other CodeGenPrepare optimizations.
  2604. const SetOfInstrs &InsertedInsts;
  2605. /// A map from the instructions to their type before promotion.
  2606. InstrToOrigTy &PromotedInsts;
  2607. /// The ongoing transaction where every action should be registered.
  2608. TypePromotionTransaction &TPT;
  2609. /// This is set to true when we should not do profitability checks.
  2610. /// When true, IsProfitableToFoldIntoAddressingMode always returns true.
  2611. bool IgnoreProfitability;
  2612. AddressingModeMatcher(SmallVectorImpl<Instruction *> &AMI,
  2613. const TargetLowering &TLI,
  2614. const TargetRegisterInfo &TRI,
  2615. Type *AT, unsigned AS,
  2616. Instruction *MI, ExtAddrMode &AM,
  2617. const SetOfInstrs &InsertedInsts,
  2618. InstrToOrigTy &PromotedInsts,
  2619. TypePromotionTransaction &TPT)
  2620. : AddrModeInsts(AMI), TLI(TLI), TRI(TRI),
  2621. DL(MI->getModule()->getDataLayout()), AccessTy(AT), AddrSpace(AS),
  2622. MemoryInst(MI), AddrMode(AM), InsertedInsts(InsertedInsts),
  2623. PromotedInsts(PromotedInsts), TPT(TPT) {
  2624. IgnoreProfitability = false;
  2625. }
  2626. public:
  2627. /// Find the maximal addressing mode that a load/store of V can fold,
2628. /// given an access type of AccessTy. This returns a list of involved
  2629. /// instructions in AddrModeInsts.
  2630. /// \p InsertedInsts The instructions inserted by other CodeGenPrepare
  2631. /// optimizations.
  2632. /// \p PromotedInsts maps the instructions to their type before promotion.
2633. /// \p TPT The ongoing transaction where every action should be registered.
  2634. static ExtAddrMode Match(Value *V, Type *AccessTy, unsigned AS,
  2635. Instruction *MemoryInst,
  2636. SmallVectorImpl<Instruction*> &AddrModeInsts,
  2637. const TargetLowering &TLI,
  2638. const TargetRegisterInfo &TRI,
  2639. const SetOfInstrs &InsertedInsts,
  2640. InstrToOrigTy &PromotedInsts,
  2641. TypePromotionTransaction &TPT) {
  2642. ExtAddrMode Result;
  2643. bool Success = AddressingModeMatcher(AddrModeInsts, TLI, TRI,
  2644. AccessTy, AS,
  2645. MemoryInst, Result, InsertedInsts,
  2646. PromotedInsts, TPT).matchAddr(V, 0);
  2647. (void)Success; assert(Success && "Couldn't select *anything*?");
  2648. return Result;
  2649. }
  2650. private:
  2651. bool matchScaledValue(Value *ScaleReg, int64_t Scale, unsigned Depth);
  2652. bool matchAddr(Value *V, unsigned Depth);
  2653. bool matchOperationAddr(User *Operation, unsigned Opcode, unsigned Depth,
  2654. bool *MovedAway = nullptr);
  2655. bool isProfitableToFoldIntoAddressingMode(Instruction *I,
  2656. ExtAddrMode &AMBefore,
  2657. ExtAddrMode &AMAfter);
  2658. bool valueAlreadyLiveAtInst(Value *Val, Value *KnownLive1, Value *KnownLive2);
  2659. bool isPromotionProfitable(unsigned NewCost, unsigned OldCost,
  2660. Value *PromotedOperand) const;
  2661. };
  2662. /// Try adding ScaleReg*Scale to the current addressing mode.
  2663. /// Return true and update AddrMode if this addr mode is legal for the target,
  2664. /// false if not.
  2665. bool AddressingModeMatcher::matchScaledValue(Value *ScaleReg, int64_t Scale,
  2666. unsigned Depth) {
  2667. // If Scale is 1, then this is the same as adding ScaleReg to the addressing
  2668. // mode. Just process that directly.
  2669. if (Scale == 1)
  2670. return matchAddr(ScaleReg, Depth);
  2671. // If the scale is 0, it takes nothing to add this.
  2672. if (Scale == 0)
  2673. return true;
  2674. // If we already have a scale of this value, we can add to it, otherwise, we
  2675. // need an available scale field.
  2676. if (AddrMode.Scale != 0 && AddrMode.ScaledReg != ScaleReg)
  2677. return false;
  2678. ExtAddrMode TestAddrMode = AddrMode;
  2679. // Add scale to turn X*4+X*3 -> X*7. This could also do things like
  2680. // [A+B + A*7] -> [B+A*8].
  2681. TestAddrMode.Scale += Scale;
  2682. TestAddrMode.ScaledReg = ScaleReg;
  2683. // If the new address isn't legal, bail out.
  2684. if (!TLI.isLegalAddressingMode(DL, TestAddrMode, AccessTy, AddrSpace))
  2685. return false;
  2686. // It was legal, so commit it.
  2687. AddrMode = TestAddrMode;
  2688. // Okay, we decided that we can add ScaleReg+Scale to AddrMode. Check now
  2689. // to see if ScaleReg is actually X+C. If so, we can turn this into adding
  2690. // X*Scale + C*Scale to addr mode.
  2691. ConstantInt *CI = nullptr; Value *AddLHS = nullptr;
  2692. if (isa<Instruction>(ScaleReg) && // not a constant expr.
  2693. match(ScaleReg, m_Add(m_Value(AddLHS), m_ConstantInt(CI)))) {
  2694. TestAddrMode.ScaledReg = AddLHS;
  2695. TestAddrMode.BaseOffs += CI->getSExtValue()*TestAddrMode.Scale;
  2696. // If this addressing mode is legal, commit it and remember that we folded
  2697. // this instruction.
  2698. if (TLI.isLegalAddressingMode(DL, TestAddrMode, AccessTy, AddrSpace)) {
  2699. AddrModeInsts.push_back(cast<Instruction>(ScaleReg));
  2700. AddrMode = TestAddrMode;
  2701. return true;
  2702. }
  2703. }
  2704. // Otherwise, not (x+c)*scale, just return what we have.
  2705. return true;
  2706. }
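// As an illustration (a sketch), with Scale == 4 and ScaleReg defined as
//   %t = add i64 %y, 12
// the code above re-targets the scale at %y and folds the constant, i.e.
// ScaledReg becomes %y and BaseOffs grows by 4 * 12 = 48, provided the
// resulting addressing mode is still legal for the target.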
  2707. /// This is a little filter, which returns true if an addressing computation
  2708. /// involving I might be folded into a load/store accessing it.
  2709. /// This doesn't need to be perfect, but needs to accept at least
2710. /// the set of instructions that matchOperationAddr can.
  2711. static bool MightBeFoldableInst(Instruction *I) {
  2712. switch (I->getOpcode()) {
  2713. case Instruction::BitCast:
  2714. case Instruction::AddrSpaceCast:
  2715. // Don't touch identity bitcasts.
  2716. if (I->getType() == I->getOperand(0)->getType())
  2717. return false;
  2718. return I->getType()->isPointerTy() || I->getType()->isIntegerTy();
  2719. case Instruction::PtrToInt:
  2720. // PtrToInt is always a noop, as we know that the int type is pointer sized.
  2721. return true;
  2722. case Instruction::IntToPtr:
  2723. // We know the input is intptr_t, so this is foldable.
  2724. return true;
  2725. case Instruction::Add:
  2726. return true;
  2727. case Instruction::Mul:
  2728. case Instruction::Shl:
  2729. // Can only handle X*C and X << C.
  2730. return isa<ConstantInt>(I->getOperand(1));
  2731. case Instruction::GetElementPtr:
  2732. return true;
  2733. default:
  2734. return false;
  2735. }
  2736. }
  2737. /// \brief Check whether or not \p Val is a legal instruction for \p TLI.
  2738. /// \note \p Val is assumed to be the product of some type promotion.
  2739. /// Therefore if \p Val has an undefined state in \p TLI, this is assumed
  2740. /// to be legal, as the non-promoted value would have had the same state.
  2741. static bool isPromotedInstructionLegal(const TargetLowering &TLI,
  2742. const DataLayout &DL, Value *Val) {
  2743. Instruction *PromotedInst = dyn_cast<Instruction>(Val);
  2744. if (!PromotedInst)
  2745. return false;
  2746. int ISDOpcode = TLI.InstructionOpcodeToISD(PromotedInst->getOpcode());
  2747. // If the ISDOpcode is undefined, it was undefined before the promotion.
  2748. if (!ISDOpcode)
  2749. return true;
  2750. // Otherwise, check if the promoted instruction is legal or not.
  2751. return TLI.isOperationLegalOrCustom(
  2752. ISDOpcode, TLI.getValueType(DL, PromotedInst->getType()));
  2753. }
2754. /// \brief Helper class to perform type promotion.
  2755. class TypePromotionHelper {
  2756. /// \brief Utility function to check whether or not a sign or zero extension
  2757. /// of \p Inst with \p ConsideredExtType can be moved through \p Inst by
  2758. /// either using the operands of \p Inst or promoting \p Inst.
  2759. /// The type of the extension is defined by \p IsSExt.
  2760. /// In other words, check if:
  2761. /// ext (Ty Inst opnd1 opnd2 ... opndN) to ConsideredExtType.
  2762. /// #1 Promotion applies:
  2763. /// ConsideredExtType Inst (ext opnd1 to ConsideredExtType, ...).
  2764. /// #2 Operand reuses:
  2765. /// ext opnd1 to ConsideredExtType.
  2766. /// \p PromotedInsts maps the instructions to their type before promotion.
  2767. static bool canGetThrough(const Instruction *Inst, Type *ConsideredExtType,
  2768. const InstrToOrigTy &PromotedInsts, bool IsSExt);
  2769. /// \brief Utility function to determine if \p OpIdx should be promoted when
  2770. /// promoting \p Inst.
  2771. static bool shouldExtOperand(const Instruction *Inst, int OpIdx) {
  2772. return !(isa<SelectInst>(Inst) && OpIdx == 0);
  2773. }
  2774. /// \brief Utility function to promote the operand of \p Ext when this
  2775. /// operand is a promotable trunc or sext or zext.
  2776. /// \p PromotedInsts maps the instructions to their type before promotion.
  2777. /// \p CreatedInstsCost[out] contains the cost of all instructions
  2778. /// created to promote the operand of Ext.
  2779. /// Newly added extensions are inserted in \p Exts.
  2780. /// Newly added truncates are inserted in \p Truncs.
  2781. /// Should never be called directly.
  2782. /// \return The promoted value which is used instead of Ext.
  2783. static Value *promoteOperandForTruncAndAnyExt(
  2784. Instruction *Ext, TypePromotionTransaction &TPT,
  2785. InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost,
  2786. SmallVectorImpl<Instruction *> *Exts,
  2787. SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI);
  2788. /// \brief Utility function to promote the operand of \p Ext when this
  2789. /// operand is promotable and is not a supported trunc or sext.
  2790. /// \p PromotedInsts maps the instructions to their type before promotion.
  2791. /// \p CreatedInstsCost[out] contains the cost of all the instructions
  2792. /// created to promote the operand of Ext.
  2793. /// Newly added extensions are inserted in \p Exts.
  2794. /// Newly added truncates are inserted in \p Truncs.
  2795. /// Should never be called directly.
  2796. /// \return The promoted value which is used instead of Ext.
  2797. static Value *promoteOperandForOther(Instruction *Ext,
  2798. TypePromotionTransaction &TPT,
  2799. InstrToOrigTy &PromotedInsts,
  2800. unsigned &CreatedInstsCost,
  2801. SmallVectorImpl<Instruction *> *Exts,
  2802. SmallVectorImpl<Instruction *> *Truncs,
  2803. const TargetLowering &TLI, bool IsSExt);
  2804. /// \see promoteOperandForOther.
  2805. static Value *signExtendOperandForOther(
  2806. Instruction *Ext, TypePromotionTransaction &TPT,
  2807. InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost,
  2808. SmallVectorImpl<Instruction *> *Exts,
  2809. SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI) {
  2810. return promoteOperandForOther(Ext, TPT, PromotedInsts, CreatedInstsCost,
  2811. Exts, Truncs, TLI, true);
  2812. }
  2813. /// \see promoteOperandForOther.
  2814. static Value *zeroExtendOperandForOther(
  2815. Instruction *Ext, TypePromotionTransaction &TPT,
  2816. InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost,
  2817. SmallVectorImpl<Instruction *> *Exts,
  2818. SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI) {
  2819. return promoteOperandForOther(Ext, TPT, PromotedInsts, CreatedInstsCost,
  2820. Exts, Truncs, TLI, false);
  2821. }
  2822. public:
  2823. /// Type for the utility function that promotes the operand of Ext.
  2824. typedef Value *(*Action)(Instruction *Ext, TypePromotionTransaction &TPT,
  2825. InstrToOrigTy &PromotedInsts,
  2826. unsigned &CreatedInstsCost,
  2827. SmallVectorImpl<Instruction *> *Exts,
  2828. SmallVectorImpl<Instruction *> *Truncs,
  2829. const TargetLowering &TLI);
2830. /// \brief Given a sign/zero extend instruction \p Ext, return the appropriate
  2831. /// action to promote the operand of \p Ext instead of using Ext.
  2832. /// \return NULL if no promotable action is possible with the current
  2833. /// sign extension.
  2834. /// \p InsertedInsts keeps track of all the instructions inserted by the
  2835. /// other CodeGenPrepare optimizations. This information is important
  2836. /// because we do not want to promote these instructions as CodeGenPrepare
2837. /// will reinsert them later, thus creating an infinite create/remove loop.
  2838. /// \p PromotedInsts maps the instructions to their type before promotion.
  2839. static Action getAction(Instruction *Ext, const SetOfInstrs &InsertedInsts,
  2840. const TargetLowering &TLI,
  2841. const InstrToOrigTy &PromotedInsts);
  2842. };
  2843. bool TypePromotionHelper::canGetThrough(const Instruction *Inst,
  2844. Type *ConsideredExtType,
  2845. const InstrToOrigTy &PromotedInsts,
  2846. bool IsSExt) {
  2847. // The promotion helper does not know how to deal with vector types yet.
  2848. // To be able to fix that, we would need to fix the places where we
  2849. // statically extend, e.g., constants and such.
  2850. if (Inst->getType()->isVectorTy())
  2851. return false;
  2852. // We can always get through zext.
  2853. if (isa<ZExtInst>(Inst))
  2854. return true;
  2855. // sext(sext) is ok too.
  2856. if (IsSExt && isa<SExtInst>(Inst))
  2857. return true;
2858. // We can get through a binary operator if it is legal to do so. In other
2859. // words, the binary operator must have a nuw or nsw flag.
  2860. const BinaryOperator *BinOp = dyn_cast<BinaryOperator>(Inst);
  2861. if (BinOp && isa<OverflowingBinaryOperator>(BinOp) &&
  2862. ((!IsSExt && BinOp->hasNoUnsignedWrap()) ||
  2863. (IsSExt && BinOp->hasNoSignedWrap())))
  2864. return true;
  2865. // Check if we can do the following simplification.
  2866. // ext(trunc(opnd)) --> ext(opnd)
  2867. if (!isa<TruncInst>(Inst))
  2868. return false;
  2869. Value *OpndVal = Inst->getOperand(0);
  2870. // Check if we can use this operand in the extension.
  2871. // If the type is larger than the result type of the extension, we cannot.
  2872. if (!OpndVal->getType()->isIntegerTy() ||
  2873. OpndVal->getType()->getIntegerBitWidth() >
  2874. ConsideredExtType->getIntegerBitWidth())
  2875. return false;
  2876. // If the operand of the truncate is not an instruction, we will not have
  2877. // any information on the dropped bits.
2878. // (Actually we could for constants, but it is not worth the extra logic.)
  2879. Instruction *Opnd = dyn_cast<Instruction>(OpndVal);
  2880. if (!Opnd)
  2881. return false;
2882. // Check if the source type of the operand is narrow enough.
2883. // I.e., check that the trunc just drops extended bits of the same kind as
2884. // the extension.
  2885. // #1 get the type of the operand and check the kind of the extended bits.
  2886. const Type *OpndType;
  2887. InstrToOrigTy::const_iterator It = PromotedInsts.find(Opnd);
  2888. if (It != PromotedInsts.end() && It->second.getInt() == IsSExt)
  2889. OpndType = It->second.getPointer();
  2890. else if ((IsSExt && isa<SExtInst>(Opnd)) || (!IsSExt && isa<ZExtInst>(Opnd)))
  2891. OpndType = Opnd->getOperand(0)->getType();
  2892. else
  2893. return false;
  2894. // #2 check that the truncate just drops extended bits.
  2895. return Inst->getType()->getIntegerBitWidth() >=
  2896. OpndType->getIntegerBitWidth();
  2897. }
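// As an illustration (a sketch), for the chain
//   %w = sext i8 %b to i32
//   %t = trunc i32 %w to i16
//   %e = sext i16 %t to i64
// the trunc only drops bits that were sign-extended from %b (an i16 still
// holds all 8 original bits), so the final sext can be moved through it.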
  2898. TypePromotionHelper::Action TypePromotionHelper::getAction(
  2899. Instruction *Ext, const SetOfInstrs &InsertedInsts,
  2900. const TargetLowering &TLI, const InstrToOrigTy &PromotedInsts) {
  2901. assert((isa<SExtInst>(Ext) || isa<ZExtInst>(Ext)) &&
  2902. "Unexpected instruction type");
  2903. Instruction *ExtOpnd = dyn_cast<Instruction>(Ext->getOperand(0));
  2904. Type *ExtTy = Ext->getType();
  2905. bool IsSExt = isa<SExtInst>(Ext);
  2906. // If the operand of the extension is not an instruction, we cannot
  2907. // get through.
2908. // If it is, check whether we can get through.
  2909. if (!ExtOpnd || !canGetThrough(ExtOpnd, ExtTy, PromotedInsts, IsSExt))
  2910. return nullptr;
  2911. // Do not promote if the operand has been added by codegenprepare.
  2912. // Otherwise, it means we are undoing an optimization that is likely to be
2913. // redone, thus causing a potential infinite loop.
  2914. if (isa<TruncInst>(ExtOpnd) && InsertedInsts.count(ExtOpnd))
  2915. return nullptr;
2916. // SExt, ZExt or Trunc instructions.
  2917. // Return the related handler.
  2918. if (isa<SExtInst>(ExtOpnd) || isa<TruncInst>(ExtOpnd) ||
  2919. isa<ZExtInst>(ExtOpnd))
  2920. return promoteOperandForTruncAndAnyExt;
  2921. // Regular instruction.
  2922. // Abort early if we will have to insert non-free instructions.
  2923. if (!ExtOpnd->hasOneUse() && !TLI.isTruncateFree(ExtTy, ExtOpnd->getType()))
  2924. return nullptr;
  2925. return IsSExt ? signExtendOperandForOther : zeroExtendOperandForOther;
  2926. }
  2927. Value *TypePromotionHelper::promoteOperandForTruncAndAnyExt(
  2928. llvm::Instruction *SExt, TypePromotionTransaction &TPT,
  2929. InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost,
  2930. SmallVectorImpl<Instruction *> *Exts,
  2931. SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI) {
  2932. // By construction, the operand of SExt is an instruction. Otherwise we cannot
  2933. // get through it and this method should not be called.
  2934. Instruction *SExtOpnd = cast<Instruction>(SExt->getOperand(0));
  2935. Value *ExtVal = SExt;
  2936. bool HasMergedNonFreeExt = false;
  2937. if (isa<ZExtInst>(SExtOpnd)) {
  2938. // Replace s|zext(zext(opnd))
  2939. // => zext(opnd).
  2940. HasMergedNonFreeExt = !TLI.isExtFree(SExtOpnd);
  2941. Value *ZExt =
  2942. TPT.createZExt(SExt, SExtOpnd->getOperand(0), SExt->getType());
  2943. TPT.replaceAllUsesWith(SExt, ZExt);
  2944. TPT.eraseInstruction(SExt);
  2945. ExtVal = ZExt;
  2946. } else {
  2947. // Replace z|sext(trunc(opnd)) or sext(sext(opnd))
  2948. // => z|sext(opnd).
  2949. TPT.setOperand(SExt, 0, SExtOpnd->getOperand(0));
  2950. }
  2951. CreatedInstsCost = 0;
  2952. // Remove dead code.
  2953. if (SExtOpnd->use_empty())
  2954. TPT.eraseInstruction(SExtOpnd);
  2955. // Check if the extension is still needed.
  2956. Instruction *ExtInst = dyn_cast<Instruction>(ExtVal);
  2957. if (!ExtInst || ExtInst->getType() != ExtInst->getOperand(0)->getType()) {
  2958. if (ExtInst) {
  2959. if (Exts)
  2960. Exts->push_back(ExtInst);
  2961. CreatedInstsCost = !TLI.isExtFree(ExtInst) && !HasMergedNonFreeExt;
  2962. }
  2963. return ExtVal;
  2964. }
  2965. // At this point we have: ext ty opnd to ty.
  2966. // Reassign the uses of ExtInst to the opnd and remove ExtInst.
  2967. Value *NextVal = ExtInst->getOperand(0);
  2968. TPT.eraseInstruction(ExtInst, NextVal);
  2969. return NextVal;
  2970. }
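// As an illustration (a sketch), the sequence
//   %z = zext i8 %b to i16
//   %s = sext i16 %z to i32
// collapses above into a single
//   %promoted = zext i8 %b to i32
// and the now-dead inner zext is erased through the transaction.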
  2971. Value *TypePromotionHelper::promoteOperandForOther(
  2972. Instruction *Ext, TypePromotionTransaction &TPT,
  2973. InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost,
  2974. SmallVectorImpl<Instruction *> *Exts,
  2975. SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI,
  2976. bool IsSExt) {
  2977. // By construction, the operand of Ext is an instruction. Otherwise we cannot
  2978. // get through it and this method should not be called.
  2979. Instruction *ExtOpnd = cast<Instruction>(Ext->getOperand(0));
  2980. CreatedInstsCost = 0;
  2981. if (!ExtOpnd->hasOneUse()) {
  2982. // ExtOpnd will be promoted.
2983. // All its uses except Ext will need to use a truncated value of the
  2984. // promoted version.
  2985. // Create the truncate now.
  2986. Value *Trunc = TPT.createTrunc(Ext, ExtOpnd->getType());
  2987. if (Instruction *ITrunc = dyn_cast<Instruction>(Trunc)) {
  2988. ITrunc->removeFromParent();
  2989. // Insert it just after the definition.
  2990. ITrunc->insertAfter(ExtOpnd);
  2991. if (Truncs)
  2992. Truncs->push_back(ITrunc);
  2993. }
  2994. TPT.replaceAllUsesWith(ExtOpnd, Trunc);
  2995. // Restore the operand of Ext (which has been replaced by the previous call
  2996. // to replaceAllUsesWith) to avoid creating a cycle trunc <-> sext.
  2997. TPT.setOperand(Ext, 0, ExtOpnd);
  2998. }
  2999. // Get through the Instruction:
  3000. // 1. Update its type.
3001. // 2. Replace the uses of Ext by ExtOpnd.
  3002. // 3. Extend each operand that needs to be extended.
  3003. // Remember the original type of the instruction before promotion.
  3004. // This is useful to know that the high bits are sign extended bits.
  3005. PromotedInsts.insert(std::pair<Instruction *, TypeIsSExt>(
  3006. ExtOpnd, TypeIsSExt(ExtOpnd->getType(), IsSExt)));
  3007. // Step #1.
  3008. TPT.mutateType(ExtOpnd, Ext->getType());
  3009. // Step #2.
  3010. TPT.replaceAllUsesWith(Ext, ExtOpnd);
  3011. // Step #3.
  3012. Instruction *ExtForOpnd = Ext;
  3013. DEBUG(dbgs() << "Propagate Ext to operands\n");
  3014. for (int OpIdx = 0, EndOpIdx = ExtOpnd->getNumOperands(); OpIdx != EndOpIdx;
  3015. ++OpIdx) {
  3016. DEBUG(dbgs() << "Operand:\n" << *(ExtOpnd->getOperand(OpIdx)) << '\n');
  3017. if (ExtOpnd->getOperand(OpIdx)->getType() == Ext->getType() ||
  3018. !shouldExtOperand(ExtOpnd, OpIdx)) {
  3019. DEBUG(dbgs() << "No need to propagate\n");
  3020. continue;
  3021. }
  3022. // Check if we can statically extend the operand.
  3023. Value *Opnd = ExtOpnd->getOperand(OpIdx);
  3024. if (const ConstantInt *Cst = dyn_cast<ConstantInt>(Opnd)) {
  3025. DEBUG(dbgs() << "Statically extend\n");
  3026. unsigned BitWidth = Ext->getType()->getIntegerBitWidth();
  3027. APInt CstVal = IsSExt ? Cst->getValue().sext(BitWidth)
  3028. : Cst->getValue().zext(BitWidth);
  3029. TPT.setOperand(ExtOpnd, OpIdx, ConstantInt::get(Ext->getType(), CstVal));
  3030. continue;
  3031. }
3032. // UndefValue operands are typed, so we have to statically extend them.
  3033. if (isa<UndefValue>(Opnd)) {
  3034. DEBUG(dbgs() << "Statically extend\n");
  3035. TPT.setOperand(ExtOpnd, OpIdx, UndefValue::get(Ext->getType()));
  3036. continue;
  3037. }
3038. // Otherwise we have to explicitly sign or zero extend the operand.
3039. // Check if Ext was already reused to extend another operand.
3040. if (!ExtForOpnd) {
3041. // If so, create a new extension.
  3042. DEBUG(dbgs() << "More operands to ext\n");
  3043. Value *ValForExtOpnd = IsSExt ? TPT.createSExt(Ext, Opnd, Ext->getType())
  3044. : TPT.createZExt(Ext, Opnd, Ext->getType());
  3045. if (!isa<Instruction>(ValForExtOpnd)) {
  3046. TPT.setOperand(ExtOpnd, OpIdx, ValForExtOpnd);
  3047. continue;
  3048. }
  3049. ExtForOpnd = cast<Instruction>(ValForExtOpnd);
  3050. }
  3051. if (Exts)
  3052. Exts->push_back(ExtForOpnd);
  3053. TPT.setOperand(ExtForOpnd, 0, Opnd);
  3054. // Move the sign extension before the insertion point.
  3055. TPT.moveBefore(ExtForOpnd, ExtOpnd);
  3056. TPT.setOperand(ExtOpnd, OpIdx, ExtForOpnd);
  3057. CreatedInstsCost += !TLI.isExtFree(ExtForOpnd);
3058. // If more extensions are required, new instructions will have to be created.
  3059. ExtForOpnd = nullptr;
  3060. }
  3061. if (ExtForOpnd == Ext) {
  3062. DEBUG(dbgs() << "Extension is useless now\n");
  3063. TPT.eraseInstruction(Ext);
  3064. }
  3065. return ExtOpnd;
  3066. }
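// As an illustration (a sketch), zero-extending through an nuw add:
//   %a = add nuw i32 %x, 42
//   %e = zext i32 %a to i64
// is rewritten above so that %a itself becomes the wide value:
//   %e = zext i32 %x to i64   ; Ext reused to extend the operand
//   %a = add nuw i64 %e, 42   ; type mutated in place, constant statically
//                             ; extended
// and, if %a had other users, a trunc back to i32 is inserted for them.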
  3067. /// Check whether or not promoting an instruction to a wider type is profitable.
  3068. /// \p NewCost gives the cost of extension instructions created by the
  3069. /// promotion.
  3070. /// \p OldCost gives the cost of extension instructions before the promotion
  3071. /// plus the number of instructions that have been
3072. /// matched in the addressing mode with the promotion.
  3073. /// \p PromotedOperand is the value that has been promoted.
  3074. /// \return True if the promotion is profitable, false otherwise.
  3075. bool AddressingModeMatcher::isPromotionProfitable(
  3076. unsigned NewCost, unsigned OldCost, Value *PromotedOperand) const {
  3077. DEBUG(dbgs() << "OldCost: " << OldCost << "\tNewCost: " << NewCost << '\n');
  3078. // The cost of the new extensions is greater than the cost of the
  3079. // old extension plus what we folded.
  3080. // This is not profitable.
  3081. if (NewCost > OldCost)
  3082. return false;
  3083. if (NewCost < OldCost)
  3084. return true;
3085. // The promotion is neutral but it may help fold the sign extension into
3086. // loads, for instance.
  3087. // Check that we did not create an illegal instruction.
  3088. return isPromotedInstructionLegal(TLI, DL, PromotedOperand);
  3089. }
  3090. /// Given an instruction or constant expr, see if we can fold the operation
  3091. /// into the addressing mode. If so, update the addressing mode and return
  3092. /// true, otherwise return false without modifying AddrMode.
  3093. /// If \p MovedAway is not NULL, it contains the information of whether or
  3094. /// not AddrInst has to be folded into the addressing mode on success.
  3095. /// If \p MovedAway == true, \p AddrInst will not be part of the addressing
3096. /// mode, because it has been moved away.
  3097. /// Thus AddrInst must not be added in the matched instructions.
  3098. /// This state can happen when AddrInst is a sext, since it may be moved away.
  3099. /// Therefore, AddrInst may not be valid when MovedAway is true and it must
  3100. /// not be referenced anymore.
bool AddressingModeMatcher::matchOperationAddr(User *AddrInst, unsigned Opcode,
                                               unsigned Depth,
                                               bool *MovedAway) {
  // Avoid exponential behavior on extremely deep expression trees.
  if (Depth >= 5) return false;

  // By default, all matched instructions stay in place.
  if (MovedAway)
    *MovedAway = false;

  switch (Opcode) {
  case Instruction::PtrToInt:
    // PtrToInt is always a noop, as we know that the int type is pointer sized.
    return matchAddr(AddrInst->getOperand(0), Depth);
  case Instruction::IntToPtr: {
    auto AS = AddrInst->getType()->getPointerAddressSpace();
    auto PtrTy = MVT::getIntegerVT(DL.getPointerSizeInBits(AS));
    // This inttoptr is a no-op if the integer type is pointer sized.
    if (TLI.getValueType(DL, AddrInst->getOperand(0)->getType()) == PtrTy)
      return matchAddr(AddrInst->getOperand(0), Depth);
    return false;
  }
  case Instruction::BitCast:
    // BitCast is always a noop, and we can handle it as long as it is
    // int->int or pointer->pointer (we don't want int<->fp or something).
    if ((AddrInst->getOperand(0)->getType()->isPointerTy() ||
         AddrInst->getOperand(0)->getType()->isIntegerTy()) &&
        // Don't touch identity bitcasts. These were probably put here by LSR,
        // and we don't want to mess around with them. Assume it knows what it
        // is doing.
        AddrInst->getOperand(0)->getType() != AddrInst->getType())
      return matchAddr(AddrInst->getOperand(0), Depth);
    return false;
  case Instruction::AddrSpaceCast: {
    unsigned SrcAS
      = AddrInst->getOperand(0)->getType()->getPointerAddressSpace();
    unsigned DestAS = AddrInst->getType()->getPointerAddressSpace();
    if (TLI.isNoopAddrSpaceCast(SrcAS, DestAS))
      return matchAddr(AddrInst->getOperand(0), Depth);
    return false;
  }
  case Instruction::Add: {
    // Check to see if we can merge in the RHS then the LHS. If so, we win.
    ExtAddrMode BackupAddrMode = AddrMode;
    unsigned OldSize = AddrModeInsts.size();
    // Start a transaction at this point.
    // The LHS may match but not the RHS.
    // Therefore, we need a higher level restoration point to undo a partially
    // matched operation.
    TypePromotionTransaction::ConstRestorationPt LastKnownGood =
        TPT.getRestorationPoint();

    if (matchAddr(AddrInst->getOperand(1), Depth+1) &&
        matchAddr(AddrInst->getOperand(0), Depth+1))
      return true;

    // Restore the old addr mode info.
    AddrMode = BackupAddrMode;
    AddrModeInsts.resize(OldSize);
    TPT.rollback(LastKnownGood);

    // Otherwise this was over-aggressive. Try merging in the LHS then the RHS.
    if (matchAddr(AddrInst->getOperand(0), Depth+1) &&
        matchAddr(AddrInst->getOperand(1), Depth+1))
      return true;

    // Otherwise we definitely can't merge the ADD in.
    AddrMode = BackupAddrMode;
    AddrModeInsts.resize(OldSize);
    TPT.rollback(LastKnownGood);
    break;
  }
  //case Instruction::Or:
  //  TODO: We can handle "Or Val, Imm" iff this OR is equivalent to an ADD.
  //break;
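  // For example (illustrative IR, names made up), a scaled index such as
  //   %idx = mul i64 %x, 4    or    %idx = shl i64 %x, 2
  // can be matched below as %x scaled by 4, since shifting left by C is
  // equivalent to multiplying by 1 << C.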
  case Instruction::Mul:
  case Instruction::Shl: {
    // Can only handle X*C and X << C.
    ConstantInt *RHS = dyn_cast<ConstantInt>(AddrInst->getOperand(1));
    if (!RHS)
      return false;
    int64_t Scale = RHS->getSExtValue();
    if (Opcode == Instruction::Shl)
      Scale = 1LL << Scale;

    return matchScaledValue(AddrInst->getOperand(0), Scale, Depth);
  }
  case Instruction::GetElementPtr: {
    // Scan the GEP. We check if it contains constant offsets and at most
    // one variable offset.
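    // For example (illustrative IR), a GEP such as
    //   %a = getelementptr { i32, [40 x i32] }* %p, i64 0, i32 1, i64 %i
    // contributes a constant offset of 4 (the field offset of the array) and
    // a single variable index %i with scale 4 (the alloc size of i32).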
    int VariableOperand = -1;
    unsigned VariableScale = 0;

    int64_t ConstantOffset = 0;
    gep_type_iterator GTI = gep_type_begin(AddrInst);
    for (unsigned i = 1, e = AddrInst->getNumOperands(); i != e; ++i, ++GTI) {
      if (StructType *STy = GTI.getStructTypeOrNull()) {
        const StructLayout *SL = DL.getStructLayout(STy);
        unsigned Idx =
          cast<ConstantInt>(AddrInst->getOperand(i))->getZExtValue();
        ConstantOffset += SL->getElementOffset(Idx);
      } else {
        uint64_t TypeSize = DL.getTypeAllocSize(GTI.getIndexedType());
        if (ConstantInt *CI = dyn_cast<ConstantInt>(AddrInst->getOperand(i))) {
          ConstantOffset += CI->getSExtValue()*TypeSize;
        } else if (TypeSize) {  // Scales of zero don't do anything.
          // We only allow one variable index at the moment.
          if (VariableOperand != -1)
            return false;

          // Remember the variable index.
          VariableOperand = i;
          VariableScale = TypeSize;
        }
      }
    }

    // A common case is for the GEP to only do a constant offset. In this case,
    // just add it to the disp field and check validity.
    if (VariableOperand == -1) {
      AddrMode.BaseOffs += ConstantOffset;
      if (ConstantOffset == 0 ||
          TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace)) {
        // Check to see if we can fold the base pointer in too.
        if (matchAddr(AddrInst->getOperand(0), Depth+1))
          return true;
      }
      AddrMode.BaseOffs -= ConstantOffset;
      return false;
    }

    // Save the valid addressing mode in case we can't match.
    ExtAddrMode BackupAddrMode = AddrMode;
    unsigned OldSize = AddrModeInsts.size();

    // See if the scale and offset amount is valid for this target.
    AddrMode.BaseOffs += ConstantOffset;

    // Match the base operand of the GEP.
    if (!matchAddr(AddrInst->getOperand(0), Depth+1)) {
      // If it couldn't be matched, just stuff the value in a register.
      if (AddrMode.HasBaseReg) {
        AddrMode = BackupAddrMode;
        AddrModeInsts.resize(OldSize);
        return false;
      }
      AddrMode.HasBaseReg = true;
      AddrMode.BaseReg = AddrInst->getOperand(0);
    }

    // Match the remaining variable portion of the GEP.
    if (!matchScaledValue(AddrInst->getOperand(VariableOperand), VariableScale,
                          Depth)) {
      // If it couldn't be matched, try stuffing the base into a register
      // instead of matching it, and retrying the match of the scale.
      AddrMode = BackupAddrMode;
      AddrModeInsts.resize(OldSize);
      if (AddrMode.HasBaseReg)
        return false;
      AddrMode.HasBaseReg = true;
      AddrMode.BaseReg = AddrInst->getOperand(0);
      AddrMode.BaseOffs += ConstantOffset;
      if (!matchScaledValue(AddrInst->getOperand(VariableOperand),
                            VariableScale, Depth)) {
        // If even that didn't work, bail.
        AddrMode = BackupAddrMode;
        AddrModeInsts.resize(OldSize);
        return false;
      }
    }

    return true;
  }
  case Instruction::SExt:
  case Instruction::ZExt: {
    Instruction *Ext = dyn_cast<Instruction>(AddrInst);
    if (!Ext)
      return false;

    // Try to move this ext out of the way of the addressing mode.
    // Ask for a method for doing so.
    TypePromotionHelper::Action TPH =
        TypePromotionHelper::getAction(Ext, InsertedInsts, TLI, PromotedInsts);
    if (!TPH)
      return false;

    TypePromotionTransaction::ConstRestorationPt LastKnownGood =
        TPT.getRestorationPoint();
    unsigned CreatedInstsCost = 0;
    unsigned ExtCost = !TLI.isExtFree(Ext);
    Value *PromotedOperand =
        TPH(Ext, TPT, PromotedInsts, CreatedInstsCost, nullptr, nullptr, TLI);
    // SExt has been moved away.
    // Thus either it will be rematched later in the recursive calls or it is
    // gone. Anyway, we must not fold it into the addressing mode at this point.
    // E.g.,
    // op = add opnd, 1
    // idx = ext op
    // addr = gep base, idx
    // is now:
    // promotedOpnd = ext opnd            <- no match here
    // op = promoted_add promotedOpnd, 1  <- match (later in recursive calls)
    // addr = gep base, op                <- match
    if (MovedAway)
      *MovedAway = true;

    assert(PromotedOperand &&
           "TypePromotionHelper should have filtered out those cases");

    ExtAddrMode BackupAddrMode = AddrMode;
    unsigned OldSize = AddrModeInsts.size();

    if (!matchAddr(PromotedOperand, Depth) ||
        // The total of the new cost is equal to the cost of the created
        // instructions.
        // The total of the old cost is equal to the cost of the extension plus
        // what we have saved in the addressing mode.
        !isPromotionProfitable(CreatedInstsCost,
                               ExtCost + (AddrModeInsts.size() - OldSize),
                               PromotedOperand)) {
      AddrMode = BackupAddrMode;
      AddrModeInsts.resize(OldSize);
      DEBUG(dbgs() << "Sign extension does not pay off: rollback\n");
      TPT.rollback(LastKnownGood);
      return false;
    }
    return true;
  }
  }
  return false;
}

/// If we can, try to add the value of 'Addr' into the current addressing mode.
/// If Addr can't be added to AddrMode, this returns false and leaves AddrMode
/// unmodified. This assumes that Addr is either a pointer type or intptr_t
/// for the target.
///
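/// For example (illustrative), for an address of the form @g + %x (a global
/// plus a register), a successful match could fill AddrMode with BaseGV = @g
/// and %x folded as a base or scaled register, whichever combination the
/// target reports as legal.
///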
bool AddressingModeMatcher::matchAddr(Value *Addr, unsigned Depth) {
  // Start a transaction at this point that we will rollback if the matching
  // fails.
  TypePromotionTransaction::ConstRestorationPt LastKnownGood =
      TPT.getRestorationPoint();
  if (ConstantInt *CI = dyn_cast<ConstantInt>(Addr)) {
    // Fold in immediates if legal for the target.
    AddrMode.BaseOffs += CI->getSExtValue();
    if (TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace))
      return true;
    AddrMode.BaseOffs -= CI->getSExtValue();
  } else if (GlobalValue *GV = dyn_cast<GlobalValue>(Addr)) {
    // If this is a global variable, try to fold it into the addressing mode.
    if (!AddrMode.BaseGV) {
      AddrMode.BaseGV = GV;
      if (TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace))
        return true;
      AddrMode.BaseGV = nullptr;
    }
  } else if (Instruction *I = dyn_cast<Instruction>(Addr)) {
    ExtAddrMode BackupAddrMode = AddrMode;
    unsigned OldSize = AddrModeInsts.size();

    // Check to see if it is possible to fold this operation.
    bool MovedAway = false;
    if (matchOperationAddr(I, I->getOpcode(), Depth, &MovedAway)) {
      // This instruction may have been moved away. If so, there is nothing
      // to check here.
      if (MovedAway)
        return true;
      // Okay, it's possible to fold this. Check to see if it is actually
      // *profitable* to do so. We use a simple cost model to avoid increasing
      // register pressure too much.
      if (I->hasOneUse() ||
          isProfitableToFoldIntoAddressingMode(I, BackupAddrMode, AddrMode)) {
        AddrModeInsts.push_back(I);
        return true;
      }

      // It isn't profitable to do this, roll back.
      //cerr << "NOT FOLDING: " << *I;
      AddrMode = BackupAddrMode;
      AddrModeInsts.resize(OldSize);
      TPT.rollback(LastKnownGood);
    }
  } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Addr)) {
    if (matchOperationAddr(CE, CE->getOpcode(), Depth))
      return true;
    TPT.rollback(LastKnownGood);
  } else if (isa<ConstantPointerNull>(Addr)) {
    // Null pointer gets folded without affecting the addressing mode.
    return true;
  }
  // Worst case, the target should support [reg] addressing modes. :)
  if (!AddrMode.HasBaseReg) {
    AddrMode.HasBaseReg = true;
    AddrMode.BaseReg = Addr;
    // Still check for legality in case the target supports [imm] but not [i+r].
    if (TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace))
      return true;
    AddrMode.HasBaseReg = false;
    AddrMode.BaseReg = nullptr;
  }

  // If the base register is already taken, see if we can do [r+r].
  if (AddrMode.Scale == 0) {
    AddrMode.Scale = 1;
    AddrMode.ScaledReg = Addr;
    if (TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace))
      return true;
    AddrMode.Scale = 0;
    AddrMode.ScaledReg = nullptr;
  }

  // Couldn't match.
  TPT.rollback(LastKnownGood);
  return false;
}

/// Check to see if all uses of OpVal by the specified inline asm call are due
/// to memory operands. If so, return true, otherwise return false.
static bool IsOperandAMemoryOperand(CallInst *CI, InlineAsm *IA, Value *OpVal,
                                    const TargetLowering &TLI,
                                    const TargetRegisterInfo &TRI) {
  const Function *F = CI->getParent()->getParent();
  TargetLowering::AsmOperandInfoVector TargetConstraints =
      TLI.ParseConstraints(F->getParent()->getDataLayout(), &TRI,
                           ImmutableCallSite(CI));
  for (unsigned i = 0, e = TargetConstraints.size(); i != e; ++i) {
    TargetLowering::AsmOperandInfo &OpInfo = TargetConstraints[i];

    // Compute the constraint code and ConstraintType to use.
    TLI.ComputeConstraintToUse(OpInfo, SDValue());

    // If this asm operand is our Value*, and if it isn't an indirect memory
    // operand, we can't fold it!
    if (OpInfo.CallOperandVal == OpVal &&
        (OpInfo.ConstraintType != TargetLowering::C_Memory ||
         !OpInfo.isIndirect))
      return false;
  }

  return true;
}

/// Recursively walk all the uses of I until we find a memory use.
/// If we find an obviously non-foldable instruction, return true.
/// Add the ultimately found memory instructions to MemoryUses.
static bool FindAllMemoryUses(
    Instruction *I,
    SmallVectorImpl<std::pair<Instruction *, unsigned>> &MemoryUses,
    SmallPtrSetImpl<Instruction *> &ConsideredInsts,
    const TargetLowering &TLI, const TargetRegisterInfo &TRI) {
  // If we already considered this instruction, we're done.
  if (!ConsideredInsts.insert(I).second)
    return false;

  // If this is an obviously unfoldable instruction, bail out.
  if (!MightBeFoldableInst(I))
    return true;

  const bool OptSize = I->getFunction()->optForSize();

  // Loop over all the uses, recursively processing them.
  for (Use &U : I->uses()) {
    Instruction *UserI = cast<Instruction>(U.getUser());

    if (LoadInst *LI = dyn_cast<LoadInst>(UserI)) {
      MemoryUses.push_back(std::make_pair(LI, U.getOperandNo()));
      continue;
    }

    if (StoreInst *SI = dyn_cast<StoreInst>(UserI)) {
      unsigned opNo = U.getOperandNo();
      if (opNo != StoreInst::getPointerOperandIndex())
        return true; // Storing addr, not into addr.
      MemoryUses.push_back(std::make_pair(SI, opNo));
      continue;
    }

    if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(UserI)) {
      unsigned opNo = U.getOperandNo();
      if (opNo != AtomicRMWInst::getPointerOperandIndex())
        return true; // Storing addr, not into addr.
      MemoryUses.push_back(std::make_pair(RMW, opNo));
      continue;
    }

    if (AtomicCmpXchgInst *CmpX = dyn_cast<AtomicCmpXchgInst>(UserI)) {
      unsigned opNo = U.getOperandNo();
      if (opNo != AtomicCmpXchgInst::getPointerOperandIndex())
        return true; // Storing addr, not into addr.
      MemoryUses.push_back(std::make_pair(CmpX, opNo));
      continue;
    }

    if (CallInst *CI = dyn_cast<CallInst>(UserI)) {
      // If this is a cold call, we can sink the addressing calculation into
      // the cold path. See optimizeCallInst.
      if (!OptSize && CI->hasFnAttr(Attribute::Cold))
        continue;

      InlineAsm *IA = dyn_cast<InlineAsm>(CI->getCalledValue());
      if (!IA) return true;

      // If this is a memory operand, we're cool, otherwise bail out.
      if (!IsOperandAMemoryOperand(CI, IA, I, TLI, TRI))
        return true;
      continue;
    }

    if (FindAllMemoryUses(UserI, MemoryUses, ConsideredInsts, TLI, TRI))
      return true;
  }

  return false;
}

/// Return true if Val is already known to be live at the use site that we're
/// folding it into. If so, there is no cost to include it in the addressing
/// mode. KnownLive1 and KnownLive2 are two values that we know are live at the
/// instruction already.
bool AddressingModeMatcher::valueAlreadyLiveAtInst(Value *Val,Value *KnownLive1,
                                                   Value *KnownLive2) {
  // If Val is either of the known-live values, we know it is live!
  if (Val == nullptr || Val == KnownLive1 || Val == KnownLive2)
    return true;

  // All values other than instructions and arguments (e.g. constants) are live.
  if (!isa<Instruction>(Val) && !isa<Argument>(Val)) return true;

  // If Val is a constant sized alloca in the entry block, it is live; this is
  // true because it is just a reference to the stack/frame pointer, which is
  // live for the whole function.
  if (AllocaInst *AI = dyn_cast<AllocaInst>(Val))
    if (AI->isStaticAlloca())
      return true;

  // Check to see if this value is already used in the memory instruction's
  // block. If so, it's already live into the block at the very least, so we
  // can reasonably fold it.
  return Val->isUsedInBasicBlock(MemoryInst->getParent());
}

/// It is possible for the addressing mode of the machine to fold the specified
/// instruction into a load or store that ultimately uses it.
/// However, the specified instruction has multiple uses.
/// Given this, it may actually increase register pressure to fold it
/// into the load. For example, consider this code:
///
///     X = ...
///     Y = X+1
///     use(Y)   -> nonload/store
///     Z = Y+1
///     load Z
///
/// In this case, Y has multiple uses, and can be folded into the load of Z
/// (yielding load [X+2]). However, doing this will cause both "X" and "X+1" to
/// be live at the use(Y) line. If we don't fold Y into load Z, we use one
/// fewer register. Since Y can't be folded into "use(Y)" we don't increase the
/// number of computations either.
///
/// Note that this (like most of CodeGenPrepare) is just a rough heuristic. If
/// X was live across 'load Z' for other reasons, we actually *would* want to
/// fold the addressing mode in the Z case. This would make Y die earlier.
bool AddressingModeMatcher::
isProfitableToFoldIntoAddressingMode(Instruction *I, ExtAddrMode &AMBefore,
                                     ExtAddrMode &AMAfter) {
  if (IgnoreProfitability) return true;

  // AMBefore is the addressing mode before this instruction was folded into it,
  // and AMAfter is the addressing mode after the instruction was folded. Get
  // the set of registers referenced by AMAfter and subtract out those
  // referenced by AMBefore: this is the set of values which folding in this
  // address extends the lifetime of.
  //
  // Note that there are only two potential values being referenced here,
  // BaseReg and ScaleReg (global addresses are always available, as are any
  // folded immediates).
  Value *BaseReg = AMAfter.BaseReg, *ScaledReg = AMAfter.ScaledReg;

  // If the BaseReg or ScaledReg was referenced by the previous addrmode, their
  // lifetime wasn't extended by adding this instruction.
  if (valueAlreadyLiveAtInst(BaseReg, AMBefore.BaseReg, AMBefore.ScaledReg))
    BaseReg = nullptr;
  if (valueAlreadyLiveAtInst(ScaledReg, AMBefore.BaseReg, AMBefore.ScaledReg))
    ScaledReg = nullptr;

  // If folding this instruction (and its subexprs) didn't extend any live
  // ranges, we're ok with it.
  if (!BaseReg && !ScaledReg)
    return true;

  // If all uses of this instruction can have the address mode sunk into them,
  // we can remove the addressing mode and effectively trade one live register
  // for another (at worst.) In this context, folding an addressing mode into
  // the use is just a particularly nice way of sinking it.
  SmallVector<std::pair<Instruction*,unsigned>, 16> MemoryUses;
  SmallPtrSet<Instruction*, 16> ConsideredInsts;
  if (FindAllMemoryUses(I, MemoryUses, ConsideredInsts, TLI, TRI))
    return false;  // Has a non-memory, non-foldable use!

  // Now that we know that all uses of this instruction are part of a chain of
  // computation involving only operations that could theoretically be folded
  // into a memory use, loop over each of these memory operation uses and see
  // if they could *actually* fold the instruction. The assumption is that
  // addressing modes are cheap and that duplicating the computation involved
  // many times is worthwhile, even on a fastpath. For sinking candidates
  // (i.e. cold call sites), this serves as a way to prevent excessive code
  // growth since most architectures have some reasonably small and fast way to
  // compute an effective address (e.g., LEA on x86).
  SmallVector<Instruction*, 32> MatchedAddrModeInsts;
  for (unsigned i = 0, e = MemoryUses.size(); i != e; ++i) {
    Instruction *User = MemoryUses[i].first;
    unsigned OpNo = MemoryUses[i].second;

    // Get the access type of this use. If the use isn't a pointer, we don't
    // know what it accesses.
    Value *Address = User->getOperand(OpNo);
    PointerType *AddrTy = dyn_cast<PointerType>(Address->getType());
    if (!AddrTy)
      return false;
    Type *AddressAccessTy = AddrTy->getElementType();
    unsigned AS = AddrTy->getAddressSpace();

    // Do a match against the root of this address, ignoring profitability. This
    // will tell us if the addressing mode for the memory operation will
    // *actually* cover the shared instruction.
    ExtAddrMode Result;
    TypePromotionTransaction::ConstRestorationPt LastKnownGood =
        TPT.getRestorationPoint();
    AddressingModeMatcher Matcher(MatchedAddrModeInsts, TLI, TRI,
                                  AddressAccessTy, AS,
                                  MemoryInst, Result, InsertedInsts,
                                  PromotedInsts, TPT);
    Matcher.IgnoreProfitability = true;
    bool Success = Matcher.matchAddr(Address, 0);
    (void)Success; assert(Success && "Couldn't select *anything*?");

    // The match was only to check profitability; the changes made are not
    // part of the original matcher. Therefore, they should be dropped,
    // otherwise the original matcher will not present the right state.
    TPT.rollback(LastKnownGood);

    // If the match didn't cover I, then it won't be shared by it.
    if (!is_contained(MatchedAddrModeInsts, I))
      return false;

    MatchedAddrModeInsts.clear();
  }

  return true;
}

} // end anonymous namespace

/// Return true if the specified values are defined in a
/// different basic block than BB.
static bool IsNonLocalValue(Value *V, BasicBlock *BB) {
  if (Instruction *I = dyn_cast<Instruction>(V))
    return I->getParent() != BB;
  return false;
}

/// Sink addressing mode computation immediately before MemoryInst if doing so
/// can be done without increasing register pressure. The need for the
/// register pressure constraint means this can end up being an all or nothing
/// decision for all uses of the same addressing computation.
///
/// Load and Store Instructions often have addressing modes that can do
/// significant amounts of computation. As such, instruction selection will try
/// to get the load or store to do as much computation as possible for the
/// program. The problem is that isel can only see within a single block. As
/// such, we sink as much legal addressing mode work into the block as possible.
///
/// This method is used to optimize both load/store and inline asms with memory
/// operands. It's also used to sink addressing computations feeding into cold
/// call sites into their (cold) basic block.
///
/// The motivation for handling sinking into cold blocks is that doing so can
/// both enable other address mode sinking (by satisfying the register pressure
/// constraint above), and reduce register pressure globally (by removing the
/// addressing mode computation from the fast path entirely).
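///
/// For example (illustrative IR), given
/// \code
///   bb1: %a = getelementptr i32* %base, i64 10
///        br label %bb2
///   ...
///   bb2: %v = load i32* %a
/// \endcode
/// the address computation can be rematerialized as "sunkaddr" instructions
/// immediately before the load, so isel can fold base and offset into the
/// load's addressing mode.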
bool CodeGenPrepare::optimizeMemoryInst(Instruction *MemoryInst, Value *Addr,
                                        Type *AccessTy, unsigned AddrSpace) {
  Value *Repl = Addr;

  // Try to collapse single-value PHI nodes. This is necessary to undo
  // unprofitable PRE transformations.
  SmallVector<Value*, 8> worklist;
  SmallPtrSet<Value*, 16> Visited;
  worklist.push_back(Addr);

  // Use a worklist to iteratively look through PHI nodes, and ensure that
  // the addressing modes obtained from the non-PHI roots of the graph
  // are equivalent.
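  // For example (illustrative), if Addr is phi [%a1, %bb1], [%a2, %bb2] and
  // both %a1 and %a2 compute %base + 4, matching each root should yield the
  // same ExtAddrMode (BaseReg = %base, BaseOffs = 4), and that consensus mode
  // is then sunk in place of the PHI itself.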
  Value *Consensus = nullptr;
  unsigned NumUsesConsensus = 0;
  bool IsNumUsesConsensusValid = false;
  SmallVector<Instruction*, 16> AddrModeInsts;
  ExtAddrMode AddrMode;
  TypePromotionTransaction TPT(RemovedInsts);
  TypePromotionTransaction::ConstRestorationPt LastKnownGood =
      TPT.getRestorationPoint();
  while (!worklist.empty()) {
    Value *V = worklist.back();
    worklist.pop_back();

    // Break use-def graph loops.
    if (!Visited.insert(V).second) {
      Consensus = nullptr;
      break;
    }

    // For a PHI node, push all of its incoming values.
    if (PHINode *P = dyn_cast<PHINode>(V)) {
      for (Value *IncValue : P->incoming_values())
        worklist.push_back(IncValue);
      continue;
    }

    // For non-PHIs, determine the addressing mode being computed. Note that
    // the result may differ depending on what other uses our candidate
    // addressing instructions might have.
    SmallVector<Instruction*, 16> NewAddrModeInsts;
    ExtAddrMode NewAddrMode = AddressingModeMatcher::Match(
        V, AccessTy, AddrSpace, MemoryInst, NewAddrModeInsts, *TLI, *TRI,
        InsertedInsts, PromotedInsts, TPT);

    // This check is broken into two cases with very similar code to avoid using
    // getNumUses() as much as possible. Some values have a lot of uses, so
    // calling getNumUses() unconditionally caused a significant compile-time
    // regression.
    if (!Consensus) {
      Consensus = V;
      AddrMode = NewAddrMode;
      AddrModeInsts = NewAddrModeInsts;
      continue;
    } else if (NewAddrMode == AddrMode) {
      if (!IsNumUsesConsensusValid) {
        NumUsesConsensus = Consensus->getNumUses();
        IsNumUsesConsensusValid = true;
      }

      // Ensure that the obtained addressing mode is equivalent to that obtained
      // for all other roots of the PHI traversal. Also, when choosing one
      // such root as representative, select the one with the most uses in order
      // to keep the cost modeling heuristics in AddressingModeMatcher
      // applicable.
      unsigned NumUses = V->getNumUses();
      if (NumUses > NumUsesConsensus) {
        Consensus = V;
        NumUsesConsensus = NumUses;
        AddrModeInsts = NewAddrModeInsts;
      }
      continue;
    }

    Consensus = nullptr;
    break;
  }

  // If the addressing mode couldn't be determined, or if multiple different
  // ones were determined, bail out now.
  if (!Consensus) {
    TPT.rollback(LastKnownGood);
    return false;
  }
  TPT.commit();

  // If all the instructions matched are already in this BB, don't do anything.
  if (none_of(AddrModeInsts, [&](Value *V) {
        return IsNonLocalValue(V, MemoryInst->getParent());
      })) {
    DEBUG(dbgs() << "CGP: Found      local addrmode: " << AddrMode << "\n");
    return false;
  }
  // Insert this computation right after this user. Since our caller is
  // scanning from the top of the BB to the bottom, reuse of the expr is
  // guaranteed to happen later.
  IRBuilder<> Builder(MemoryInst);

  // Now that we've determined the addressing expression we want to use, and
  // know that we have to sink it into this block, check to see if we have
  // already done this for some other load/store instr in this block. If so,
  // reuse the computation.
  Value *&SunkAddr = SunkAddrs[Addr];
  if (SunkAddr) {
    DEBUG(dbgs() << "CGP: Reusing nonlocal addrmode: " << AddrMode << " for "
                 << *MemoryInst << "\n");
    if (SunkAddr->getType() != Addr->getType())
      SunkAddr = Builder.CreatePointerCast(SunkAddr, Addr->getType());
  } else if (AddrSinkUsingGEPs ||
             (!AddrSinkUsingGEPs.getNumOccurrences() && TM &&
              SubtargetInfo->useAA())) {
    // By default, we use the GEP-based method when AA is used later. This
    // prevents new inttoptr/ptrtoint pairs from degrading AA capabilities.
    DEBUG(dbgs() << "CGP: SINKING nonlocal addrmode: " << AddrMode << " for "
                 << *MemoryInst << "\n");
    Type *IntPtrTy = DL->getIntPtrType(Addr->getType());
    Value *ResultPtr = nullptr, *ResultIndex = nullptr;

    // First, find the pointer.
    if (AddrMode.BaseReg && AddrMode.BaseReg->getType()->isPointerTy()) {
      ResultPtr = AddrMode.BaseReg;
      AddrMode.BaseReg = nullptr;
    }

    if (AddrMode.Scale && AddrMode.ScaledReg->getType()->isPointerTy()) {
      // We can't add more than one pointer together, nor can we scale a
      // pointer (both of which seem meaningless).
      if (ResultPtr || AddrMode.Scale != 1)
        return false;

      ResultPtr = AddrMode.ScaledReg;
      AddrMode.Scale = 0;
    }

    if (AddrMode.BaseGV) {
      if (ResultPtr)
        return false;

      ResultPtr = AddrMode.BaseGV;
    }

    // If the real base value actually came from an inttoptr, then the matcher
    // will look through it and provide only the integer value. In that case,
    // use it here.
    if (!ResultPtr && AddrMode.BaseReg) {
      ResultPtr =
        Builder.CreateIntToPtr(AddrMode.BaseReg, Addr->getType(), "sunkaddr");
      AddrMode.BaseReg = nullptr;
    } else if (!ResultPtr && AddrMode.Scale == 1) {
      ResultPtr =
        Builder.CreateIntToPtr(AddrMode.ScaledReg, Addr->getType(), "sunkaddr");
      AddrMode.Scale = 0;
    }

    if (!ResultPtr &&
        !AddrMode.BaseReg && !AddrMode.Scale && !AddrMode.BaseOffs) {
      SunkAddr = Constant::getNullValue(Addr->getType());
    } else if (!ResultPtr) {
      return false;
    } else {
      Type *I8PtrTy =
          Builder.getInt8PtrTy(Addr->getType()->getPointerAddressSpace());
      Type *I8Ty = Builder.getInt8Ty();

      // Start with the base register. Do this first so that subsequent address
      // matching finds it last, which will prevent it from trying to match it
      // as the scaled value in case it happens to be a mul. That would be
      // problematic if we've sunk a different mul for the scale, because then
      // we'd end up sinking both muls.
      if (AddrMode.BaseReg) {
        Value *V = AddrMode.BaseReg;
        if (V->getType() != IntPtrTy)
          V = Builder.CreateIntCast(V, IntPtrTy, /*isSigned=*/true, "sunkaddr");

        ResultIndex = V;
      }

      // Add the scale value.
      if (AddrMode.Scale) {
        Value *V = AddrMode.ScaledReg;
        if (V->getType() == IntPtrTy) {
          // done.
        } else if (cast<IntegerType>(IntPtrTy)->getBitWidth() <
                   cast<IntegerType>(V->getType())->getBitWidth()) {
          V = Builder.CreateTrunc(V, IntPtrTy, "sunkaddr");
        } else {
          // It is only safe to sign extend the BaseReg if we know that the math
          // required to create it did not overflow before we extend it. Since
          // the original IR value was tossed in favor of a constant back when
          // the AddrMode was created we need to bail out gracefully if widths
          // do not match instead of extending it.
          Instruction *I = dyn_cast_or_null<Instruction>(ResultIndex);
          if (I && (ResultIndex != AddrMode.BaseReg))
            I->eraseFromParent();
          return false;
        }

        if (AddrMode.Scale != 1)
          V = Builder.CreateMul(V, ConstantInt::get(IntPtrTy, AddrMode.Scale),
                                "sunkaddr");
        if (ResultIndex)
          ResultIndex = Builder.CreateAdd(ResultIndex, V, "sunkaddr");
        else
          ResultIndex = V;
      }

      // Add in the Base Offset if present.
      if (AddrMode.BaseOffs) {
        Value *V = ConstantInt::get(IntPtrTy, AddrMode.BaseOffs);
        if (ResultIndex) {
          // We need to add this separately from the scale above to help with
          // SDAG consecutive load/store merging.
          if (ResultPtr->getType() != I8PtrTy)
            ResultPtr = Builder.CreatePointerCast(ResultPtr, I8PtrTy);
          ResultPtr = Builder.CreateGEP(I8Ty, ResultPtr, ResultIndex, "sunkaddr");
        }

        ResultIndex = V;
      }

      if (!ResultIndex) {
        SunkAddr = ResultPtr;
      } else {
        if (ResultPtr->getType() != I8PtrTy)
          ResultPtr = Builder.CreatePointerCast(ResultPtr, I8PtrTy);
        SunkAddr = Builder.CreateGEP(I8Ty, ResultPtr, ResultIndex, "sunkaddr");
      }

      if (SunkAddr->getType() != Addr->getType())
        SunkAddr = Builder.CreatePointerCast(SunkAddr, Addr->getType());
    }
  } else {
    DEBUG(dbgs() << "CGP: SINKING nonlocal addrmode: " << AddrMode << " for "
                 << *MemoryInst << "\n");
    Type *IntPtrTy = DL->getIntPtrType(Addr->getType());
    Value *Result = nullptr;

    // Start with the base register. Do this first so that subsequent address
    // matching finds it last, which will prevent it from trying to match it
    // as the scaled value in case it happens to be a mul. That would be
    // problematic if we've sunk a different mul for the scale, because then
    // we'd end up sinking both muls.
    if (AddrMode.BaseReg) {
      Value *V = AddrMode.BaseReg;
      if (V->getType()->isPointerTy())
        V = Builder.CreatePtrToInt(V, IntPtrTy, "sunkaddr");
      if (V->getType() != IntPtrTy)
        V = Builder.CreateIntCast(V, IntPtrTy, /*isSigned=*/true, "sunkaddr");
      Result = V;
    }

    // Add the scale value.
    if (AddrMode.Scale) {
      Value *V = AddrMode.ScaledReg;
      if (V->getType() == IntPtrTy) {
        // done.
      } else if (V->getType()->isPointerTy()) {
        V = Builder.CreatePtrToInt(V, IntPtrTy, "sunkaddr");
      } else if (cast<IntegerType>(IntPtrTy)->getBitWidth() <
                 cast<IntegerType>(V->getType())->getBitWidth()) {
        V = Builder.CreateTrunc(V, IntPtrTy, "sunkaddr");
      } else {
        // It is only safe to sign extend the BaseReg if we know that the math
        // required to create it did not overflow before we extend it. Since
        // the original IR value was tossed in favor of a constant back when
        // the AddrMode was created we need to bail out gracefully if widths
        // do not match instead of extending it.
        Instruction *I = dyn_cast_or_null<Instruction>(Result);
        if (I && (Result != AddrMode.BaseReg))
          I->eraseFromParent();
        return false;
      }
      if (AddrMode.Scale != 1)
        V = Builder.CreateMul(V, ConstantInt::get(IntPtrTy, AddrMode.Scale),
                              "sunkaddr");
      if (Result)
        Result = Builder.CreateAdd(Result, V, "sunkaddr");
      else
        Result = V;
    }

    // Add in the BaseGV if present.
    if (AddrMode.BaseGV) {
      Value *V = Builder.CreatePtrToInt(AddrMode.BaseGV, IntPtrTy, "sunkaddr");
      if (Result)
        Result = Builder.CreateAdd(Result, V, "sunkaddr");
      else
        Result = V;
    }

    // Add in the Base Offset if present.
    if (AddrMode.BaseOffs) {
      Value *V = ConstantInt::get(IntPtrTy, AddrMode.BaseOffs);
      if (Result)
        Result = Builder.CreateAdd(Result, V, "sunkaddr");
      else
        Result = V;
    }

    if (!Result)
      SunkAddr = Constant::getNullValue(Addr->getType());
    else
      SunkAddr = Builder.CreateIntToPtr(Result, Addr->getType(), "sunkaddr");
  }
  MemoryInst->replaceUsesOfWith(Repl, SunkAddr);

  // If we have no uses, recursively delete the value and all dead instructions
  // using it.
  if (Repl->use_empty()) {
    // This can cause recursive deletion, which can invalidate our iterator.
    // Use a WeakVH to hold onto it in case this happens.
    Value *CurValue = &*CurInstIterator;
    WeakVH IterHandle(CurValue);
    BasicBlock *BB = CurInstIterator->getParent();

    RecursivelyDeleteTriviallyDeadInstructions(Repl, TLInfo);

    if (IterHandle != CurValue) {
      // If the iterator instruction was recursively deleted, start over at the
      // start of the block.
      CurInstIterator = BB->begin();
      SunkAddrs.clear();
    }
  }
  ++NumMemoryInsts;
  return true;
}

/// If there are any memory operands, use optimizeMemoryInst to sink their
/// address computing into the block when possible / profitable.
bool CodeGenPrepare::optimizeInlineAsmInst(CallInst *CS) {
  bool MadeChange = false;

  const TargetRegisterInfo *TRI =
      TM->getSubtargetImpl(*CS->getParent()->getParent())->getRegisterInfo();
  TargetLowering::AsmOperandInfoVector TargetConstraints =
      TLI->ParseConstraints(*DL, TRI, CS);
  unsigned ArgNo = 0;
  for (unsigned i = 0, e = TargetConstraints.size(); i != e; ++i) {
    TargetLowering::AsmOperandInfo &OpInfo = TargetConstraints[i];

    // Compute the constraint code and ConstraintType to use.
    TLI->ComputeConstraintToUse(OpInfo, SDValue());

    if (OpInfo.ConstraintType == TargetLowering::C_Memory &&
        OpInfo.isIndirect) {
      Value *OpVal = CS->getArgOperand(ArgNo++);
      MadeChange |= optimizeMemoryInst(CS, OpVal, OpVal->getType(), ~0u);
    } else if (OpInfo.Type == InlineAsm::isInput)
      ArgNo++;
  }

  return MadeChange;
}

/// \brief Check if all the uses of \p Val are equivalent (or free) zero or
/// sign extensions.
static bool hasSameExtUse(Value *Val, const TargetLowering &TLI) {
  assert(!Val->use_empty() && "Input must have at least one use");
  const Instruction *FirstUser = cast<Instruction>(*Val->user_begin());
  bool IsSExt = isa<SExtInst>(FirstUser);
  Type *ExtTy = FirstUser->getType();
  for (const User *U : Val->users()) {
    const Instruction *UI = cast<Instruction>(U);
    if ((IsSExt && !isa<SExtInst>(UI)) || (!IsSExt && !isa<ZExtInst>(UI)))
      return false;
    Type *CurTy = UI->getType();
    // Same input and output types: Same instruction after CSE.
    if (CurTy == ExtTy)
      continue;

    // If IsSExt is true, we are in this situation:
    //   a = Val
    //   b = sext ty1 a to ty2
    //   c = sext ty1 a to ty3
    // Assuming ty2 is shorter than ty3, this could be turned into:
    //   a = Val
    //   b = sext ty1 a to ty2
    //   c = sext ty2 b to ty3
    // However, the last sext is not free.
    if (IsSExt)
      return false;

    // This is a ZExt, maybe this is free to extend from one type to another.
    // In that case, we would not account for a different use.
    Type *NarrowTy;
    Type *LargeTy;
    if (ExtTy->getScalarType()->getIntegerBitWidth() >
        CurTy->getScalarType()->getIntegerBitWidth()) {
      NarrowTy = CurTy;
      LargeTy = ExtTy;
    } else {
      NarrowTy = ExtTy;
      LargeTy = CurTy;
    }

    if (!TLI.isZExtFree(NarrowTy, LargeTy))
      return false;
  }
  // All uses are the same or can be derived from one another for free.
  return true;
}

/// \brief Try to speculatively promote extensions in \p Exts and continue
/// promoting through newly promoted operands recursively as far as doing so is
/// profitable. Save the extensions that were profitably moved up in
/// \p ProfitablyMovedExts. When some promotion happened, \p TPT contains the
/// proper state to revert them.
///
/// \return true if some promotion happened, false otherwise.
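///
/// For example (illustrative), promoting
/// \code
/// %a = add nsw i32 %b, 1
/// %e = sext i32 %a to i64
/// \endcode
/// can rewrite the chain as
/// \code
/// %pb = sext i32 %b to i64
/// %pa = add nsw i64 %pb, 1
/// \endcode
/// after which the walk continues through the newly created extension %pb.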
bool CodeGenPrepare::tryToPromoteExts(
    TypePromotionTransaction &TPT, const SmallVectorImpl<Instruction *> &Exts,
    SmallVectorImpl<Instruction *> &ProfitablyMovedExts,
    unsigned CreatedInstsCost) {
  bool Promoted = false;

  // Iterate over all the extensions to try to promote them.
  for (auto I : Exts) {
    // Early check if we directly have ext(load).
    if (isa<LoadInst>(I->getOperand(0))) {
      ProfitablyMovedExts.push_back(I);
      continue;
    }

    // Check whether or not we want to do any promotion. The reason we have
    // this check inside the for loop is to catch the case where an extension
    // is directly fed by a load because in such case the extension can be moved
    // up without any promotion on its operands.
    if (!TLI || !TLI->enableExtLdPromotion() || DisableExtLdPromotion)
      return false;

    // Get the action to perform the promotion.
    TypePromotionHelper::Action TPH =
        TypePromotionHelper::getAction(I, InsertedInsts, *TLI, PromotedInsts);
    // Check if we can promote.
    if (!TPH) {
      // Save the current extension as we cannot move up through its operand.
      ProfitablyMovedExts.push_back(I);
      continue;
    }

    // Save the current state.
    TypePromotionTransaction::ConstRestorationPt LastKnownGood =
        TPT.getRestorationPoint();
    SmallVector<Instruction *, 4> NewExts;
    unsigned NewCreatedInstsCost = 0;
    unsigned ExtCost = !TLI->isExtFree(I);
    // Promote.
    Value *PromotedVal = TPH(I, TPT, PromotedInsts, NewCreatedInstsCost,
                             &NewExts, nullptr, *TLI);
    assert(PromotedVal &&
           "TypePromotionHelper should have filtered out those cases");

    // We will be able to merge only one extension into a load.
    // Therefore, if we have more than 1 new extension we heuristically
    // cut this search path, because it means we degrade the code quality.
    // With exactly 2, the transformation is neutral, because we will merge
    // one extension but leave one. However, we optimistically keep going,
    // because the new extension may be removed too.
    long long TotalCreatedInstsCost = CreatedInstsCost + NewCreatedInstsCost;
    // FIXME: It would be possible to propagate a negative value instead of
    // conservatively ceiling it to 0.
    TotalCreatedInstsCost =
        std::max((long long)0, (TotalCreatedInstsCost - ExtCost));
    if (!StressExtLdPromotion &&
        (TotalCreatedInstsCost > 1 ||
         !isPromotedInstructionLegal(*TLI, *DL, PromotedVal))) {
      // This promotion is not profitable, rollback to the previous state, and
      // save the current extension in ProfitablyMovedExts as the latest
      // speculative promotion turned out to be unprofitable.
      TPT.rollback(LastKnownGood);
      ProfitablyMovedExts.push_back(I);
      continue;
    }
    // Continue promoting NewExts as far as doing so is profitable.
    SmallVector<Instruction *, 2> NewlyMovedExts;
    (void)tryToPromoteExts(TPT, NewExts, NewlyMovedExts, TotalCreatedInstsCost);
    bool NewPromoted = false;
    for (auto ExtInst : NewlyMovedExts) {
      Instruction *MovedExt = cast<Instruction>(ExtInst);
      Value *ExtOperand = MovedExt->getOperand(0);
      // If we have reached a load, we need this extra profitability check
      // as it could potentially be merged into an ext(load).
      if (isa<LoadInst>(ExtOperand) &&
          !(StressExtLdPromotion || NewCreatedInstsCost <= ExtCost ||
            (ExtOperand->hasOneUse() || hasSameExtUse(ExtOperand, *TLI))))
        continue;
      ProfitablyMovedExts.push_back(MovedExt);
      NewPromoted = true;
    }

    // If none of the speculative promotions for NewExts is profitable, rollback
    // and save the current extension (I) as the last profitable extension.
    if (!NewPromoted) {
      TPT.rollback(LastKnownGood);
      ProfitablyMovedExts.push_back(I);
      continue;
    }
    // The promotion is profitable.
    Promoted = true;
  }
  return Promoted;
}

/// Merge redundant sexts when one dominates the other.
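/// For example (illustrative), given
/// \code
/// %s1 = sext i32 %a to i64
/// ...
/// %s2 = sext i32 %a to i64   ; dominated by %s1
/// \endcode
/// the uses of %s2 are rewritten to use %s1 and %s2 is removed.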
bool CodeGenPrepare::mergeSExts(Function &F) {
  DominatorTree DT(F);
  bool Changed = false;
  for (auto &Entry : ValToSExtendedUses) {
    SExts &Insts = Entry.second;
    SExts CurPts;
    for (Instruction *Inst : Insts) {
      if (RemovedInsts.count(Inst) || !isa<SExtInst>(Inst) ||
          Inst->getOperand(0) != Entry.first)
        continue;
      bool inserted = false;
      for (auto &Pt : CurPts) {
        if (DT.dominates(Inst, Pt)) {
          Pt->replaceAllUsesWith(Inst);
          RemovedInsts.insert(Pt);
          Pt->removeFromParent();
          Pt = Inst;
          inserted = true;
          Changed = true;
          break;
        }
        if (!DT.dominates(Pt, Inst))
          // Give up if we need to merge in a common dominator as experiments
          // show it is not profitable.
          continue;
        Inst->replaceAllUsesWith(Pt);
        RemovedInsts.insert(Inst);
        Inst->removeFromParent();
        inserted = true;
        Changed = true;
        break;
      }
      if (!inserted)
        CurPts.push_back(Inst);
    }
  }
  return Changed;
}

/// Return true if an ext(load) can be formed from an extension in
/// \p MovedExts.
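/// That is, look for a pattern such as
/// \code
/// %ld = load i32* %p
/// %e = zext i32 %ld to i64
/// \endcode
/// (possibly exposed by earlier promotion) that the target can select as a
/// single extending load (ISD::ZEXTLOAD / ISD::SEXTLOAD).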
bool CodeGenPrepare::canFormExtLd(
    const SmallVectorImpl<Instruction *> &MovedExts, LoadInst *&LI,
    Instruction *&Inst, bool HasPromoted) {
  for (auto *MovedExtInst : MovedExts) {
    if (isa<LoadInst>(MovedExtInst->getOperand(0))) {
      LI = cast<LoadInst>(MovedExtInst->getOperand(0));
      Inst = MovedExtInst;
      break;
    }
  }
  if (!LI)
    return false;

  // If they're already in the same block, there's nothing to do.
  // Make the cheap checks first if we did not promote.
  // If we promoted, we need to check if it is indeed profitable.
  if (!HasPromoted && LI->getParent() == Inst->getParent())
    return false;

  EVT VT = TLI->getValueType(*DL, Inst->getType());
  EVT LoadVT = TLI->getValueType(*DL, LI->getType());

  // If the load has other users and the truncate is not free, this probably
  // isn't worthwhile.
  if (!LI->hasOneUse() && (TLI->isTypeLegal(LoadVT) || !TLI->isTypeLegal(VT)) &&
      !TLI->isTruncateFree(Inst->getType(), LI->getType()))
    return false;

  // Check whether the target supports casts folded into loads.
  unsigned LType;
  if (isa<ZExtInst>(Inst))
    LType = ISD::ZEXTLOAD;
  else {
    assert(isa<SExtInst>(Inst) && "Unexpected ext type!");
    LType = ISD::SEXTLOAD;
  }

  return TLI->isLoadExtLegal(LType, VT, LoadVT);
}

/// Move a zext or sext fed by a load into the same basic block as the load,
/// unless conditions are unfavorable. This allows SelectionDAG to fold the
/// extend into the load.
///
/// E.g.,
/// \code
/// %ld = load i32* %addr
/// %add = add nuw i32 %ld, 4
/// %zext = zext i32 %add to i64
/// \endcode
/// =>
/// \code
/// %ld = load i32* %addr
/// %zext = zext i32 %ld to i64
/// %add = add nuw i64 %zext, 4
/// \endcode
/// Note that the promotion of %add to i64 is done in tryToPromoteExts(), which
/// allows us to match zext(load i32*) to i64.
///
/// Also, try to promote the computations used to obtain a sign extended
/// value used in memory accesses.
/// E.g.,
/// \code
/// a = add nsw i32 b, 3
/// d = sext i32 a to i64
/// e = getelementptr ..., i64 d
/// \endcode
/// =>
/// \code
/// f = sext i32 b to i64
/// a = add nsw i64 f, 3
/// e = getelementptr ..., i64 a
/// \endcode
///
/// \p Inst[in/out] the extension may be modified during the process if some
/// promotions apply.
bool CodeGenPrepare::optimizeExt(Instruction *&Inst) {
  // ExtLoad formation and address type promotion infrastructure requires TLI to
  // be effective.
  if (!TLI)
    return false;

  bool AllowPromotionWithoutCommonHeader = false;
  /// See if it is an interesting sext operation for the address type
  /// promotion before trying to promote it, e.g., the ones with the right
  /// type and used in memory accesses.
  bool ATPConsiderable = TTI->shouldConsiderAddressTypePromotion(
      *Inst, AllowPromotionWithoutCommonHeader);
  TypePromotionTransaction TPT(RemovedInsts);
  TypePromotionTransaction::ConstRestorationPt LastKnownGood =
      TPT.getRestorationPoint();
  SmallVector<Instruction *, 1> Exts;
  SmallVector<Instruction *, 2> SpeculativelyMovedExts;
  Exts.push_back(Inst);

  bool HasPromoted = tryToPromoteExts(TPT, Exts, SpeculativelyMovedExts);

  // Look for a load being extended.
  LoadInst *LI = nullptr;
  Instruction *ExtFedByLoad;

  // Try to promote a chain of computation if it allows forming an extended
  // load.
  if (canFormExtLd(SpeculativelyMovedExts, LI, ExtFedByLoad, HasPromoted)) {
    assert(LI && ExtFedByLoad && "Expect a valid load and extension");
    TPT.commit();
    // Move the extend into the same block as the load.
    ExtFedByLoad->removeFromParent();
    ExtFedByLoad->insertAfter(LI);
    // CGP does not check if the zext would be speculatively executed when moved
    // to the same basic block as the load. Preserving its original location
    // would pessimize the debugging experience, as well as negatively impact
    // the quality of sample pgo. We don't want to use "line 0" as that has a
    // size cost in the line-table section and logically the zext can be seen as
    // part of the load. Therefore we conservatively reuse the same debug
    // location for the load and the zext.
    ExtFedByLoad->setDebugLoc(LI->getDebugLoc());
    ++NumExtsMoved;
    Inst = ExtFedByLoad;
    return true;
  }

  // Continue promoting SExts if the target considers address type promotion
  // worthwhile.
  if (ATPConsiderable &&
      performAddressTypePromotion(Inst, AllowPromotionWithoutCommonHeader,
                                  HasPromoted, TPT, SpeculativelyMovedExts))
    return true;

  TPT.rollback(LastKnownGood);
  return false;
}

// Perform address type promotion if doing so is profitable.
// If AllowPromotionWithoutCommonHeader == false, we should find other sext
// instructions that sign extended the same initial value. However, if
// AllowPromotionWithoutCommonHeader == true, we expect promoting the
// extension to be profitable on its own.
bool CodeGenPrepare::performAddressTypePromotion(
    Instruction *&Inst, bool AllowPromotionWithoutCommonHeader,
    bool HasPromoted, TypePromotionTransaction &TPT,
    SmallVectorImpl<Instruction *> &SpeculativelyMovedExts) {
  bool Promoted = false;
  SmallPtrSet<Instruction *, 1> UnhandledExts;
  bool AllSeenFirst = true;
  for (auto I : SpeculativelyMovedExts) {
    Value *HeadOfChain = I->getOperand(0);
    DenseMap<Value *, Instruction *>::iterator AlreadySeen =
        SeenChainsForSExt.find(HeadOfChain);
    // If there is an unhandled SExt which has the same header, try to promote
    // it as well.
    if (AlreadySeen != SeenChainsForSExt.end()) {
      if (AlreadySeen->second != nullptr)
        UnhandledExts.insert(AlreadySeen->second);
      AllSeenFirst = false;
    }
  }

  if (!AllSeenFirst || (AllowPromotionWithoutCommonHeader &&
                        SpeculativelyMovedExts.size() == 1)) {
    TPT.commit();
    if (HasPromoted)
      Promoted = true;
    for (auto I : SpeculativelyMovedExts) {
      Value *HeadOfChain = I->getOperand(0);
      SeenChainsForSExt[HeadOfChain] = nullptr;
      ValToSExtendedUses[HeadOfChain].push_back(I);
    }
    // Update Inst as promotion happened.
    Inst = SpeculativelyMovedExts.pop_back_val();
  } else {
    // This is the first chain visited from the header, keep the current chain
    // as unhandled. Defer promoting this until we encounter another SExt
    // chain derived from the same header.
    for (auto I : SpeculativelyMovedExts) {
      Value *HeadOfChain = I->getOperand(0);
      SeenChainsForSExt[HeadOfChain] = Inst;
    }
    return false;
  }

  if (!AllSeenFirst && !UnhandledExts.empty())
    for (auto VisitedSExt : UnhandledExts) {
      if (RemovedInsts.count(VisitedSExt))
        continue;
      TypePromotionTransaction TPT(RemovedInsts);
      SmallVector<Instruction *, 1> Exts;
      SmallVector<Instruction *, 2> Chains;
      Exts.push_back(VisitedSExt);
      bool HasPromoted = tryToPromoteExts(TPT, Exts, Chains);
      TPT.commit();
      if (HasPromoted)
        Promoted = true;
      for (auto I : Chains) {
        Value *HeadOfChain = I->getOperand(0);
        // Mark this as handled.
        SeenChainsForSExt[HeadOfChain] = nullptr;
        ValToSExtendedUses[HeadOfChain].push_back(I);
      }
    }
  return Promoted;
}

bool CodeGenPrepare::optimizeExtUses(Instruction *I) {
  BasicBlock *DefBB = I->getParent();

  // If the result of a {s|z}ext and its source are both live out, rewrite all
  // other uses of the source with the result of the extension.
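  // For example (illustrative), if %ext = zext i16 %x to i32 and both %x and
  // %ext are used in another block, the uses of %x there are rewritten as
  // "trunc i32 %ext to i16", so only %ext needs to stay live across the block
  // boundary; the trunc is free by the check below.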
  Value *Src = I->getOperand(0);
  if (Src->hasOneUse())
    return false;

  // Only do this xform if truncating is free.
  if (TLI && !TLI->isTruncateFree(I->getType(), Src->getType()))
    return false;

  // Only safe to perform the optimization if the source is also defined in
  // this block.
  if (!isa<Instruction>(Src) || DefBB != cast<Instruction>(Src)->getParent())
    return false;

  bool DefIsLiveOut = false;
  for (User *U : I->users()) {
    Instruction *UI = cast<Instruction>(U);

    // Figure out which BB this ext is used in.
    BasicBlock *UserBB = UI->getParent();
    if (UserBB == DefBB) continue;
    DefIsLiveOut = true;
    break;
  }
  if (!DefIsLiveOut)
    return false;

  // Make sure none of the uses are PHI nodes.
  for (User *U : Src->users()) {
    Instruction *UI = cast<Instruction>(U);
    BasicBlock *UserBB = UI->getParent();
    if (UserBB == DefBB) continue;
    // Be conservative. We don't want this xform to end up introducing
    // reloads just before load / store instructions.
    if (isa<PHINode>(UI) || isa<LoadInst>(UI) || isa<StoreInst>(UI))
      return false;
  }

  // InsertedTruncs - Only insert one trunc in each block.
  DenseMap<BasicBlock*, Instruction*> InsertedTruncs;

  bool MadeChange = false;
  for (Use &U : Src->uses()) {
    Instruction *User = cast<Instruction>(U.getUser());

    // Figure out which BB this ext is used in.
    BasicBlock *UserBB = User->getParent();
    if (UserBB == DefBB) continue;

    // Both src and def are live in this block. Rewrite the use.
    Instruction *&InsertedTrunc = InsertedTruncs[UserBB];

    if (!InsertedTrunc) {
      BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt();
      assert(InsertPt != UserBB->end());
      InsertedTrunc = new TruncInst(I, Src->getType(), "", &*InsertPt);
      InsertedInsts.insert(InsertedTrunc);
    }

    // Replace a use of the {s|z}ext source with a use of the result.
    U = InsertedTrunc;
    ++NumExtUses;
    MadeChange = true;
  }

  return MadeChange;
}

// Find loads whose uses only use some of the loaded value's bits. Add an "and"
// just after the load if the target can fold this into one extload instruction,
// with the hope of eliminating some of the other later "and" instructions using
// the loaded value. "and"s that are made trivially redundant by the insertion
// of the new "and" are removed by this function, while others (e.g. those whose
// path from the load goes through a phi) are left for isel to potentially
// remove.
//
// For example:
//
// b0:
//   x = load i32
//   ...
// b1:
//   y = and x, 0xff
//   z = use y
//
// becomes:
//
// b0:
//   x = load i32
//   x' = and x, 0xff
//   ...
// b1:
//   z = use x'
//
// whereas:
//
// b0:
//   x1 = load i32
//   ...
// b1:
//   x2 = load i32
//   ...
// b2:
//   x = phi x1, x2
//   y = and x, 0xff
//
// becomes (after a call to optimizeLoadExt for each load):
//
// b0:
//   x1 = load i32
//   x1' = and x1, 0xff
//   ...
// b1:
//   x2 = load i32
//   x2' = and x2, 0xff
//   ...
// b2:
//   x = phi x1', x2'
//   y = and x, 0xff
//
bool CodeGenPrepare::optimizeLoadExt(LoadInst *Load) {
  if (!Load->isSimple() ||
      !(Load->getType()->isIntegerTy() || Load->getType()->isPointerTy()))
    return false;

  // Skip loads we've already transformed.
  if (Load->hasOneUse() &&
      InsertedInsts.count(cast<Instruction>(*Load->user_begin())))
    return false;

  // Look at all uses of Load, looking through phis, to determine how many bits
  // of the loaded value are needed.
  SmallVector<Instruction *, 8> WorkList;
  SmallPtrSet<Instruction *, 16> Visited;
  SmallVector<Instruction *, 8> AndsToMaybeRemove;
  for (auto *U : Load->users())
    WorkList.push_back(cast<Instruction>(U));

  EVT LoadResultVT = TLI->getValueType(*DL, Load->getType());
  unsigned BitWidth = LoadResultVT.getSizeInBits();
  APInt DemandBits(BitWidth, 0);
  APInt WidestAndBits(BitWidth, 0);

  while (!WorkList.empty()) {
    Instruction *I = WorkList.back();
    WorkList.pop_back();

    // Break use-def graph loops.
    if (!Visited.insert(I).second)
      continue;

    // For a PHI node, push all of its users.
    if (auto *Phi = dyn_cast<PHINode>(I)) {
      for (auto *U : Phi->users())
        WorkList.push_back(cast<Instruction>(U));
      continue;
    }

    switch (I->getOpcode()) {
    case llvm::Instruction::And: {
      auto *AndC = dyn_cast<ConstantInt>(I->getOperand(1));
      if (!AndC)
        return false;
      APInt AndBits = AndC->getValue();
      DemandBits |= AndBits;
      // Keep track of the widest and mask we see.
      if (AndBits.ugt(WidestAndBits))
        WidestAndBits = AndBits;
      if (AndBits == WidestAndBits && I->getOperand(0) == Load)
        AndsToMaybeRemove.push_back(I);
      break;
    }

    case llvm::Instruction::Shl: {
      auto *ShlC = dyn_cast<ConstantInt>(I->getOperand(1));
      if (!ShlC)
        return false;
      uint64_t ShiftAmt = ShlC->getLimitedValue(BitWidth - 1);
      auto ShlDemandBits = APInt::getAllOnesValue(BitWidth).lshr(ShiftAmt);
      DemandBits |= ShlDemandBits;
      break;
    }

    case llvm::Instruction::Trunc: {
      EVT TruncVT = TLI->getValueType(*DL, I->getType());
      unsigned TruncBitWidth = TruncVT.getSizeInBits();
      auto TruncBits = APInt::getAllOnesValue(TruncBitWidth).zext(BitWidth);
      DemandBits |= TruncBits;
      break;
    }

    default:
      return false;
    }
  }

  uint32_t ActiveBits = DemandBits.getActiveBits();
  // Avoid hoisting (and (load x) 1) since it is unlikely to be folded by the
  // target even if isLoadExtLegal says an i1 EXTLOAD is valid. For example,
  // for the AArch64 target isLoadExtLegal(ZEXTLOAD, i32, i1) returns true, but
  // (and (load x) 1) is not matched as a single instruction, rather as a LDR
  // followed by an AND.
  // TODO: Look into removing this restriction by fixing backends to either
  // return false for isLoadExtLegal for i1 or have them select this pattern to
  // a single instruction.
  //
  // Also avoid hoisting if we didn't see any ands with the exact DemandBits
  // mask, since these are the only ands that will be removed by isel.
  if (ActiveBits <= 1 || !DemandBits.isMask(ActiveBits) ||
      WidestAndBits != DemandBits)
    return false;

  LLVMContext &Ctx = Load->getType()->getContext();
  Type *TruncTy = Type::getIntNTy(Ctx, ActiveBits);
  EVT TruncVT = TLI->getValueType(*DL, TruncTy);

  // Reject cases that won't be matched as extloads.
  if (!LoadResultVT.bitsGT(TruncVT) || !TruncVT.isRound() ||
      !TLI->isLoadExtLegal(ISD::ZEXTLOAD, LoadResultVT, TruncVT))
    return false;

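  // Illustrative walk-through: for a "load i32" whose uses all mask the value
  // with 0xffff, DemandBits == 0xffff, so ActiveBits == 16 and TruncTy == i16.
  // The checks above then require that an i16 zextload producing an i32 is
  // legal before we materialize "and (load x), 0xffff" below.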
  IRBuilder<> Builder(Load->getNextNode());
  auto *NewAnd = dyn_cast<Instruction>(
      Builder.CreateAnd(Load, ConstantInt::get(Ctx, DemandBits)));
  // Mark this instruction as "inserted by CGP", so that other
  // optimizations don't touch it.
  InsertedInsts.insert(NewAnd);

  // Replace all uses of load with new and (except for the use of load in the
  // new and itself).
  Load->replaceAllUsesWith(NewAnd);
  NewAnd->setOperand(0, Load);

  // Remove any and instructions that are now redundant.
  for (auto *And : AndsToMaybeRemove)
    // Check that the and mask is the same as the one we decided to put on the
    // new and.
    if (cast<ConstantInt>(And->getOperand(1))->getValue() == DemandBits) {
      And->replaceAllUsesWith(NewAnd);
      if (&*CurInstIterator == And)
        CurInstIterator = std::next(And->getIterator());
      And->eraseFromParent();
      ++NumAndUses;
    }

  ++NumAndsAdded;
  return true;
}

/// Check if V (an operand of a select instruction) is an expensive instruction
/// that is only used once.
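/// For example (illustrative, assuming the default cost model where integer
/// division is classified as TCC_Expensive):
///   %div = sdiv i32 %a, 42          ; speculatable: known non-zero divisor
///   %sel = select i1 %cmp, i32 %div, i32 7
/// sinkSelectOperand(TTI, %div) returns true, so %div is a candidate to be
/// sunk into the block where its value is actually needed.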
static bool sinkSelectOperand(const TargetTransformInfo *TTI, Value *V) {
  auto *I = dyn_cast<Instruction>(V);
  // If it's safe to speculatively execute, then it should not have side
  // effects; therefore, it's safe to sink and possibly *not* execute.
  return I && I->hasOneUse() && isSafeToSpeculativelyExecute(I) &&
         TTI->getUserCost(I) >= TargetTransformInfo::TCC_Expensive;
}

/// Returns true if a SelectInst should be turned into an explicit branch.
static bool isFormingBranchFromSelectProfitable(const TargetTransformInfo *TTI,
                                                const TargetLowering *TLI,
                                                SelectInst *SI) {
  // If even a predictable select is cheap, then a branch can't be cheaper.
  if (!TLI->isPredictableSelectExpensive())
    return false;

  // FIXME: This should use the same heuristics as IfConversion to determine
  // whether a select is better represented as a branch.

  // If metadata tells us that the select condition is obviously predictable,
  // then we want to replace the select with a branch.
  uint64_t TrueWeight, FalseWeight;
  if (SI->extractProfMetadata(TrueWeight, FalseWeight)) {
    uint64_t Max = std::max(TrueWeight, FalseWeight);
    uint64_t Sum = TrueWeight + FalseWeight;
    if (Sum != 0) {
      auto Probability = BranchProbability::getBranchProbability(Max, Sum);
      if (Probability > TLI->getPredictableBranchThreshold())
        return true;
    }
  }

  CmpInst *Cmp = dyn_cast<CmpInst>(SI->getCondition());

  // If a branch is predictable, an out-of-order CPU can avoid blocking on its
  // comparison condition. If the compare has more than one use, there's
  // probably another cmov or setcc around, so it's not worth emitting a branch.
  if (!Cmp || !Cmp->hasOneUse())
    return false;

  // If either operand of the select is expensive and only needed on one side
  // of the select, we should form a branch.
  if (sinkSelectOperand(TTI, SI->getTrueValue()) ||
      sinkSelectOperand(TTI, SI->getFalseValue()))
    return true;

  return false;
}

/// If \p isTrue is true, return the true value of \p SI, otherwise return
/// false value of \p SI. If the true/false value of \p SI is defined by any
/// select instructions in \p Selects, look through the defining select
/// instruction until the true/false value is not defined in \p Selects.
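/// For example (illustrative), given two selects sharing one condition:
///   %sel1 = select i1 %cmp, i32 %a, i32 %b
///   %sel2 = select i1 %cmp, i32 %sel1, i32 %c
/// with \p Selects == {%sel1, %sel2}, the true value of %sel2 resolves to %a
/// (looking through %sel1) and the false value resolves to %c.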
static Value *getTrueOrFalseValue(
    SelectInst *SI, bool isTrue,
    const SmallPtrSet<const Instruction *, 2> &Selects) {
  Value *V;

  for (SelectInst *DefSI = SI; DefSI != nullptr && Selects.count(DefSI);
       DefSI = dyn_cast<SelectInst>(V)) {
    assert(DefSI->getCondition() == SI->getCondition() &&
           "The condition of DefSI does not match with SI");
    V = (isTrue ? DefSI->getTrueValue() : DefSI->getFalseValue());
  }
  return V;
}

/// If we have a SelectInst that will likely profit from branch prediction,
/// turn it into a branch.
bool CodeGenPrepare::optimizeSelectInst(SelectInst *SI) {
  // Find all consecutive select instructions that share the same condition.
  SmallVector<SelectInst *, 2> ASI;
  ASI.push_back(SI);
  for (BasicBlock::iterator It = ++BasicBlock::iterator(SI);
       It != SI->getParent()->end(); ++It) {
    SelectInst *I = dyn_cast<SelectInst>(&*It);
    if (I && SI->getCondition() == I->getCondition()) {
      ASI.push_back(I);
    } else {
      break;
    }
  }

  SelectInst *LastSI = ASI.back();
  // Increment the current iterator to skip all the rest of the select
  // instructions, because they will either all be lowered to branches or
  // all be left as selects.
  CurInstIterator = std::next(LastSI->getIterator());

  bool VectorCond = !SI->getCondition()->getType()->isIntegerTy(1);

  // Can we convert the 'select' to CF?
  if (DisableSelectToBranch || OptSize || !TLI || VectorCond ||
      SI->getMetadata(LLVMContext::MD_unpredictable))
    return false;

  TargetLowering::SelectSupportKind SelectKind;
  if (VectorCond)
    SelectKind = TargetLowering::VectorMaskSelect;
  else if (SI->getType()->isVectorTy())
    SelectKind = TargetLowering::ScalarCondVectorVal;
  else
    SelectKind = TargetLowering::ScalarValSelect;

  if (TLI->isSelectSupported(SelectKind) &&
      !isFormingBranchFromSelectProfitable(TTI, TLI, SI))
    return false;

  ModifiedDT = true;
  // Transform a sequence like this:
  //    start:
  //       %cmp = cmp uge i32 %a, %b
  //       %sel = select i1 %cmp, i32 %c, i32 %d
  //
  // Into:
  //    start:
  //       %cmp = cmp uge i32 %a, %b
  //       br i1 %cmp, label %select.true, label %select.false
  //    select.true:
  //       br label %select.end
  //    select.false:
  //       br label %select.end
  //    select.end:
  //       %sel = phi i32 [ %c, %select.true ], [ %d, %select.false ]
  //
  // In addition, we may sink instructions that produce %c or %d from
  // the entry block into the destination(s) of the new branch.
  // If the true or false blocks do not contain a sunken instruction, that
  // block and its branch may be optimized away. In that case, one side of the
  // first branch will point directly to select.end, and the corresponding PHI
  // predecessor block will be the start block.

  // First, we split the block containing the select into 2 blocks.
  BasicBlock *StartBlock = SI->getParent();
  BasicBlock::iterator SplitPt = ++(BasicBlock::iterator(LastSI));
  BasicBlock *EndBlock = StartBlock->splitBasicBlock(SplitPt, "select.end");

  // Delete the unconditional branch that was just created by the split.
  StartBlock->getTerminator()->eraseFromParent();

  // These are the new basic blocks for the conditional branch.
  // At least one will become an actual new basic block.
  BasicBlock *TrueBlock = nullptr;
  BasicBlock *FalseBlock = nullptr;
  BranchInst *TrueBranch = nullptr;
  BranchInst *FalseBranch = nullptr;

  // Sink expensive instructions into the conditional blocks to avoid executing
  // them speculatively.
  for (SelectInst *SI : ASI) {
    if (sinkSelectOperand(TTI, SI->getTrueValue())) {
      if (TrueBlock == nullptr) {
        TrueBlock = BasicBlock::Create(SI->getContext(), "select.true.sink",
                                       EndBlock->getParent(), EndBlock);
        TrueBranch = BranchInst::Create(EndBlock, TrueBlock);
      }
      auto *TrueInst = cast<Instruction>(SI->getTrueValue());
      TrueInst->moveBefore(TrueBranch);
    }
    if (sinkSelectOperand(TTI, SI->getFalseValue())) {
      if (FalseBlock == nullptr) {
        FalseBlock = BasicBlock::Create(SI->getContext(), "select.false.sink",
                                        EndBlock->getParent(), EndBlock);
        FalseBranch = BranchInst::Create(EndBlock, FalseBlock);
      }
      auto *FalseInst = cast<Instruction>(SI->getFalseValue());
      FalseInst->moveBefore(FalseBranch);
    }
  }

  // If there was nothing to sink, then arbitrarily choose the 'false' side
  // for a new input value to the PHI.
  if (TrueBlock == FalseBlock) {
    assert(TrueBlock == nullptr &&
           "Unexpected basic block transform while optimizing select");

    FalseBlock = BasicBlock::Create(SI->getContext(), "select.false",
                                    EndBlock->getParent(), EndBlock);
    BranchInst::Create(EndBlock, FalseBlock);
  }

  // Insert the real conditional branch based on the original condition.
  // If we did not create a new block for one of the 'true' or 'false' paths
  // of the condition, it means that side of the branch goes to the end block
  // directly and the path originates from the start block from the point of
  // view of the new PHI.
  BasicBlock *TT, *FT;
  if (TrueBlock == nullptr) {
    TT = EndBlock;
    FT = FalseBlock;
    TrueBlock = StartBlock;
  } else if (FalseBlock == nullptr) {
    TT = TrueBlock;
    FT = EndBlock;
    FalseBlock = StartBlock;
  } else {
    TT = TrueBlock;
    FT = FalseBlock;
  }
  IRBuilder<>(SI).CreateCondBr(SI->getCondition(), TT, FT, SI);

  SmallPtrSet<const Instruction *, 2> INS;
  INS.insert(ASI.begin(), ASI.end());
  // Use reverse iterator because later select may use the value of the
  // earlier select, and we need to propagate value through earlier select
  // to get the PHI operand.
  for (auto It = ASI.rbegin(); It != ASI.rend(); ++It) {
    SelectInst *SI = *It;
    // The select itself is replaced with a PHI Node.
    PHINode *PN = PHINode::Create(SI->getType(), 2, "", &EndBlock->front());
    PN->takeName(SI);
    PN->addIncoming(getTrueOrFalseValue(SI, true, INS), TrueBlock);
    PN->addIncoming(getTrueOrFalseValue(SI, false, INS), FalseBlock);

    SI->replaceAllUsesWith(PN);
    SI->eraseFromParent();
    INS.erase(SI);
    ++NumSelectsExpanded;
  }

  // Instruct OptimizeBlock to skip to the next block.
  CurInstIterator = StartBlock->end();
  return true;
}

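// Heuristically decide whether this shuffle mask is a broadcast of a single
// source lane. For example (illustrative), masks <0, 0, 0, 0> and
// <1, undef, 1, undef> pass, while <0, 1, 0, 1> is rejected because it
// references two different lanes.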
static bool isBroadcastShuffle(ShuffleVectorInst *SVI) {
  SmallVector<int, 16> Mask(SVI->getShuffleMask());
  int SplatElem = -1;
  for (unsigned i = 0; i < Mask.size(); ++i) {
    if (SplatElem != -1 && Mask[i] != -1 && Mask[i] != SplatElem)
      return false;
    SplatElem = Mask[i];
  }

  return true;
}

/// Some targets have expensive vector shifts if the lanes aren't all the same
/// (e.g. x86 only introduced "vpsllvd" and friends with AVX2). In these cases
/// it's often worth sinking a shufflevector splat down to its use so that
/// codegen can spot all lanes are identical.
bool CodeGenPrepare::optimizeShuffleVectorInst(ShuffleVectorInst *SVI) {
  BasicBlock *DefBB = SVI->getParent();

  // Only do this xform if variable vector shifts are particularly expensive.
  if (!TLI || !TLI->isVectorShiftByScalarCheap(SVI->getType()))
    return false;

  // We only expect better codegen by sinking a shuffle if we can recognise a
  // constant splat.
  if (!isBroadcastShuffle(SVI))
    return false;

  // InsertedShuffles - Only insert a shuffle in each block once.
  DenseMap<BasicBlock*, Instruction*> InsertedShuffles;

  bool MadeChange = false;
  for (User *U : SVI->users()) {
    Instruction *UI = cast<Instruction>(U);

    // Figure out which BB this shuffle is used in.
    BasicBlock *UserBB = UI->getParent();
    if (UserBB == DefBB) continue;

    // For now only apply this when the splat is used by a shift instruction.
    if (!UI->isShift()) continue;

    // Everything checks out, sink the shuffle if the user's block doesn't
    // already have a copy.
    Instruction *&InsertedShuffle = InsertedShuffles[UserBB];

    if (!InsertedShuffle) {
      BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt();
      assert(InsertPt != UserBB->end());
      InsertedShuffle =
          new ShuffleVectorInst(SVI->getOperand(0), SVI->getOperand(1),
                                SVI->getOperand(2), "", &*InsertPt);
    }

    UI->replaceUsesOfWith(SVI, InsertedShuffle);
    MadeChange = true;
  }

  // If we removed all uses, nuke the shuffle.
  if (SVI->use_empty()) {
    SVI->eraseFromParent();
    MadeChange = true;
  }

  return MadeChange;
}

bool CodeGenPrepare::optimizeSwitchInst(SwitchInst *SI) {
  if (!TLI || !DL)
    return false;

  Value *Cond = SI->getCondition();
  Type *OldType = Cond->getType();
  LLVMContext &Context = Cond->getContext();
  MVT RegType = TLI->getRegisterType(Context, TLI->getValueType(*DL, OldType));
  unsigned RegWidth = RegType.getSizeInBits();

  if (RegWidth <= cast<IntegerType>(OldType)->getBitWidth())
    return false;

  // If the register width is greater than the type width, expand the condition
  // of the switch instruction and each case constant to the width of the
  // register. By widening the type of the switch condition, subsequent
  // comparisons (for case comparisons) will not need to be extended to the
  // preferred register width, so we will potentially eliminate N-1 extends,
  // where N is the number of cases in the switch.
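  //
  // For example (illustrative), on a target with 64-bit registers, a switch on
  // an i8 condition
  //   switch i8 %x, label %def [ i8 1, label %bb1 ]
  // is widened to
  //   %0 = zext i8 %x to i64
  //   switch i64 %0, label %def [ i64 1, label %bb1 ]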
  auto *NewType = Type::getIntNTy(Context, RegWidth);

  // Zero-extend the switch condition and case constants unless the switch
  // condition is a function argument that is already being sign-extended.
  // In that case, we can avoid an unnecessary mask/extension by sign-extending
  // everything instead.
  Instruction::CastOps ExtType = Instruction::ZExt;
  if (auto *Arg = dyn_cast<Argument>(Cond))
    if (Arg->hasSExtAttr())
      ExtType = Instruction::SExt;

  auto *ExtInst = CastInst::Create(ExtType, Cond, NewType);
  ExtInst->insertBefore(SI);
  SI->setCondition(ExtInst);
  for (auto Case : SI->cases()) {
    APInt NarrowConst = Case.getCaseValue()->getValue();
    APInt WideConst = (ExtType == Instruction::ZExt) ?
                      NarrowConst.zext(RegWidth) : NarrowConst.sext(RegWidth);
    Case.setValue(ConstantInt::get(Context, WideConst));
  }

  return true;
}

namespace {

/// \brief Helper class to promote a scalar operation to a vector one.
/// This class is used to move an extractelement transition downward.
/// E.g.,
/// a = vector_op <2 x i32>
/// b = extractelement <2 x i32> a, i32 0
/// c = scalar_op b
/// store c
///
/// =>
/// a = vector_op <2 x i32>
/// c = vector_op a (equivalent to scalar_op on the related lane)
/// * d = extractelement <2 x i32> c, i32 0
/// * store d
/// Assuming both extractelement and store can be combined, we get rid of the
/// transition.
class VectorPromoteHelper {
  /// DataLayout associated with the current module.
  const DataLayout &DL;

  /// Used to perform some checks on the legality of vector operations.
  const TargetLowering &TLI;

  /// Used to estimate the cost of the promoted chain.
  const TargetTransformInfo &TTI;

  /// The transition being moved downwards.
  Instruction *Transition;
  /// The sequence of instructions to be promoted.
  SmallVector<Instruction *, 4> InstsToBePromoted;
  /// Cost of combining a store and an extract.
  unsigned StoreExtractCombineCost;
  /// Instruction that will be combined with the transition.
  Instruction *CombineInst;

  /// \brief The instruction that represents the current end of the transition.
  /// Since we are faking the promotion until we reach the end of the chain
  /// of computation, we need a way to get the current end of the transition.
  Instruction *getEndOfTransition() const {
    if (InstsToBePromoted.empty())
      return Transition;
    return InstsToBePromoted.back();
  }

  /// \brief Return the index of the original value in the transition.
  /// E.g., for "extractelement <2 x i32> c, i32 1" the original value,
  /// c, is at index 0.
  unsigned getTransitionOriginalValueIdx() const {
    assert(isa<ExtractElementInst>(Transition) &&
           "Other kinds of transitions are not supported yet");
    return 0;
  }

  /// \brief Return the index of the index in the transition.
  /// E.g., for "extractelement <2 x i32> c, i32 0" the index
  /// is at index 1.
  unsigned getTransitionIdx() const {
    assert(isa<ExtractElementInst>(Transition) &&
           "Other kinds of transitions are not supported yet");
    return 1;
  }

  /// \brief Get the type of the transition.
  /// This is the type of the original value.
  /// E.g., for "extractelement <2 x i32> c, i32 1" the type of the
  /// transition is <2 x i32>.
  Type *getTransitionType() const {
    return Transition->getOperand(getTransitionOriginalValueIdx())->getType();
  }

  /// \brief Promote \p ToBePromoted by moving \p Def downward through.
  /// I.e., we have the following sequence:
  /// Def = Transition <ty1> a to <ty2>
  /// b = ToBePromoted <ty2> Def, ...
  /// =>
  /// b = ToBePromoted <ty1> a, ...
  /// Def = Transition <ty1> ToBePromoted to <ty2>
  void promoteImpl(Instruction *ToBePromoted);

  /// \brief Check whether or not it is profitable to promote all the
  /// instructions enqueued to be promoted.
  bool isProfitableToPromote() {
    Value *ValIdx = Transition->getOperand(getTransitionOriginalValueIdx());
    unsigned Index = isa<ConstantInt>(ValIdx)
                         ? cast<ConstantInt>(ValIdx)->getZExtValue()
                         : -1;
    Type *PromotedType = getTransitionType();

    StoreInst *ST = cast<StoreInst>(CombineInst);
    unsigned AS = ST->getPointerAddressSpace();
    unsigned Align = ST->getAlignment();
    // Check if this store is supported.
    if (!TLI.allowsMisalignedMemoryAccesses(
            TLI.getValueType(DL, ST->getValueOperand()->getType()), AS,
            Align)) {
      // If this is not supported, there is no way we can combine
      // the extract with the store.
      return false;
    }

    // The scalar chain of computation has to pay for the transition
    // scalar to vector.
    // The vector chain has to account for the combining cost.
    uint64_t ScalarCost =
        TTI.getVectorInstrCost(Transition->getOpcode(), PromotedType, Index);
    uint64_t VectorCost = StoreExtractCombineCost;
    for (const auto &Inst : InstsToBePromoted) {
      // Compute the cost.
      // By construction, all instructions being promoted are arithmetic ones.
      // Moreover, one argument is a constant that can be viewed as a splat
      // constant.
      Value *Arg0 = Inst->getOperand(0);
      bool IsArg0Constant = isa<UndefValue>(Arg0) || isa<ConstantInt>(Arg0) ||
                            isa<ConstantFP>(Arg0);
      TargetTransformInfo::OperandValueKind Arg0OVK =
          IsArg0Constant ? TargetTransformInfo::OK_UniformConstantValue
                         : TargetTransformInfo::OK_AnyValue;
      TargetTransformInfo::OperandValueKind Arg1OVK =
          !IsArg0Constant ? TargetTransformInfo::OK_UniformConstantValue
                          : TargetTransformInfo::OK_AnyValue;
      ScalarCost += TTI.getArithmeticInstrCost(
          Inst->getOpcode(), Inst->getType(), Arg0OVK, Arg1OVK);
      VectorCost += TTI.getArithmeticInstrCost(Inst->getOpcode(), PromotedType,
                                               Arg0OVK, Arg1OVK);
    }
    DEBUG(dbgs() << "Estimated cost of computation to be promoted:\nScalar: "
                 << ScalarCost << "\nVector: " << VectorCost << '\n');
    return ScalarCost > VectorCost;
  }

  /// \brief Generate a constant vector with \p Val with the same
  /// number of elements as the transition.
  /// \p UseSplat defines whether or not \p Val should be replicated
  /// across the whole vector.
  /// In other words, if UseSplat == true, we generate <Val, Val, ..., Val>,
  /// otherwise we generate a vector with as many undef as possible:
  /// <undef, ..., undef, Val, undef, ..., undef> where \p Val is only
  /// used at the index of the extract.
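  /// For example (illustrative), for a <4 x i32> transition extracting
  /// index 2 with \p Val == i32 7, this returns
  ///   <i32 7, i32 7, i32 7, i32 7>                 if UseSplat is true,
  ///   <i32 undef, i32 undef, i32 7, i32 undef>     otherwise.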
  Value *getConstantVector(Constant *Val, bool UseSplat) const {
    unsigned ExtractIdx = UINT_MAX;
    if (!UseSplat) {
      // If we cannot determine where the constant must be, we have to
      // use a splat constant.
      Value *ValExtractIdx = Transition->getOperand(getTransitionIdx());
      if (ConstantInt *CstVal = dyn_cast<ConstantInt>(ValExtractIdx))
        ExtractIdx = CstVal->getSExtValue();
      else
        UseSplat = true;
    }

    unsigned End = getTransitionType()->getVectorNumElements();
    if (UseSplat)
      return ConstantVector::getSplat(End, Val);

    SmallVector<Constant *, 4> ConstVec;
    UndefValue *UndefVal = UndefValue::get(Val->getType());
    for (unsigned Idx = 0; Idx != End; ++Idx) {
      if (Idx == ExtractIdx)
        ConstVec.push_back(Val);
      else
        ConstVec.push_back(UndefVal);
    }
    return ConstantVector::get(ConstVec);
  }

  /// \brief Check if promoting to a vector type an operand at \p OperandIdx
  /// in \p Use can trigger undefined behavior.
  static bool canCauseUndefinedBehavior(const Instruction *Use,
                                        unsigned OperandIdx) {
    // It is not safe to introduce undef when the operand is on
    // the right hand side of a division-like instruction.
    if (OperandIdx != 1)
      return false;
    switch (Use->getOpcode()) {
    default:
      return false;
    case Instruction::SDiv:
    case Instruction::UDiv:
    case Instruction::SRem:
    case Instruction::URem:
      return true;
    case Instruction::FDiv:
    case Instruction::FRem:
      return !Use->hasNoNaNs();
    }
    llvm_unreachable(nullptr);
  }

public:
  VectorPromoteHelper(const DataLayout &DL, const TargetLowering &TLI,
                      const TargetTransformInfo &TTI, Instruction *Transition,
                      unsigned CombineCost)
      : DL(DL), TLI(TLI), TTI(TTI), Transition(Transition),
        StoreExtractCombineCost(CombineCost), CombineInst(nullptr) {
    assert(Transition && "Do not know how to promote null");
  }

  /// \brief Check if we can promote \p ToBePromoted to \p Type.
  bool canPromote(const Instruction *ToBePromoted) const {
    // We could support CastInst too.
    return isa<BinaryOperator>(ToBePromoted);
  }

  /// \brief Check if it is profitable to promote \p ToBePromoted
  /// by moving downward the transition through.
  bool shouldPromote(const Instruction *ToBePromoted) const {
    // Promote only if all the operands can be statically expanded.
    // Indeed, we do not want to introduce any new kind of transitions.
    for (const Use &U : ToBePromoted->operands()) {
      const Value *Val = U.get();
      if (Val == getEndOfTransition()) {
        // If the use is a division and the transition is on the rhs,
        // we cannot promote the operation, otherwise we may create a
        // division by zero.
        if (canCauseUndefinedBehavior(ToBePromoted, U.getOperandNo()))
          return false;
        continue;
      }
      if (!isa<ConstantInt>(Val) && !isa<UndefValue>(Val) &&
          !isa<ConstantFP>(Val))
        return false;
    }
    // Check that the resulting operation is legal.
    int ISDOpcode = TLI.InstructionOpcodeToISD(ToBePromoted->getOpcode());
    if (!ISDOpcode)
      return false;
    return StressStoreExtract ||
           TLI.isOperationLegalOrCustom(
               ISDOpcode, TLI.getValueType(DL, getTransitionType(), true));
  }

  /// \brief Check whether or not \p Use can be combined
  /// with the transition.
  /// I.e., is it possible to do Use(Transition) => AnotherUse?
  bool canCombine(const Instruction *Use) { return isa<StoreInst>(Use); }

  /// \brief Record \p ToBePromoted as part of the chain to be promoted.
  void enqueueForPromotion(Instruction *ToBePromoted) {
    InstsToBePromoted.push_back(ToBePromoted);
  }

  /// \brief Set the instruction that will be combined with the transition.
  void recordCombineInstruction(Instruction *ToBeCombined) {
    assert(canCombine(ToBeCombined) && "Unsupported instruction to combine");
    CombineInst = ToBeCombined;
  }

  /// \brief Promote all the instructions enqueued for promotion if it is
  /// profitable.
  /// \return True if the promotion happened, false otherwise.
  bool promote() {
    // Check if there is something to promote.
    // Right now, if we do not have anything to combine with,
    // we assume the promotion is not profitable.
    if (InstsToBePromoted.empty() || !CombineInst)
      return false;

    // Check cost.
    if (!StressStoreExtract && !isProfitableToPromote())
      return false;

    // Promote.
    for (auto &ToBePromoted : InstsToBePromoted)
      promoteImpl(ToBePromoted);
    InstsToBePromoted.clear();
    return true;
  }
};
} // End of anonymous namespace.

void VectorPromoteHelper::promoteImpl(Instruction *ToBePromoted) {
  // At this point, we know that all the operands of ToBePromoted but Def
  // can be statically promoted.
  // For Def, we need to use its parameter in ToBePromoted:
  // b = ToBePromoted ty1 a
  // Def = Transition ty1 b to ty2
  // Move the transition down.
  // 1. Replace all uses of the promoted operation by the transition.
  // = ... b => = ... Def.
  assert(ToBePromoted->getType() == Transition->getType() &&
         "The type of the result of the transition does not match "
         "the final type");
  ToBePromoted->replaceAllUsesWith(Transition);
  // 2. Update the type of the uses.
  // b = ToBePromoted ty2 Def => b = ToBePromoted ty1 Def.
  Type *TransitionTy = getTransitionType();
  ToBePromoted->mutateType(TransitionTy);
  // 3. Update all the operands of the promoted operation with promoted
  // operands.
  // b = ToBePromoted ty1 Def => b = ToBePromoted ty1 a.
  for (Use &U : ToBePromoted->operands()) {
    Value *Val = U.get();
    Value *NewVal = nullptr;
    if (Val == Transition)
      NewVal = Transition->getOperand(getTransitionOriginalValueIdx());
    else if (isa<UndefValue>(Val) || isa<ConstantInt>(Val) ||
             isa<ConstantFP>(Val)) {
      // Use a splat constant if it is not safe to use undef.
      NewVal = getConstantVector(
          cast<Constant>(Val),
          isa<UndefValue>(Val) ||
              canCauseUndefinedBehavior(ToBePromoted, U.getOperandNo()));
    } else
      llvm_unreachable("Did you modify shouldPromote and forget to update "
                       "this?");
    ToBePromoted->setOperand(U.getOperandNo(), NewVal);
  }
  Transition->removeFromParent();
  Transition->insertAfter(ToBePromoted);
  Transition->setOperand(getTransitionOriginalValueIdx(), ToBePromoted);
}

/// Some targets can do store(extractelement) with one instruction.
/// Try to push the extractelement towards the stores when the target
/// has this feature and this is profitable.
bool CodeGenPrepare::optimizeExtractElementInst(Instruction *Inst) {
  unsigned CombineCost = UINT_MAX;
  if (DisableStoreExtract || !TLI ||
      (!StressStoreExtract &&
       !TLI->canCombineStoreAndExtract(Inst->getOperand(0)->getType(),
                                       Inst->getOperand(1), CombineCost)))
    return false;

  // At this point we know that Inst is a vector to scalar transition.
  // Try to move it down the def-use chain, until:
  // - We can combine the transition with its single use
  //   => we got rid of the transition.
  // - We escape the current basic block
  //   => we would need to check that we are moving it at a cheaper place and
  //      we do not do that for now.
  BasicBlock *Parent = Inst->getParent();
  DEBUG(dbgs() << "Found an interesting transition: " << *Inst << '\n');
  VectorPromoteHelper VPH(*DL, *TLI, *TTI, Inst, CombineCost);
  // If the transition has more than one use, assume this is not going to be
  // beneficial.
  while (Inst->hasOneUse()) {
    Instruction *ToBePromoted = cast<Instruction>(*Inst->user_begin());
    DEBUG(dbgs() << "Use: " << *ToBePromoted << '\n');

    if (ToBePromoted->getParent() != Parent) {
      DEBUG(dbgs() << "Instruction to promote is in a different block ("
                   << ToBePromoted->getParent()->getName()
                   << ") than the transition (" << Parent->getName() << ").\n");
      return false;
    }

    if (VPH.canCombine(ToBePromoted)) {
      DEBUG(dbgs() << "Assume " << *Inst << '\n'
                   << "will be combined with: " << *ToBePromoted << '\n');
      VPH.recordCombineInstruction(ToBePromoted);
      bool Changed = VPH.promote();
      NumStoreExtractExposed += Changed;
      return Changed;
    }

    DEBUG(dbgs() << "Try promoting.\n");
    if (!VPH.canPromote(ToBePromoted) || !VPH.shouldPromote(ToBePromoted))
      return false;

    DEBUG(dbgs() << "Promoting is possible... Enqueue for promotion!\n");

    VPH.enqueueForPromotion(ToBePromoted);
    Inst = ToBePromoted;
  }
  return false;
}

/// For the instruction sequence of store below, F and I values
/// are bundled together as an i64 value before being stored into memory.
/// Sometimes it is more efficient to generate separate stores for F and I,
/// which can remove the bitwise instructions or sink them to colder places.
///
///   (store (or (zext (bitcast F to i32) to i64),
///              (shl (zext I to i64), 32)), addr)  -->
///   (store F, addr) and (store I, addr+4)
///
/// Similarly, splitting for other merged store can also be beneficial, like:
/// For pair of {i32, i32}, i64 store --> two i32 stores.
/// For pair of {i32, i16}, i64 store --> two i32 stores.
/// For pair of {i16, i16}, i32 store --> two i16 stores.
/// For pair of {i16, i8},  i32 store --> two i16 stores.
/// For pair of {i8, i8},   i16 store --> two i8 stores.
///
/// We allow each target to determine specifically which kind of splitting is
/// supported.
///
/// The store patterns are commonly seen from the simple code snippet below
/// if only std::make_pair(...) is SROA-transformed before being inlined into
/// hoo:
/// void goo(const std::pair<int, float> &);
/// hoo() {
///   ...
///   goo(std::make_pair(tmp, ftmp));
///   ...
/// }
///
/// Although we already have similar splitting in DAG Combine, we duplicate
/// it in CodeGenPrepare to catch the case in which the pattern is across
/// multiple BBs. The logic in DAG Combine is kept to catch cases generated
/// during code expansion.
static bool splitMergedValStore(StoreInst &SI, const DataLayout &DL,
                                const TargetLowering &TLI) {
  // Handle simple but common cases only.
  Type *StoreType = SI.getValueOperand()->getType();
  if (DL.getTypeStoreSizeInBits(StoreType) != DL.getTypeSizeInBits(StoreType) ||
      DL.getTypeSizeInBits(StoreType) == 0)
    return false;

  unsigned HalfValBitSize = DL.getTypeSizeInBits(StoreType) / 2;
  Type *SplitStoreType = Type::getIntNTy(SI.getContext(), HalfValBitSize);
  if (DL.getTypeStoreSizeInBits(SplitStoreType) !=
      DL.getTypeSizeInBits(SplitStoreType))
    return false;

  // Match the following patterns:
  // (store (or (zext LValue to i64),
  //            (shl (zext HValue to i64), HalfValBitSize)), addr)
  //  or
  // (store (or (shl (zext HValue to i64), HalfValBitSize),
  //            (zext LValue to i64)), addr)
  // Expect both operands of OR and the first operand of SHL have only
  // one use.
  Value *LValue, *HValue;
  if (!match(SI.getValueOperand(),
             m_c_Or(m_OneUse(m_ZExt(m_Value(LValue))),
                    m_OneUse(m_Shl(m_OneUse(m_ZExt(m_Value(HValue))),
                                   m_SpecificInt(HalfValBitSize))))))
    return false;

  // Check that LValue and HValue are integers with size less than or equal to
  // HalfValBitSize.
  if (!LValue->getType()->isIntegerTy() ||
      DL.getTypeSizeInBits(LValue->getType()) > HalfValBitSize ||
      !HValue->getType()->isIntegerTy() ||
      DL.getTypeSizeInBits(HValue->getType()) > HalfValBitSize)
    return false;

  // If LValue/HValue is a bitcast instruction, use the EVT before bitcast
  // as the input of target query.
  auto *LBC = dyn_cast<BitCastInst>(LValue);
  auto *HBC = dyn_cast<BitCastInst>(HValue);
  EVT LowTy = LBC ? EVT::getEVT(LBC->getOperand(0)->getType())
                  : EVT::getEVT(LValue->getType());
  EVT HighTy = HBC ? EVT::getEVT(HBC->getOperand(0)->getType())
                   : EVT::getEVT(HValue->getType());
  if (!ForceSplitStore && !TLI.isMultiStoresCheaperThanBitsMerge(LowTy, HighTy))
    return false;

  // Start to split store.
  IRBuilder<> Builder(SI.getContext());
  Builder.SetInsertPoint(&SI);

  // If LValue/HValue is a bitcast in another BB, create a new one in current
  // BB so it may be merged with the split stores by the DAG combiner.
  if (LBC && LBC->getParent() != SI.getParent())
    LValue = Builder.CreateBitCast(LBC->getOperand(0), LBC->getType());
  if (HBC && HBC->getParent() != SI.getParent())
    HValue = Builder.CreateBitCast(HBC->getOperand(0), HBC->getType());

  auto CreateSplitStore = [&](Value *V, bool Upper) {
    V = Builder.CreateZExtOrBitCast(V, SplitStoreType);
    Value *Addr = Builder.CreateBitCast(
        SI.getOperand(1),
        SplitStoreType->getPointerTo(SI.getPointerAddressSpace()));
    if (Upper)
      Addr = Builder.CreateGEP(
          SplitStoreType, Addr,
          ConstantInt::get(Type::getInt32Ty(SI.getContext()), 1));
    Builder.CreateAlignedStore(
        V, Addr, Upper ? SI.getAlignment() / 2 : SI.getAlignment());
  };

  CreateSplitStore(LValue, false);
  CreateSplitStore(HValue, true);

  // Delete the old store.
  SI.eraseFromParent();
  return true;
}

bool CodeGenPrepare::optimizeInst(Instruction *I, bool& ModifiedDT) {
  // Bail out if we inserted the instruction to prevent optimizations from
  // stepping on each other's toes.
  if (InsertedInsts.count(I))
    return false;

  if (PHINode *P = dyn_cast<PHINode>(I)) {
    // It is possible for very late stage optimizations (such as SimplifyCFG)
    // to introduce PHI nodes too late to be cleaned up. If we detect such a
    // trivial PHI, go ahead and zap it here.
    if (Value *V = SimplifyInstruction(P, *DL, TLInfo, nullptr)) {
      P->replaceAllUsesWith(V);
      P->eraseFromParent();
      ++NumPHIsElim;
      return true;
    }
    return false;
  }

  if (CastInst *CI = dyn_cast<CastInst>(I)) {
    // If the source of the cast is a constant, then this should have
    // already been constant folded. The only reason NOT to constant fold
    // it is if something (e.g. LSR) was careful to place the constant
    // evaluation in a block other than the one that uses it (e.g. to hoist
    // the address of globals out of a loop). If this is the case, we don't
    // want to forward-subst the cast.
    if (isa<Constant>(CI->getOperand(0)))
      return false;

    if (TLI && OptimizeNoopCopyExpression(CI, *TLI, *DL))
      return true;

    if (isa<ZExtInst>(I) || isa<SExtInst>(I)) {
      // Sink a zext or sext into its user blocks if the target type doesn't
      // fit in one register.
      if (TLI &&
          TLI->getTypeAction(CI->getContext(),
                             TLI->getValueType(*DL, CI->getType())) ==
              TargetLowering::TypeExpandInteger) {
        return SinkCast(CI);
      } else {
        bool MadeChange = optimizeExt(I);
        return MadeChange | optimizeExtUses(I);
      }
    }
    return false;
  }

  if (CmpInst *CI = dyn_cast<CmpInst>(I))
    if (!TLI || !TLI->hasMultipleConditionRegisters())
      return OptimizeCmpExpression(CI, TLI);

  if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
    LI->setMetadata(LLVMContext::MD_invariant_group, nullptr);
    if (TLI) {
      bool Modified = optimizeLoadExt(LI);
      unsigned AS = LI->getPointerAddressSpace();
      Modified |= optimizeMemoryInst(I, I->getOperand(0), LI->getType(), AS);
      return Modified;
    }
    return false;
  }

  if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
    if (TLI && splitMergedValStore(*SI, *DL, *TLI))
      return true;
    SI->setMetadata(LLVMContext::MD_invariant_group, nullptr);
    if (TLI) {
      unsigned AS = SI->getPointerAddressSpace();
      return optimizeMemoryInst(I, SI->getOperand(1),
                                SI->getOperand(0)->getType(), AS);
    }
    return false;
  }

  if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I)) {
    unsigned AS = RMW->getPointerAddressSpace();
    return optimizeMemoryInst(I, RMW->getPointerOperand(),
                              RMW->getType(), AS);
  }

  if (AtomicCmpXchgInst *CmpX = dyn_cast<AtomicCmpXchgInst>(I)) {
    unsigned AS = CmpX->getPointerAddressSpace();
    return optimizeMemoryInst(I, CmpX->getPointerOperand(),
                              CmpX->getCompareOperand()->getType(), AS);
  }

  BinaryOperator *BinOp = dyn_cast<BinaryOperator>(I);

  if (BinOp && (BinOp->getOpcode() == Instruction::And) &&
      EnableAndCmpSinking && TLI)
    return sinkAndCmp0Expression(BinOp, *TLI, InsertedInsts);

  if (BinOp && (BinOp->getOpcode() == Instruction::AShr ||
                BinOp->getOpcode() == Instruction::LShr)) {
    ConstantInt *CI = dyn_cast<ConstantInt>(BinOp->getOperand(1));
    if (TLI && CI && TLI->hasExtractBitsInsn())
      return OptimizeExtractBits(BinOp, CI, *TLI, *DL);
    return false;
  }

  if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(I)) {
    if (GEPI->hasAllZeroIndices()) {
      // The GEP operand must be a pointer, so must its result -> BitCast.
      Instruction *NC = new BitCastInst(GEPI->getOperand(0), GEPI->getType(),
                                        GEPI->getName(), GEPI);
      GEPI->replaceAllUsesWith(NC);
      GEPI->eraseFromParent();
      ++NumGEPsElim;
      optimizeInst(NC, ModifiedDT);
      return true;
    }
    return false;
  }

  if (CallInst *CI = dyn_cast<CallInst>(I))
    return optimizeCallInst(CI, ModifiedDT);

  if (SelectInst *SI = dyn_cast<SelectInst>(I))
    return optimizeSelectInst(SI);

  if (ShuffleVectorInst *SVI = dyn_cast<ShuffleVectorInst>(I))
    return optimizeShuffleVectorInst(SVI);

  if (auto *Switch = dyn_cast<SwitchInst>(I))
    return optimizeSwitchInst(Switch);

  if (isa<ExtractElementInst>(I))
    return optimizeExtractElementInst(I);

  return false;
}

/// Given an OR instruction, check to see if this is a bitreverse
/// idiom. If so, insert the new intrinsic and return true.
static bool makeBitReverse(Instruction &I, const DataLayout &DL,
                           const TargetLowering &TLI) {
  if (!I.getType()->isIntegerTy() ||
      !TLI.isOperationLegalOrCustom(ISD::BITREVERSE,
                                    TLI.getValueType(DL, I.getType(), true)))
    return false;

  SmallVector<Instruction*, 4> Insts;
  if (!recognizeBSwapOrBitReverseIdiom(&I, false, true, Insts))
    return false;
  Instruction *LastInst = Insts.back();
  I.replaceAllUsesWith(LastInst);
  RecursivelyDeleteTriviallyDeadInstructions(&I);
  return true;
}

// In this pass we look for GEP and cast instructions that are used
// across basic blocks and rewrite them to improve basic-block-at-a-time
// selection.
bool CodeGenPrepare::optimizeBlock(BasicBlock &BB, bool& ModifiedDT) {
  SunkAddrs.clear();
  bool MadeChange = false;

  CurInstIterator = BB.begin();
  while (CurInstIterator != BB.end()) {
    MadeChange |= optimizeInst(&*CurInstIterator++, ModifiedDT);
    if (ModifiedDT)
      return true;
  }

  bool MadeBitReverse = true;
  while (TLI && MadeBitReverse) {
    MadeBitReverse = false;
    for (auto &I : reverse(BB)) {
      if (makeBitReverse(I, *DL, *TLI)) {
        MadeBitReverse = MadeChange = true;
        ModifiedDT = true;
        break;
      }
    }
  }
  MadeChange |= dupRetToEnableTailCallOpts(&BB);

  return MadeChange;
}

// If llvm.dbg.value is far away from the value, then isel may not be able to
// handle it properly. isel will drop llvm.dbg.value if it cannot find a node
// corresponding to the value.
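//
// For example (illustrative), a dbg.value separated from its operand:
//   %add = add i32 %a, %b
//   ...                        ; many unrelated instructions
//   call void @llvm.dbg.value(metadata i32 %add, ...)
// is moved to just after %add, so isel still has a node for the value when it
// reaches the dbg.value.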
bool CodeGenPrepare::placeDbgValues(Function &F) {
  bool MadeChange = false;
  for (BasicBlock &BB : F) {
    Instruction *PrevNonDbgInst = nullptr;
    for (BasicBlock::iterator BI = BB.begin(), BE = BB.end(); BI != BE;) {
      Instruction *Insn = &*BI++;
      DbgValueInst *DVI = dyn_cast<DbgValueInst>(Insn);
      // Leave dbg.values that refer to an alloca alone. These
      // intrinsics describe the address of a variable (= the alloca)
      // being taken. They should not be moved next to the alloca
      // (and to the beginning of the scope), but rather stay close to
      // where said address is used.
      if (!DVI || (DVI->getValue() && isa<AllocaInst>(DVI->getValue()))) {
        PrevNonDbgInst = Insn;
        continue;
      }

      Instruction *VI = dyn_cast_or_null<Instruction>(DVI->getValue());
      if (VI && VI != PrevNonDbgInst && !VI->isTerminator()) {
        // If VI is a phi in a block with an EHPad terminator, we can't insert
        // after it.
        if (isa<PHINode>(VI) && VI->getParent()->getTerminator()->isEHPad())
          continue;
        DEBUG(dbgs() << "Moving Debug Value before :\n" << *DVI << ' ' << *VI);
        DVI->removeFromParent();
        if (isa<PHINode>(VI))
          DVI->insertBefore(&*VI->getParent()->getFirstInsertionPt());
        else
          DVI->insertAfter(VI);
        MadeChange = true;
        ++NumDbgValueMoved;
      }
    }
  }
  return MadeChange;
}

/// \brief Scale down both weights to fit into uint32_t.
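/// For example (illustrative), NewTrue = 2^33 and NewFalse = 2^32 give
/// Scale = (2^33 / UINT32_MAX) + 1 == 3, so the weights become 2863311530
/// and 1431655765, which both fit into uint32_t while roughly preserving
/// the 2:1 ratio.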
static void scaleWeights(uint64_t &NewTrue, uint64_t &NewFalse) {
  uint64_t NewMax = (NewTrue > NewFalse) ? NewTrue : NewFalse;
  uint32_t Scale = (NewMax / UINT32_MAX) + 1;
  NewTrue = NewTrue / Scale;
  NewFalse = NewFalse / Scale;
}

/// \brief Some targets prefer to split a conditional branch like:
/// \code
///   %0 = icmp ne i32 %a, 0
///   %1 = icmp ne i32 %b, 0
///   %or.cond = or i1 %0, %1
///   br i1 %or.cond, label %TrueBB, label %FalseBB
/// \endcode
/// into multiple branch instructions like:
/// \code
///   bb1:
///     %0 = icmp ne i32 %a, 0
///     br i1 %0, label %TrueBB, label %bb2
///   bb2:
///     %1 = icmp ne i32 %b, 0
///     br i1 %1, label %TrueBB, label %FalseBB
/// \endcode
/// This usually allows instruction selection to do even further optimizations
/// and combine the compare with the branch instruction. Currently this is
/// applied for targets which have "cheap" jump instructions.
///
/// FIXME: Remove the (equivalent?) implementation in SelectionDAG.
///
bool CodeGenPrepare::splitBranchCondition(Function &F) {
  if (!TM || !TM->Options.EnableFastISel || !TLI || TLI->isJumpExpensive())
    return false;

  bool MadeChange = false;
  for (auto &BB : F) {
    // Does this BB end with the following?
    //   %cond1 = icmp|fcmp|binary instruction ...
    //   %cond2 = icmp|fcmp|binary instruction ...
    //   %cond.or = or|and i1 %cond1, cond2
    //   br i1 %cond.or label %dest1, label %dest2
    BinaryOperator *LogicOp;
    BasicBlock *TBB, *FBB;
    if (!match(BB.getTerminator(), m_Br(m_OneUse(m_BinOp(LogicOp)), TBB, FBB)))
      continue;

    auto *Br1 = cast<BranchInst>(BB.getTerminator());
    if (Br1->getMetadata(LLVMContext::MD_unpredictable))
      continue;

    unsigned Opc;
    Value *Cond1, *Cond2;
    if (match(LogicOp, m_And(m_OneUse(m_Value(Cond1)),
                             m_OneUse(m_Value(Cond2)))))
      Opc = Instruction::And;
    else if (match(LogicOp, m_Or(m_OneUse(m_Value(Cond1)),
                                 m_OneUse(m_Value(Cond2)))))
      Opc = Instruction::Or;
    else
      continue;

    if (!match(Cond1, m_CombineOr(m_Cmp(), m_BinOp())) ||
        !match(Cond2, m_CombineOr(m_Cmp(), m_BinOp())))
      continue;

    DEBUG(dbgs() << "Before branch condition splitting\n"; BB.dump());

    // Create a new BB.
    auto TmpBB =
        BasicBlock::Create(BB.getContext(), BB.getName() + ".cond.split",
                           BB.getParent(), BB.getNextNode());

    // Update original basic block by using the first condition directly by the
    // branch instruction and removing the no longer needed and/or instruction.
    Br1->setCondition(Cond1);
    LogicOp->eraseFromParent();

    // Depending on the condition we have to either replace the true or the
    // false successor of the original branch instruction.
    if (Opc == Instruction::And)
      Br1->setSuccessor(0, TmpBB);
    else
      Br1->setSuccessor(1, TmpBB);

    // Fill in the new basic block.
    auto *Br2 = IRBuilder<>(TmpBB).CreateCondBr(Cond2, TBB, FBB);
    if (auto *I = dyn_cast<Instruction>(Cond2)) {
      I->removeFromParent();
      I->insertBefore(Br2);
    }

    // Update PHI nodes in both successors. The original BB needs to be
    // replaced in one successor's PHI nodes, because the branch comes now from
    // the newly generated BB (NewBB). In the other successor we need to add one
    // incoming edge to the PHI nodes, because both branch instructions target
    // now the same successor. Depending on the original branch condition
    // (and/or) we have to swap the successors (TrueDest, FalseDest), so that
    // we perform the correct update for the PHI nodes.
    // This doesn't change the successor order of the just created branch
    // instruction (or any other instruction).
    if (Opc == Instruction::Or)
      std::swap(TBB, FBB);

    // Replace the old BB with the new BB.
    for (auto &I : *TBB) {
      PHINode *PN = dyn_cast<PHINode>(&I);
      if (!PN)
        break;
      int i;
      while ((i = PN->getBasicBlockIndex(&BB)) >= 0)
        PN->setIncomingBlock(i, TmpBB);
    }

    // Add another incoming edge from the new BB.
    for (auto &I : *FBB) {
      PHINode *PN = dyn_cast<PHINode>(&I);
      if (!PN)
        break;
      auto *Val = PN->getIncomingValueForBlock(&BB);
      PN->addIncoming(Val, TmpBB);
    }
    // Update the branch weights (from SelectionDAGBuilder::
    // FindMergedConditions).
    if (Opc == Instruction::Or) {
      // Codegen X | Y as:
      // BB1:
      //   jmp_if_X TBB
      //   jmp TmpBB
      // TmpBB:
      //   jmp_if_Y TBB
      //   jmp FBB
      //
      // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
      // The requirement is that
      //   TrueProb for BB1 + (FalseProb for BB1 * TrueProb for TmpBB)
      //     == TrueProb for original BB.
      // Assuming the original weights are A and B, one choice is to set BB1's
      // weights to A and A+2B, and set TmpBB's weights to A and 2B. This choice
      // assumes that
      //   TrueProb for BB1 == FalseProb for BB1 * TrueProb for TmpBB.
      // Another choice is to assume TrueProb for BB1 equals to TrueProb for
      // TmpBB, but the math is more complicated.
      uint64_t TrueWeight, FalseWeight;
      if (Br1->extractProfMetadata(TrueWeight, FalseWeight)) {
        uint64_t NewTrueWeight = TrueWeight;
        uint64_t NewFalseWeight = TrueWeight + 2 * FalseWeight;
        scaleWeights(NewTrueWeight, NewFalseWeight);
        Br1->setMetadata(LLVMContext::MD_prof,
                         MDBuilder(Br1->getContext())
                             .createBranchWeights(NewTrueWeight,
                                                  NewFalseWeight));

        NewTrueWeight = TrueWeight;
        NewFalseWeight = 2 * FalseWeight;
        scaleWeights(NewTrueWeight, NewFalseWeight);
        Br2->setMetadata(LLVMContext::MD_prof,
                         MDBuilder(Br2->getContext())
                             .createBranchWeights(NewTrueWeight,
                                                  NewFalseWeight));
      }
    } else {
      // Codegen X & Y as:
      // BB1:
      //   jmp_if_X TmpBB
      //   jmp FBB
      // TmpBB:
      //   jmp_if_Y TBB
      //   jmp FBB
      //
      // This requires creation of TmpBB after CurBB.
      //
      // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
      // The requirement is that
      //   FalseProb for BB1 + (TrueProb for BB1 * FalseProb for TmpBB)
      //     == FalseProb for original BB.
      // Assuming the original weights are A and B, one choice is to set BB1's
      // weights to 2A+B and B, and set TmpBB's weights to 2A and B. This choice
      // assumes that
      //   FalseProb for BB1 == TrueProb for BB1 * FalseProb for TmpBB.
      uint64_t TrueWeight, FalseWeight;
      if (Br1->extractProfMetadata(TrueWeight, FalseWeight)) {
        uint64_t NewTrueWeight = 2 * TrueWeight + FalseWeight;
        uint64_t NewFalseWeight = FalseWeight;
        scaleWeights(NewTrueWeight, NewFalseWeight);
        Br1->setMetadata(LLVMContext::MD_prof,
                         MDBuilder(Br1->getContext())
                             .createBranchWeights(NewTrueWeight,
                                                  NewFalseWeight));

        NewTrueWeight = 2 * TrueWeight;
        NewFalseWeight = FalseWeight;
        scaleWeights(NewTrueWeight, NewFalseWeight);
        Br2->setMetadata(LLVMContext::MD_prof,
                         MDBuilder(Br2->getContext())
                             .createBranchWeights(NewTrueWeight,
                                                  NewFalseWeight));
      }
    }

    // Note: No point in getting fancy here, since the DT info is never
    // available to CodeGenPrepare.
    ModifiedDT = true;

    MadeChange = true;

    DEBUG(dbgs() << "After branch condition splitting\n"; BB.dump();
          TmpBB->dump());
  }
  return MadeChange;
}