//===- LoopVectorize.cpp - A Loop Vectorizer ------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This is the LLVM loop vectorizer. This pass modifies 'vectorizable' loops
// and generates target-independent LLVM-IR.
// The vectorizer uses the TargetTransformInfo analysis to estimate the costs
// of instructions in order to estimate the profitability of vectorization.
//
// The loop vectorizer combines consecutive loop iterations into a single
// 'wide' iteration. After this transformation the index is incremented
// by the SIMD vector width, and not by one.
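//
// For illustration only (pseudocode, not actual generated IR): with a vector
// width of 4, a scalar loop such as
//
//     for (int i = 0; i < n; ++i)
//       A[i] = B[i] + C[i];
//
// is conceptually rewritten so that each 'wide' iteration processes four
// consecutive elements at once:
//
//     int i = 0;
//     for (; i + 4 <= n; i += 4)
//       A[i:i+4] = B[i:i+4] + C[i:i+4]; // one SIMD add per wide iteration
//     for (; i < n; ++i)                // scalar epilogue for the remainder
//       A[i] = B[i] + C[i];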
//
// This pass has four parts:
// 1. The main loop pass that drives the different parts.
// 2. LoopVectorizationLegality - A unit that checks for the legality
//    of the vectorization.
// 3. InnerLoopVectorizer - A unit that performs the actual
//    widening of instructions.
// 4. LoopVectorizationCostModel - A unit that checks for the profitability
//    of vectorization. It decides on the optimal vector width, which
//    can be one, if vectorization is not profitable.
//
// There is a development effort going on to migrate the loop vectorizer to the
// VPlan infrastructure and to introduce outer loop vectorization support (see
// docs/Proposal/VectorizationPlan.rst and
// http://lists.llvm.org/pipermail/llvm-dev/2017-December/119523.html). For this
// purpose, we temporarily introduced the VPlan-native vectorization path: an
// alternative vectorization path that is natively implemented on top of the
// VPlan infrastructure. See EnableVPlanNativePath for enabling.
//
//===----------------------------------------------------------------------===//
//
// The reduction-variable vectorization is based on the paper:
// D. Nuzman and R. Henderson. Multi-platform Auto-vectorization.
//
// Variable uniformity checks are inspired by:
// Karrenberg, R. and Hack, S. Whole Function Vectorization.
//
// The interleaved access vectorization is based on the paper:
// Dorit Nuzman, Ira Rosen and Ayal Zaks. Auto-Vectorization of Interleaved
// Data for SIMD.
//
// Other ideas/concepts are from:
// A. Zaks and D. Nuzman. Autovectorization in GCC - two years later.
//
// S. Maleki, Y. Gao, M. Garzaran, T. Wong and D. Padua. An Evaluation of
// Vectorizing Compilers.
//
//===----------------------------------------------------------------------===//
#include "llvm/Transforms/Vectorize/LoopVectorize.h"
#include "LoopVectorizationPlanner.h"
#include "VPRecipeBuilder.h"
#include "VPlan.h"
#include "VPlanHCFGBuilder.h"
#include "VPlanHCFGTransforms.h"
#include "VPlanPredicator.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/DemandedBits.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Analysis/LoopAnalysisManager.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopIterator.h"
#include "llvm/Analysis/MemorySSA.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/IR/Verifier.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/LoopSimplify.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include "llvm/Transforms/Utils/LoopVersioning.h"
#include "llvm/Transforms/Utils/SizeOpts.h"
#include "llvm/Transforms/Vectorize/LoopVectorizationLegality.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <cstdlib>
#include <functional>
#include <iterator>
#include <limits>
#include <memory>
#include <string>
#include <tuple>
#include <utility>
#include <vector>

using namespace llvm;

#define LV_NAME "loop-vectorize"
#define DEBUG_TYPE LV_NAME

/// @{
/// Metadata attribute names
static const char *const LLVMLoopVectorizeFollowupAll =
    "llvm.loop.vectorize.followup_all";
static const char *const LLVMLoopVectorizeFollowupVectorized =
    "llvm.loop.vectorize.followup_vectorized";
static const char *const LLVMLoopVectorizeFollowupEpilogue =
    "llvm.loop.vectorize.followup_epilogue";
/// @}

STATISTIC(LoopsVectorized, "Number of loops vectorized");
STATISTIC(LoopsAnalyzed, "Number of loops analyzed for vectorization");

/// Loops with a known constant trip count below this number are vectorized only
/// if no scalar iteration overheads are incurred.
static cl::opt<unsigned> TinyTripCountVectorThreshold(
    "vectorizer-min-trip-count", cl::init(16), cl::Hidden,
    cl::desc("Loops with a constant trip count that is smaller than this "
             "value are vectorized only if no scalar iteration overheads "
             "are incurred."));

// Indicates that an epilogue is undesired; predication is preferred.
// This means that the vectorizer will try to fold the loop-tail (epilogue)
// into the loop and predicate the loop body accordingly.
static cl::opt<bool> PreferPredicateOverEpilog(
    "prefer-predicate-over-epilog", cl::init(false), cl::Hidden,
    cl::desc("Indicate that an epilogue is undesired, predication should be "
             "used instead."));

static cl::opt<bool> MaximizeBandwidth(
    "vectorizer-maximize-bandwidth", cl::init(false), cl::Hidden,
    cl::desc("Maximize bandwidth when selecting vectorization factor which "
             "will be determined by the smallest type in loop."));

static cl::opt<bool> EnableInterleavedMemAccesses(
    "enable-interleaved-mem-accesses", cl::init(false), cl::Hidden,
    cl::desc("Enable vectorization on interleaved memory accesses in a loop"));

/// An interleave-group may need masking if it resides in a block that needs
/// predication, or in order to mask away gaps.
static cl::opt<bool> EnableMaskedInterleavedMemAccesses(
    "enable-masked-interleaved-mem-accesses", cl::init(false), cl::Hidden,
    cl::desc("Enable vectorization on masked interleaved memory accesses "
             "in a loop"));
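
// Illustrative example only (names invented): in the loop below the loads of
// Pix[2*i] and Pix[2*i+1] form a stride-2 interleave group with no gap. A
// loop that reads only Pix[2*i] leaves a gap (the odd elements), which must
// be masked away when the group is vectorized as one wide load:
//
//   for (int i = 0; i < n; ++i)
//     Sum += Pix[2 * i] + Pix[2 * i + 1];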

/// We don't interleave loops with a known constant trip count below this
/// number.
static const unsigned TinyTripCountInterleaveThreshold = 128;

static cl::opt<unsigned> ForceTargetNumScalarRegs(
    "force-target-num-scalar-regs", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's number of scalar registers."));

static cl::opt<unsigned> ForceTargetNumVectorRegs(
    "force-target-num-vector-regs", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's number of vector registers."));

static cl::opt<unsigned> ForceTargetMaxScalarInterleaveFactor(
    "force-target-max-scalar-interleave", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's max interleave factor for "
             "scalar loops."));

static cl::opt<unsigned> ForceTargetMaxVectorInterleaveFactor(
    "force-target-max-vector-interleave", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's max interleave factor for "
             "vectorized loops."));

static cl::opt<unsigned> ForceTargetInstructionCost(
    "force-target-instruction-cost", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's expected cost for "
             "an instruction to a single constant value. Mostly "
             "useful for getting consistent testing."));

static cl::opt<unsigned> SmallLoopCost(
    "small-loop-cost", cl::init(20), cl::Hidden,
    cl::desc(
        "The cost of a loop that is considered 'small' by the interleaver."));

static cl::opt<bool> LoopVectorizeWithBlockFrequency(
    "loop-vectorize-with-block-frequency", cl::init(true), cl::Hidden,
    cl::desc("Enable the use of the block frequency analysis to access PGO "
             "heuristics minimizing code growth in cold regions and being more "
             "aggressive in hot regions."));

// Runtime interleave loops for load/store throughput.
static cl::opt<bool> EnableLoadStoreRuntimeInterleave(
    "enable-loadstore-runtime-interleave", cl::init(true), cl::Hidden,
    cl::desc(
        "Enable runtime interleaving until load/store ports are saturated"));

/// The number of stores in a loop that are allowed to need predication.
static cl::opt<unsigned> NumberOfStoresToPredicate(
    "vectorize-num-stores-pred", cl::init(1), cl::Hidden,
    cl::desc("Max number of stores to be predicated behind an if."));

static cl::opt<bool> EnableIndVarRegisterHeur(
    "enable-ind-var-reg-heur", cl::init(true), cl::Hidden,
    cl::desc("Count the induction variable only once when interleaving"));

static cl::opt<bool> EnableCondStoresVectorization(
    "enable-cond-stores-vec", cl::init(true), cl::Hidden,
    cl::desc("Enable if predication of stores during vectorization."));

static cl::opt<unsigned> MaxNestedScalarReductionIC(
    "max-nested-scalar-reduction-interleave", cl::init(2), cl::Hidden,
    cl::desc("The maximum interleave count to use when interleaving a scalar "
             "reduction in a nested loop."));

cl::opt<bool> EnableVPlanNativePath(
    "enable-vplan-native-path", cl::init(false), cl::Hidden,
    cl::desc("Enable VPlan-native vectorization path with "
             "support for outer loop vectorization."));

// FIXME: Remove this switch once we have divergence analysis. Currently we
// assume divergent non-backedge branches when this switch is true.
cl::opt<bool> EnableVPlanPredication(
    "enable-vplan-predication", cl::init(false), cl::Hidden,
    cl::desc("Enable VPlan-native vectorization path predicator with "
             "support for outer loop vectorization."));

// This flag enables the stress testing of the VPlan H-CFG construction in the
// VPlan-native vectorization path. It must be used in conjunction with
// -enable-vplan-native-path. -vplan-verify-hcfg can also be used to enable the
// verification of the H-CFGs built.
static cl::opt<bool> VPlanBuildStressTest(
    "vplan-build-stress-test", cl::init(false), cl::Hidden,
    cl::desc(
        "Build VPlan for every supported loop nest in the function and bail "
        "out right after the build (stress test the VPlan H-CFG construction "
        "in the VPlan-native vectorization path)."));

cl::opt<bool> llvm::EnableLoopInterleaving(
    "interleave-loops", cl::init(true), cl::Hidden,
    cl::desc("Enable loop interleaving in Loop vectorization passes"));

cl::opt<bool> llvm::EnableLoopVectorization(
    "vectorize-loops", cl::init(true), cl::Hidden,
    cl::desc("Run the Loop vectorization passes"));

/// A helper function for converting Scalar types to vector types.
/// If the incoming type is void, we return void. If the VF is 1, we return
/// the scalar type.
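///
/// For example (illustration only): with VF == 4, an i32 scalar type yields
/// <4 x i32>, while with VF == 1 (or a void type) the input type is returned
/// unchanged.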
static Type *ToVectorTy(Type *Scalar, unsigned VF) {
  if (Scalar->isVoidTy() || VF == 1)
    return Scalar;
  return VectorType::get(Scalar, VF);
}

/// A helper function that returns the type of loaded or stored value.
static Type *getMemInstValueType(Value *I) {
  assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
         "Expected Load or Store instruction");
  if (auto *LI = dyn_cast<LoadInst>(I))
    return LI->getType();
  return cast<StoreInst>(I)->getValueOperand()->getType();
}

/// A helper function that returns true if the given type is irregular. The
/// type is irregular if its allocated size doesn't equal the store size of an
/// element of the corresponding vector type at the given vectorization factor.
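///
/// Illustrative example (assuming a typical data layout): i24 has a store
/// size of 3 bytes but is usually allocated in 4 bytes, so a [4 x i24] array
/// occupies 16 bytes while <4 x i24> stores into 12. hasIrregularType would
/// therefore report i24 as irregular at VF == 4, whereas i32 is regular.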
static bool hasIrregularType(Type *Ty, const DataLayout &DL, unsigned VF) {
  // Determine if an array of VF elements of type Ty is "bitcast compatible"
  // with a <VF x Ty> vector.
  if (VF > 1) {
    auto *VectorTy = VectorType::get(Ty, VF);
    return VF * DL.getTypeAllocSize(Ty) != DL.getTypeStoreSize(VectorTy);
  }

  // If the vectorization factor is one, we just check if an array of type Ty
  // requires padding between elements.
  return DL.getTypeAllocSizeInBits(Ty) != DL.getTypeSizeInBits(Ty);
}

/// A helper function that returns the reciprocal of the block probability of
/// predicated blocks. If we return X, we are assuming the predicated block
/// will execute once for every X iterations of the loop header.
///
/// TODO: We should use actual block probability here, if available. Currently,
/// we always assume predicated blocks have a 50% chance of executing.
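///
/// For example (a simple cost sketch, not taken from the cost model itself):
/// if the instructions in a predicated block would cost 8 when executed every
/// iteration, dividing by getReciprocalPredBlockProb() == 2 models an
/// expected cost of 4, since the block is assumed to run on half of the
/// iterations.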
static unsigned getReciprocalPredBlockProb() { return 2; }

/// A helper function that adds a 'fast' flag to floating-point operations.
static Value *addFastMathFlag(Value *V) {
  if (isa<FPMathOperator>(V))
    cast<Instruction>(V)->setFastMathFlags(FastMathFlags::getFast());
  return V;
}

static Value *addFastMathFlag(Value *V, FastMathFlags FMF) {
  if (isa<FPMathOperator>(V))
    cast<Instruction>(V)->setFastMathFlags(FMF);
  return V;
}

/// A helper function that returns an integer or floating-point constant with
/// value C.
static Constant *getSignedIntOrFpConstant(Type *Ty, int64_t C) {
  return Ty->isIntegerTy() ? ConstantInt::getSigned(Ty, C)
                           : ConstantFP::get(Ty, C);
}

namespace llvm {

/// InnerLoopVectorizer vectorizes loops which contain only one basic
/// block to a specified vectorization factor (VF).
/// This class performs the widening of scalars into vectors, or multiple
/// scalars. This class also implements the following features:
/// * It inserts an epilogue loop for handling loops that don't have iteration
///   counts that are known to be a multiple of the vectorization factor.
/// * It handles the code generation for reduction variables.
/// * Scalarization (implementation using scalars) of un-vectorizable
///   instructions.
/// InnerLoopVectorizer does not perform any vectorization-legality
/// checks, and relies on the caller to check for the different legality
/// aspects. The InnerLoopVectorizer relies on the
/// LoopVectorizationLegality class to provide information about the induction
/// and reduction variables that were found, for a given vectorization factor.
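///
/// A small illustration of the epilogue case (numbers chosen arbitrarily):
/// with a trip count of 13 and VF == 4, the vector loop executes 3 wide
/// iterations covering elements 0..11, and the remaining iteration (element
/// 12) runs in the scalar epilogue loop emitted after the vector loop.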
class InnerLoopVectorizer {
public:
  InnerLoopVectorizer(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
                      LoopInfo *LI, DominatorTree *DT,
                      const TargetLibraryInfo *TLI,
                      const TargetTransformInfo *TTI, AssumptionCache *AC,
                      OptimizationRemarkEmitter *ORE, unsigned VecWidth,
                      unsigned UnrollFactor, LoopVectorizationLegality *LVL,
                      LoopVectorizationCostModel *CM)
      : OrigLoop(OrigLoop), PSE(PSE), LI(LI), DT(DT), TLI(TLI), TTI(TTI),
        AC(AC), ORE(ORE), VF(VecWidth), UF(UnrollFactor),
        Builder(PSE.getSE()->getContext()),
        VectorLoopValueMap(UnrollFactor, VecWidth), Legal(LVL), Cost(CM) {}

  virtual ~InnerLoopVectorizer() = default;

  /// Create a new empty loop. Unlink the old loop and connect the new one.
  /// Return the pre-header block of the new loop.
  BasicBlock *createVectorizedLoopSkeleton();

  /// Widen a single instruction within the innermost loop.
  void widenInstruction(Instruction &I);

  /// Fix the vectorized code, taking care of header PHIs, live-outs, and more.
  void fixVectorizedLoop();

  // Return true if any runtime check is added.
  bool areSafetyChecksAdded() { return AddedSafetyChecks; }

  /// A type for vectorized values in the new loop. Each value from the
  /// original loop, when vectorized, is represented by UF vector values in the
  /// new unrolled loop, where UF is the unroll factor.
  using VectorParts = SmallVector<Value *, 2>;

  /// Vectorize a single PHINode in a block. This method handles the induction
  /// variable canonicalization. It supports both VF = 1 for unrolled loops and
  /// arbitrary length vectors.
  void widenPHIInstruction(Instruction *PN, unsigned UF, unsigned VF);

  /// A helper function to scalarize a single Instruction in the innermost loop.
  /// Generates a sequence of scalar instances for each lane between \p MinLane
  /// and \p MaxLane, times each part between \p MinPart and \p MaxPart,
  /// inclusive.
  void scalarizeInstruction(Instruction *Instr, const VPIteration &Instance,
                            bool IfPredicateInstr);

  /// Widen an integer or floating-point induction variable \p IV. If \p Trunc
  /// is provided, the integer induction variable will first be truncated to
  /// the corresponding type.
  void widenIntOrFpInduction(PHINode *IV, TruncInst *Trunc = nullptr);

  /// getOrCreateVectorValue and getOrCreateScalarValue coordinate to generate a
  /// vector or scalar value on-demand if one is not yet available. When
  /// vectorizing a loop, we visit the definition of an instruction before its
  /// uses. When visiting the definition, we either vectorize or scalarize the
  /// instruction, creating an entry for it in the corresponding map. (In some
  /// cases, such as induction variables, we will create both vector and scalar
  /// entries.) Then, as we encounter uses of the definition, we derive values
  /// for each scalar or vector use unless such a value is already available.
  /// For example, if we scalarize a definition and one of its uses is vector,
  /// we build the required vector on-demand with an insertelement sequence
  /// when visiting the use. Otherwise, if the use is scalar, we can use the
  /// existing scalar definition.
  ///
  /// Return a value in the new loop corresponding to \p V from the original
  /// loop at unroll index \p Part. If the value has already been vectorized,
  /// the corresponding vector entry in VectorLoopValueMap is returned. If,
  /// however, the value has a scalar entry in VectorLoopValueMap, we construct
  /// a new vector value on-demand by inserting the scalar values into a vector
  /// with an insertelement sequence. If the value has been neither vectorized
  /// nor scalarized, it must be loop invariant, so we simply broadcast the
  /// value into a vector.
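  ///
  /// For illustration only (a sketch using IRBuilder, with invented names
  /// ScalarTy and Scalars), the on-demand packing is conceptually a chain of
  /// insertelement instructions:
  ///
  ///   Value *Vec = UndefValue::get(VectorType::get(ScalarTy, VF));
  ///   for (unsigned Lane = 0; Lane < VF; ++Lane)
  ///     Vec = Builder.CreateInsertElement(Vec, Scalars[Lane],
  ///                                       Builder.getInt32(Lane));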
  Value *getOrCreateVectorValue(Value *V, unsigned Part);

  /// Return a value in the new loop corresponding to \p V from the original
  /// loop at unroll and vector indices \p Instance. If the value has been
  /// vectorized but not scalarized, the necessary extractelement instruction
  /// will be generated.
  Value *getOrCreateScalarValue(Value *V, const VPIteration &Instance);

  /// Construct the vector value of a scalarized value \p V one lane at a time.
  void packScalarIntoVectorValue(Value *V, const VPIteration &Instance);

  /// Try to vectorize the interleaved access group that \p Instr belongs to,
  /// optionally masking the vector operations if \p BlockInMask is non-null.
  void vectorizeInterleaveGroup(Instruction *Instr,
                                VectorParts *BlockInMask = nullptr);

  /// Vectorize Load and Store instructions, optionally masking the vector
  /// operations if \p BlockInMask is non-null.
  void vectorizeMemoryInstruction(Instruction *Instr,
                                  VectorParts *BlockInMask = nullptr);

  /// Set the debug location in the builder using the debug location in
  /// the instruction.
  void setDebugLocFromInst(IRBuilder<> &B, const Value *Ptr);

  /// Fix the non-induction PHIs in the OrigPHIsToFix vector.
  void fixNonInductionPHIs(void);

protected:
  friend class LoopVectorizationPlanner;

  /// A small list of PHINodes.
  using PhiVector = SmallVector<PHINode *, 4>;

  /// A type for scalarized values in the new loop. Each value from the
  /// original loop, when scalarized, is represented by UF x VF scalar values
  /// in the new unrolled loop, where UF is the unroll factor and VF is the
  /// vectorization factor.
  using ScalarParts = SmallVector<SmallVector<Value *, 4>, 2>;

  /// Set up the values of the IVs correctly when exiting the vector loop.
  void fixupIVUsers(PHINode *OrigPhi, const InductionDescriptor &II,
                    Value *CountRoundDown, Value *EndValue,
                    BasicBlock *MiddleBlock);

  /// Create a new induction variable inside L.
  PHINode *createInductionVariable(Loop *L, Value *Start, Value *End,
                                   Value *Step, Instruction *DL);

  /// Handle all cross-iteration phis in the header.
  void fixCrossIterationPHIs();

  /// Fix a first-order recurrence. This is the second phase of vectorizing
  /// this phi node.
  void fixFirstOrderRecurrence(PHINode *Phi);

  /// Fix a reduction cross-iteration phi. This is the second phase of
  /// vectorizing this phi node.
  void fixReduction(PHINode *Phi);

  /// The Loop exit block may have single value PHI nodes with some
  /// incoming value. While vectorizing we only handled real values
  /// that were defined inside the loop, and we should have one value for
  /// each predecessor of its parent basic block. See PR14725.
  void fixLCSSAPHIs();

  /// Iteratively sink the scalarized operands of a predicated instruction into
  /// the block that was created for it.
  void sinkScalarOperands(Instruction *PredInst);

  /// Shrinks vector element sizes to the smallest bitwidth they can be legally
  /// represented as.
  void truncateToMinimalBitwidths();

  /// Insert the new loop into the loop hierarchy and pass manager,
  /// and update the analysis passes.
  void updateAnalysis();

  /// Create a broadcast instruction. This method generates a broadcast
  /// instruction (shuffle) for loop invariant values and for the induction
  /// value. If this is the induction variable then we extend it to N, N+1, ...;
  /// this is needed because each iteration in the loop corresponds to a SIMD
  /// element.
  virtual Value *getBroadcastInstrs(Value *V);

  /// This function adds (StartIdx, StartIdx + Step, StartIdx + 2*Step, ...)
  /// to each vector element of Val. The sequence starts at \p StartIdx.
  /// \p Opcode is relevant for FP induction variable.
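  ///
  /// For example (values invented for illustration): with VF == 4,
  /// StartIdx == 0 and Step == 2, a broadcast of %base becomes
  /// <%base + 0, %base + 2, %base + 4, %base + 6>.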
  virtual Value *getStepVector(Value *Val, int StartIdx, Value *Step,
                               Instruction::BinaryOps Opcode =
                                   Instruction::BinaryOpsEnd);

  /// Compute scalar induction steps. \p ScalarIV is the scalar induction
  /// variable on which to base the steps, \p Step is the size of the step, and
  /// \p EntryVal is the value from the original loop that maps to the steps.
  /// Note that \p EntryVal doesn't have to be an induction variable - it
  /// can also be a truncate instruction.
  void buildScalarSteps(Value *ScalarIV, Value *Step, Instruction *EntryVal,
      const InductionDescriptor &ID);

  /// Create a vector induction phi node based on an existing scalar one. \p
  /// EntryVal is the value from the original loop that maps to the vector phi
  /// node, and \p Step is the loop-invariant step. If \p EntryVal is a
  /// truncate instruction, instead of widening the original IV, we widen a
  /// version of the IV truncated to \p EntryVal's type.
  void createVectorIntOrFpInductionPHI(const InductionDescriptor &II,
      Value *Step, Instruction *EntryVal);

  /// Returns true if an instruction \p I should be scalarized instead of
  /// vectorized for the chosen vectorization factor.
  bool shouldScalarizeInstruction(Instruction *I) const;

  /// Returns true if we should generate a scalar version of \p IV.
  bool needsScalarInduction(Instruction *IV) const;

  /// If there is a cast involved in the induction variable \p ID, which should
  /// be ignored in the vectorized loop body, this function records the
  /// VectorLoopValue of the respective Phi also as the VectorLoopValue of the
  /// cast. We had already proved that the casted Phi is equal to the uncasted
  /// Phi in the vectorized loop (under a runtime guard), and therefore
  /// there is no need to vectorize the cast - the same value can be used in the
  /// vector loop for both the Phi and the cast.
  /// If \p VectorLoopValue is a scalarized value, \p Lane is also specified;
  /// otherwise, \p VectorLoopValue is a widened/vectorized value.
  ///
  /// \p EntryVal is the value from the original loop that maps to the vector
  /// phi node and is used to distinguish what is the IV currently being
  /// processed - original one (if \p EntryVal is a phi corresponding to the
  /// original IV) or the "newly-created" one based on the proof mentioned above
  /// (see also buildScalarSteps() and createVectorIntOrFpInductionPHI()). In the
  /// latter case \p EntryVal is a TruncInst and we must not record anything for
  /// that IV, but it's error-prone to expect callers of this routine to care
  /// about that, hence this explicit parameter.
  void recordVectorLoopValueForInductionCast(const InductionDescriptor &ID,
      const Instruction *EntryVal, Value *VectorLoopValue, unsigned Part,
      unsigned Lane = UINT_MAX);

  /// Generate a shuffle sequence that will reverse the vector Vec.
  virtual Value *reverseVector(Value *Vec);

  /// Returns (and creates if needed) the original loop trip count.
  Value *getOrCreateTripCount(Loop *NewLoop);

  /// Returns (and creates if needed) the trip count of the widened loop.
  Value *getOrCreateVectorTripCount(Loop *NewLoop);

  /// Returns a bitcasted value to the requested vector type.
  /// Also handles bitcasts of vector<float> <-> vector<pointer> types.
  Value *createBitOrPointerCast(Value *V, VectorType *DstVTy,
      const DataLayout &DL);

  /// Emit a bypass check to see if the vector trip count is zero, including if
  /// it overflows.
  void emitMinimumIterationCountCheck(Loop *L, BasicBlock *Bypass);

  /// Emit a bypass check to see if all of the SCEV assumptions we've
  /// had to make are correct.
  void emitSCEVChecks(Loop *L, BasicBlock *Bypass);

  /// Emit bypass checks to check any memory assumptions we may have made.
  void emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass);

  /// Compute the transformed value of Index at offset StartValue using step
  /// StepValue.
  /// For integer induction, returns StartValue + Index * StepValue.
  /// For pointer induction, returns StartValue[Index * StepValue].
  /// FIXME: The newly created binary instructions should contain nsw/nuw
  /// flags, which can be found from the original scalar operations.
  Value *emitTransformedIndex(IRBuilder<> &B, Value *Index, ScalarEvolution *SE,
      const DataLayout &DL, const InductionDescriptor &ID) const;
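  // Illustrative note (not from the original source): for an integer induction
  // with StartValue = 10 and StepValue = 3, an Index of 4 is transformed to
  // 10 + 4 * 3 = 22; for a pointer induction the same Index becomes an access
  // at StartValue[4 * 3].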

  /// Add additional metadata to \p To that was not present on \p Orig.
  ///
  /// Currently this is used to add the noalias annotations based on the
  /// inserted memchecks. Use this for instructions that are *cloned* into the
  /// vector loop.
  void addNewMetadata(Instruction *To, const Instruction *Orig);

  /// Add metadata from one instruction to another.
  ///
  /// This includes both the original MDs from \p From and additional ones (\see
  /// addNewMetadata). Use this for *newly created* instructions in the vector
  /// loop.
  void addMetadata(Instruction *To, Instruction *From);

  /// Similar to the previous function but it adds the metadata to a
  /// vector of instructions.
  void addMetadata(ArrayRef<Value *> To, Instruction *From);

  /// The original loop.
  Loop *OrigLoop;

  /// A wrapper around ScalarEvolution used to add runtime SCEV checks. Applies
  /// dynamic knowledge to simplify SCEV expressions and converts them to a
  /// more usable form.
  PredicatedScalarEvolution &PSE;

  /// Loop Info.
  LoopInfo *LI;

  /// Dominator Tree.
  DominatorTree *DT;

  /// Alias Analysis.
  AliasAnalysis *AA;

  /// Target Library Info.
  const TargetLibraryInfo *TLI;

  /// Target Transform Info.
  const TargetTransformInfo *TTI;

  /// Assumption Cache.
  AssumptionCache *AC;

  /// Interface to emit optimization remarks.
  OptimizationRemarkEmitter *ORE;

  /// LoopVersioning. It's only set up (non-null) if memchecks were
  /// used.
  ///
  /// This is currently only used to add no-alias metadata based on the
  /// memchecks. The actual versioning is performed manually.
  std::unique_ptr<LoopVersioning> LVer;

  /// The vectorization SIMD factor to use. Each vector will have this many
  /// vector elements.
  unsigned VF;

  /// The vectorization unroll factor to use. Each scalar is vectorized to this
  /// many different vector instructions.
  unsigned UF;

  /// The builder that we use.
  IRBuilder<> Builder;

  // --- Vectorization state ---

  /// The vector-loop preheader.
  BasicBlock *LoopVectorPreHeader;

  /// The scalar-loop preheader.
  BasicBlock *LoopScalarPreHeader;

  /// Middle Block between the vector and the scalar.
  BasicBlock *LoopMiddleBlock;

  /// The ExitBlock of the scalar loop.
  BasicBlock *LoopExitBlock;

  /// The vector loop body.
  BasicBlock *LoopVectorBody;

  /// The scalar loop body.
  BasicBlock *LoopScalarBody;

  /// A list of all bypass blocks. The first block is the entry of the loop.
  SmallVector<BasicBlock *, 4> LoopBypassBlocks;

  /// The new Induction variable which was added to the new block.
  PHINode *Induction = nullptr;

  /// The induction variable of the old basic block.
  PHINode *OldInduction = nullptr;

  /// Maps values from the original loop to their corresponding values in the
  /// vectorized loop. A key value can map to either vector values, scalar
  /// values or both kinds of values, depending on whether the key was
  /// vectorized and scalarized.
  VectorizerValueMap VectorLoopValueMap;

  /// Store instructions that were predicated.
  SmallVector<Instruction *, 4> PredicatedInstructions;

  /// Trip count of the original loop.
  Value *TripCount = nullptr;

  /// Trip count of the widened loop (TripCount - TripCount % (VF*UF))
  Value *VectorTripCount = nullptr;
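  // Illustrative note (not from the original source): with TripCount = 103,
  // VF = 8 and UF = 2, VectorTripCount = 103 - 103 % 16 = 96, i.e. the vector
  // loop covers 96 iterations and the remaining 7 are left to the scalar loop.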

  /// The legality analysis.
  LoopVectorizationLegality *Legal;

  /// The profitability analysis.
  LoopVectorizationCostModel *Cost;

  // Record whether runtime checks are added.
  bool AddedSafetyChecks = false;

  // Holds the end values for each induction variable. We save the end values
  // so we can later fix-up the external users of the induction variables.
  DenseMap<PHINode *, Value *> IVEndValues;

  // Vector of original scalar PHIs whose corresponding widened PHIs need to be
  // fixed up at the end of vector code generation.
  SmallVector<PHINode *, 8> OrigPHIsToFix;
};

class InnerLoopUnroller : public InnerLoopVectorizer {
public:
  InnerLoopUnroller(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
      LoopInfo *LI, DominatorTree *DT, const TargetLibraryInfo *TLI,
      const TargetTransformInfo *TTI, AssumptionCache *AC,
      OptimizationRemarkEmitter *ORE, unsigned UnrollFactor,
      LoopVectorizationLegality *LVL, LoopVectorizationCostModel *CM)
      : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE, 1,
                            UnrollFactor, LVL, CM) {}

private:
  Value *getBroadcastInstrs(Value *V) override;
  Value *getStepVector(Value *Val, int StartIdx, Value *Step,
      Instruction::BinaryOps Opcode = Instruction::BinaryOpsEnd) override;
  Value *reverseVector(Value *Vec) override;
};

} // end namespace llvm

/// Look for a meaningful debug location on the instruction or its operands.
static Instruction *getDebugLocFromInstOrOperands(Instruction *I) {
  if (!I)
    return I;

  DebugLoc Empty;
  if (I->getDebugLoc() != Empty)
    return I;

  for (User::op_iterator OI = I->op_begin(), OE = I->op_end(); OI != OE; ++OI) {
    if (Instruction *OpInst = dyn_cast<Instruction>(*OI))
      if (OpInst->getDebugLoc() != Empty)
        return OpInst;
  }

  return I;
}

void InnerLoopVectorizer::setDebugLocFromInst(IRBuilder<> &B, const Value *Ptr) {
  if (const Instruction *Inst = dyn_cast_or_null<Instruction>(Ptr)) {
    const DILocation *DIL = Inst->getDebugLoc();
    if (DIL && Inst->getFunction()->isDebugInfoForProfiling() &&
        !isa<DbgInfoIntrinsic>(Inst)) {
      auto NewDIL = DIL->cloneByMultiplyingDuplicationFactor(UF * VF);
      if (NewDIL)
        B.SetCurrentDebugLocation(NewDIL.getValue());
      else
        LLVM_DEBUG(dbgs() << "Failed to create new discriminator: "
                          << DIL->getFilename() << " Line: " << DIL->getLine());
    } else
      B.SetCurrentDebugLocation(DIL);
  } else
    B.SetCurrentDebugLocation(DebugLoc());
}

/// Write a record \p DebugMsg about vectorization failure to the debug
/// output stream. If \p I is passed, it is an instruction that prevents
/// vectorization.
#ifndef NDEBUG
static void debugVectorizationFailure(const StringRef DebugMsg,
    Instruction *I) {
  dbgs() << "LV: Not vectorizing: " << DebugMsg;
  if (I != nullptr)
    dbgs() << " " << *I;
  else
    dbgs() << '.';
  dbgs() << '\n';
}
#endif

/// Create an analysis remark that explains why vectorization failed
///
/// \p PassName is the name of the pass (e.g. can be AlwaysPrint). \p
/// RemarkName is the identifier for the remark. If \p I is passed it is an
/// instruction that prevents vectorization. Otherwise \p TheLoop is used for
/// the location of the remark. \return the remark object that can be
/// streamed to.
static OptimizationRemarkAnalysis createLVAnalysis(const char *PassName,
    StringRef RemarkName, Loop *TheLoop, Instruction *I) {
  Value *CodeRegion = TheLoop->getHeader();
  DebugLoc DL = TheLoop->getStartLoc();

  if (I) {
    CodeRegion = I->getParent();
    // If there is no debug location attached to the instruction, revert back to
    // using the loop's.
    if (I->getDebugLoc())
      DL = I->getDebugLoc();
  }

  OptimizationRemarkAnalysis R(PassName, RemarkName, DL, CodeRegion);
  R << "loop not vectorized: ";
  return R;
}

namespace llvm {

void reportVectorizationFailure(const StringRef DebugMsg,
    const StringRef OREMsg, const StringRef ORETag,
    OptimizationRemarkEmitter *ORE, Loop *TheLoop, Instruction *I) {
  LLVM_DEBUG(debugVectorizationFailure(DebugMsg, I));
  LoopVectorizeHints Hints(TheLoop, true /* doesn't matter */, *ORE);
  ORE->emit(createLVAnalysis(Hints.vectorizeAnalysisPassName(),
                             ORETag, TheLoop, I) << OREMsg);
}

} // end namespace llvm

#ifndef NDEBUG
/// \return string containing a file name and a line # for the given loop.
static std::string getDebugLocString(const Loop *L) {
  std::string Result;
  if (L) {
    raw_string_ostream OS(Result);
    if (const DebugLoc LoopDbgLoc = L->getStartLoc())
      LoopDbgLoc.print(OS);
    else
      // Just print the module name.
      OS << L->getHeader()->getParent()->getParent()->getModuleIdentifier();
    OS.flush();
  }
  return Result;
}
#endif

void InnerLoopVectorizer::addNewMetadata(Instruction *To,
    const Instruction *Orig) {
  // If the loop was versioned with memchecks, add the corresponding no-alias
  // metadata.
  if (LVer && (isa<LoadInst>(Orig) || isa<StoreInst>(Orig)))
    LVer->annotateInstWithNoAlias(To, Orig);
}

void InnerLoopVectorizer::addMetadata(Instruction *To, Instruction *From) {
  propagateMetadata(To, From);
  addNewMetadata(To, From);
}

void InnerLoopVectorizer::addMetadata(ArrayRef<Value *> To,
    Instruction *From) {
  for (Value *V : To) {
    if (Instruction *I = dyn_cast<Instruction>(V))
      addMetadata(I, From);
  }
}

namespace llvm {

// Loop vectorization cost-model hints how the scalar epilogue loop should be
// lowered.
enum ScalarEpilogueLowering {
  // The default: allowing scalar epilogues.
  CM_ScalarEpilogueAllowed,

  // Vectorization with OptForSize: don't allow epilogues.
  CM_ScalarEpilogueNotAllowedOptSize,

  // A special case of vectorization with OptForSize: loops with a very small
  // trip count are considered for vectorization under OptForSize, thereby
  // making sure the cost of their loop body is dominant, free of runtime
  // guards and scalar iteration overheads.
  CM_ScalarEpilogueNotAllowedLowTripLoop,

  // Loop hint predicate indicating an epilogue is undesired.
  CM_ScalarEpilogueNotNeededUsePredicate
};

/// LoopVectorizationCostModel - estimates the expected speedups due to
/// vectorization.
/// In many cases vectorization is not profitable. This can happen because of
/// a number of reasons. In this class we mainly attempt to predict the
/// expected speedup/slowdowns due to the supported instruction set. We use the
/// TargetTransformInfo to query the different backends for the cost of
/// different operations.
class LoopVectorizationCostModel {
public:
  LoopVectorizationCostModel(ScalarEpilogueLowering SEL, Loop *L,
      PredicatedScalarEvolution &PSE, LoopInfo *LI,
      LoopVectorizationLegality *Legal, const TargetTransformInfo &TTI,
      const TargetLibraryInfo *TLI, DemandedBits *DB, AssumptionCache *AC,
      OptimizationRemarkEmitter *ORE, const Function *F,
      const LoopVectorizeHints *Hints, InterleavedAccessInfo &IAI)
      : ScalarEpilogueStatus(SEL), TheLoop(L), PSE(PSE), LI(LI), Legal(Legal),
        TTI(TTI), TLI(TLI), DB(DB), AC(AC), ORE(ORE), TheFunction(F),
        Hints(Hints), InterleaveInfo(IAI) {}

  /// \return An upper bound for the vectorization factor, or None if
  /// vectorization and interleaving should be avoided up front.
  Optional<unsigned> computeMaxVF();

  /// \return True if runtime checks are required for vectorization, and false
  /// otherwise.
  bool runtimeChecksRequired();

  /// \return The most profitable vectorization factor and the cost of that VF.
  /// This method checks every power of two up to MaxVF. If UserVF is not ZERO
  /// then this vectorization factor will be selected if vectorization is
  /// possible.
  VectorizationFactor selectVectorizationFactor(unsigned MaxVF);

  /// Setup cost-based decisions for user vectorization factor.
  void selectUserVectorizationFactor(unsigned UserVF) {
    collectUniformsAndScalars(UserVF);
    collectInstsToScalarize(UserVF);
  }

  /// \return The size (in bits) of the smallest and widest types in the code
  /// that needs to be vectorized. We ignore values that remain scalar such as
  /// 64 bit loop indices.
  std::pair<unsigned, unsigned> getSmallestAndWidestTypes();

  /// \return The desired interleave count.
  /// If interleave count has been specified by metadata it will be returned.
  /// Otherwise, the interleave count is computed and returned. VF and LoopCost
  /// are the selected vectorization factor and the cost of the selected VF.
  unsigned selectInterleaveCount(unsigned VF, unsigned LoopCost);

  /// Memory access instruction may be vectorized in more than one way.
  /// Form of instruction after vectorization depends on cost.
  /// This function takes cost-based decisions for Load/Store instructions
  /// and collects them in a map. This decisions map is used for building
  /// the lists of loop-uniform and loop-scalar instructions.
  /// The calculated cost is saved with widening decision in order to
  /// avoid redundant calculations.
  void setCostBasedWideningDecision(unsigned VF);

  /// A struct that represents some properties of the register usage
  /// of a loop.
  struct RegisterUsage {
    /// Holds the number of loop invariant values that are used in the loop.
    unsigned LoopInvariantRegs;

    /// Holds the maximum number of concurrent live intervals in the loop.
    unsigned MaxLocalUsers;
  };

  /// \return Returns information about the register usages of the loop for the
  /// given vectorization factors.
  SmallVector<RegisterUsage, 8> calculateRegisterUsage(ArrayRef<unsigned> VFs);

  /// Collect values we want to ignore in the cost model.
  void collectValuesToIgnore();

  /// \returns The smallest bitwidth each instruction can be represented with.
  /// The vector equivalents of these instructions should be truncated to this
  /// type.
  const MapVector<Instruction *, uint64_t> &getMinimalBitwidths() const {
    return MinBWs;
  }

  /// \returns True if it is more profitable to scalarize instruction \p I for
  /// vectorization factor \p VF.
  bool isProfitableToScalarize(Instruction *I, unsigned VF) const {
    assert(VF > 1 && "Profitable to scalarize relevant only for VF > 1.");

    // Cost model is not run in the VPlan-native path - return conservative
    // result until this changes.
    if (EnableVPlanNativePath)
      return false;

    auto Scalars = InstsToScalarize.find(VF);
    assert(Scalars != InstsToScalarize.end() &&
           "VF not yet analyzed for scalarization profitability");
    return Scalars->second.find(I) != Scalars->second.end();
  }

  /// Returns true if \p I is known to be uniform after vectorization.
  bool isUniformAfterVectorization(Instruction *I, unsigned VF) const {
    if (VF == 1)
      return true;

    // Cost model is not run in the VPlan-native path - return conservative
    // result until this changes.
    if (EnableVPlanNativePath)
      return false;

    auto UniformsPerVF = Uniforms.find(VF);
    assert(UniformsPerVF != Uniforms.end() &&
           "VF not yet analyzed for uniformity");
    return UniformsPerVF->second.find(I) != UniformsPerVF->second.end();
  }

  /// Returns true if \p I is known to be scalar after vectorization.
  bool isScalarAfterVectorization(Instruction *I, unsigned VF) const {
    if (VF == 1)
      return true;

    // Cost model is not run in the VPlan-native path - return conservative
    // result until this changes.
    if (EnableVPlanNativePath)
      return false;

    auto ScalarsPerVF = Scalars.find(VF);
    assert(ScalarsPerVF != Scalars.end() &&
           "Scalar values are not calculated for VF");
    return ScalarsPerVF->second.find(I) != ScalarsPerVF->second.end();
  }

  /// \returns True if instruction \p I can be truncated to a smaller bitwidth
  /// for vectorization factor \p VF.
  bool canTruncateToMinimalBitwidth(Instruction *I, unsigned VF) const {
    return VF > 1 && MinBWs.find(I) != MinBWs.end() &&
           !isProfitableToScalarize(I, VF) &&
           !isScalarAfterVectorization(I, VF);
  }

  /// Decision that was taken during cost calculation for memory instruction.
  enum InstWidening {
    CM_Unknown,
    CM_Widen,         // For consecutive accesses with stride +1.
    CM_Widen_Reverse, // For consecutive accesses with stride -1.
    CM_Interleave,
    CM_GatherScatter,
    CM_Scalarize
  };

  /// Save vectorization decision \p W and \p Cost taken by the cost model for
  /// instruction \p I and vector width \p VF.
  void setWideningDecision(Instruction *I, unsigned VF, InstWidening W,
      unsigned Cost) {
    assert(VF >= 2 && "Expected VF >=2");
    WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost);
  }

  /// Save vectorization decision \p W and \p Cost taken by the cost model for
  /// interleaving group \p Grp and vector width \p VF.
  void setWideningDecision(const InterleaveGroup<Instruction> *Grp, unsigned VF,
      InstWidening W, unsigned Cost) {
    assert(VF >= 2 && "Expected VF >=2");
    /// Broadcast this decision to all instructions inside the group.
    /// But the cost will be assigned to one instruction only.
    for (unsigned i = 0; i < Grp->getFactor(); ++i) {
      if (auto *I = Grp->getMember(i)) {
        if (Grp->getInsertPos() == I)
          WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost);
        else
          WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, 0);
      }
    }
  }
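  // Illustrative note (not from the original source): for an interleave group
  // of factor 3 whose insert position is the second member, the map ends up
  // with {member1 -> (W, 0), member2 -> (W, Cost), member3 -> (W, 0)}, so the
  // group's cost is counted exactly once when the members are summed.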

  /// Return the cost model decision for the given instruction \p I and vector
  /// width \p VF. Return CM_Unknown if this instruction did not pass
  /// through the cost modeling.
  InstWidening getWideningDecision(Instruction *I, unsigned VF) {
    assert(VF >= 2 && "Expected VF >=2");

    // Cost model is not run in the VPlan-native path - return conservative
    // result until this changes.
    if (EnableVPlanNativePath)
      return CM_GatherScatter;

    std::pair<Instruction *, unsigned> InstOnVF = std::make_pair(I, VF);
    auto Itr = WideningDecisions.find(InstOnVF);
    if (Itr == WideningDecisions.end())
      return CM_Unknown;
    return Itr->second.first;
  }

  /// Return the vectorization cost for the given instruction \p I and vector
  /// width \p VF.
  unsigned getWideningCost(Instruction *I, unsigned VF) {
    assert(VF >= 2 && "Expected VF >=2");
    std::pair<Instruction *, unsigned> InstOnVF = std::make_pair(I, VF);
    assert(WideningDecisions.find(InstOnVF) != WideningDecisions.end() &&
           "The cost is not calculated");
    return WideningDecisions[InstOnVF].second;
  }

  /// Return True if instruction \p I is an optimizable truncate whose operand
  /// is an induction variable. Such a truncate will be removed by adding a new
  /// induction variable with the destination type.
  bool isOptimizableIVTruncate(Instruction *I, unsigned VF) {
    // If the instruction is not a truncate, return false.
    auto *Trunc = dyn_cast<TruncInst>(I);
    if (!Trunc)
      return false;

    // Get the source and destination types of the truncate.
    Type *SrcTy = ToVectorTy(cast<CastInst>(I)->getSrcTy(), VF);
    Type *DestTy = ToVectorTy(cast<CastInst>(I)->getDestTy(), VF);

    // If the truncate is free for the given types, return false. Replacing a
    // free truncate with an induction variable would add an induction variable
    // update instruction to each iteration of the loop. We exclude from this
    // check the primary induction variable since it will need an update
    // instruction regardless.
    Value *Op = Trunc->getOperand(0);
    if (Op != Legal->getPrimaryInduction() && TTI.isTruncateFree(SrcTy, DestTy))
      return false;

    // If the truncated value is not an induction variable, return false.
    return Legal->isInductionPhi(Op);
  }
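  // Illustrative note (not from the original source, hypothetical IR names):
  // given an i64 induction phi %iv and a user "%t = trunc i64 %iv to i32",
  // this returns true when %iv is an induction phi and the truncate is not
  // free (or %iv is the primary induction), so %t can later be folded into a
  // new i32 induction variable.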

  /// Collects the instructions to scalarize for each predicated instruction in
  /// the loop.
  void collectInstsToScalarize(unsigned VF);

  /// Collect Uniform and Scalar values for the given \p VF.
  /// The sets depend on CM decision for Load/Store instructions
  /// that may be vectorized as interleave, gather-scatter or scalarized.
  void collectUniformsAndScalars(unsigned VF) {
    // Do the analysis once.
    if (VF == 1 || Uniforms.find(VF) != Uniforms.end())
      return;
    setCostBasedWideningDecision(VF);
    collectLoopUniforms(VF);
    collectLoopScalars(VF);
  }

  /// Returns true if the target machine supports masked store operation
  /// for the given \p DataType and kind of access to \p Ptr.
  bool isLegalMaskedStore(Type *DataType, Value *Ptr) {
    return Legal->isConsecutivePtr(Ptr) && TTI.isLegalMaskedStore(DataType);
  }

  /// Returns true if the target machine supports masked load operation
  /// for the given \p DataType and kind of access to \p Ptr.
  bool isLegalMaskedLoad(Type *DataType, Value *Ptr) {
    return Legal->isConsecutivePtr(Ptr) && TTI.isLegalMaskedLoad(DataType);
  }

  /// Returns true if the target machine supports masked scatter operation
  /// for the given \p DataType.
  bool isLegalMaskedScatter(Type *DataType) {
    return TTI.isLegalMaskedScatter(DataType);
  }

  /// Returns true if the target machine supports masked gather operation
  /// for the given \p DataType.
  bool isLegalMaskedGather(Type *DataType) {
    return TTI.isLegalMaskedGather(DataType);
  }

  /// Returns true if the target machine can represent \p V as a masked gather
  /// or scatter operation.
  bool isLegalGatherOrScatter(Value *V) {
    bool LI = isa<LoadInst>(V);
    bool SI = isa<StoreInst>(V);
    if (!LI && !SI)
      return false;
    auto *Ty = getMemInstValueType(V);
    return (LI && isLegalMaskedGather(Ty)) || (SI && isLegalMaskedScatter(Ty));
  }

  /// Returns true if \p I is an instruction that will be scalarized with
  /// predication. Such instructions include conditional stores and
  /// instructions that may divide by zero.
  /// If a non-zero VF has been calculated, we check if \p I will be scalarized
  /// with predication for that VF.
  bool isScalarWithPredication(Instruction *I, unsigned VF = 1);

  // Returns true if \p I is an instruction that will be predicated either
  // through scalar predication or masked load/store or masked gather/scatter.
  // Superset of instructions that return true for isScalarWithPredication.
  bool isPredicatedInst(Instruction *I) {
    if (!blockNeedsPredication(I->getParent()))
      return false;
    // Loads and stores that need some form of masked operation are predicated
    // instructions.
    if (isa<LoadInst>(I) || isa<StoreInst>(I))
      return Legal->isMaskRequired(I);
    return isScalarWithPredication(I);
  }

  /// Returns true if \p I is a memory instruction with consecutive memory
  /// access that can be widened.
  bool memoryInstructionCanBeWidened(Instruction *I, unsigned VF = 1);

  /// Returns true if \p I is a memory instruction in an interleaved-group
  /// of memory accesses that can be vectorized with wide vector loads/stores
  /// and shuffles.
  bool interleavedAccessCanBeWidened(Instruction *I, unsigned VF = 1);

  /// Check if \p Instr belongs to any interleaved access group.
  bool isAccessInterleaved(Instruction *Instr) {
    return InterleaveInfo.isInterleaved(Instr);
  }

  /// Get the interleaved access group that \p Instr belongs to.
  const InterleaveGroup<Instruction> *
  getInterleavedAccessGroup(Instruction *Instr) {
    return InterleaveInfo.getInterleaveGroup(Instr);
  }

  /// Returns true if an interleaved group requires a scalar iteration
  /// to handle accesses with gaps, and there is nothing preventing us from
  /// creating a scalar epilogue.
  bool requiresScalarEpilogue() const {
    return isScalarEpilogueAllowed() && InterleaveInfo.requiresScalarEpilogue();
  }

  /// Returns true if a scalar epilogue is allowed, i.e. it has not been
  /// disallowed due to optsize or a loop hint annotation.
  bool isScalarEpilogueAllowed() const {
    return ScalarEpilogueStatus == CM_ScalarEpilogueAllowed;
  }

  /// Returns true if all loop blocks should be masked to fold tail loop.
  bool foldTailByMasking() const { return FoldTailByMasking; }

  bool blockNeedsPredication(BasicBlock *BB) {
    return foldTailByMasking() || Legal->blockNeedsPredication(BB);
  }

  /// Estimate cost of an intrinsic call instruction CI if it were vectorized
  /// with factor VF. Return the cost of the instruction, including
  /// scalarization overhead if it's needed.
  unsigned getVectorIntrinsicCost(CallInst *CI, unsigned VF);

  /// Estimate cost of a call instruction CI if it were vectorized with factor
  /// VF. Return the cost of the instruction, including scalarization overhead
  /// if it's needed. The flag NeedToScalarize shows if the call needs to be
  /// scalarized - i.e. either vector version isn't available, or is too
  /// expensive.
  unsigned getVectorCallCost(CallInst *CI, unsigned VF, bool &NeedToScalarize);

private:
  unsigned NumPredStores = 0;

  /// \return An upper bound for the vectorization factor, larger than zero.
  /// One is returned if vectorization should best be avoided due to cost.
  unsigned computeFeasibleMaxVF(unsigned ConstTripCount);

  /// The vectorization cost is a combination of the cost itself and a boolean
  /// indicating whether any of the contributing operations will actually
  /// operate on vector values after type legalization in the backend. If this
  /// latter value is false, then all operations will be scalarized (i.e. no
  /// vectorization has actually taken place).
  using VectorizationCostTy = std::pair<unsigned, bool>;

  /// Returns the expected execution cost. The unit of the cost does
  /// not matter because we use the 'cost' units to compare different
  /// vector widths. The cost that is returned is *not* normalized by
  /// the factor width.
  VectorizationCostTy expectedCost(unsigned VF);

  /// Returns the execution time cost of an instruction for a given vector
  /// width. Vector width of one means scalar.
  VectorizationCostTy getInstructionCost(Instruction *I, unsigned VF);

  /// The cost-computation logic from getInstructionCost which provides
  /// the vector type as an output parameter.
  unsigned getInstructionCost(Instruction *I, unsigned VF, Type *&VectorTy);

  /// Calculate vectorization cost of memory instruction \p I.
  unsigned getMemoryInstructionCost(Instruction *I, unsigned VF);

  /// The cost computation for scalarized memory instruction.
  unsigned getMemInstScalarizationCost(Instruction *I, unsigned VF);

  /// The cost computation for interleaving group of memory instructions.
  unsigned getInterleaveGroupCost(Instruction *I, unsigned VF);

  /// The cost computation for Gather/Scatter instruction.
  unsigned getGatherScatterCost(Instruction *I, unsigned VF);

  /// The cost computation for widening instruction \p I with consecutive
  /// memory access.
  unsigned getConsecutiveMemOpCost(Instruction *I, unsigned VF);

  /// The cost calculation for Load/Store instruction \p I with uniform
  /// pointer -
  /// Load: scalar load + broadcast.
  /// Store: scalar store + (loop invariant value stored? 0 : extract of last
  /// element)
  unsigned getUniformMemOpCost(Instruction *I, unsigned VF);

  /// Estimate the overhead of scalarizing an instruction. This is a
  /// convenience wrapper for the type-based getScalarizationOverhead API.
  unsigned getScalarizationOverhead(Instruction *I, unsigned VF);

  /// Returns whether the instruction is a load or store and will be emitted
  /// as a vector operation.
  bool isConsecutiveLoadOrStore(Instruction *I);

  /// Returns true if an artificially high cost for emulated masked memrefs
  /// should be used.
  bool useEmulatedMaskMemRefHack(Instruction *I);

  /// Map of scalar integer values to the smallest bitwidth they can be legally
  /// represented as. The vector equivalents of these values should be truncated
  /// to this type.
  MapVector<Instruction *, uint64_t> MinBWs;

  /// A type representing the costs for instructions if they were to be
  /// scalarized rather than vectorized. The entries are Instruction-Cost
  /// pairs.
  using ScalarCostsTy = DenseMap<Instruction *, unsigned>;

  /// A set containing all BasicBlocks that are known to be present after
  /// vectorization as a predicated block.
  SmallPtrSet<BasicBlock *, 4> PredicatedBBsAfterVectorization;

  /// Records whether it is allowed to have the original scalar loop execute at
  /// least once. This may be needed as a fallback loop in case runtime
  /// aliasing/dependence checks fail, or to handle the tail/remainder
  /// iterations when the trip count is unknown or doesn't divide by the VF,
  /// or as a peel-loop to handle gaps in interleave-groups.
  /// Under optsize and when the trip count is very small we don't allow any
  /// iterations to execute in the scalar loop.
  ScalarEpilogueLowering ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;

  /// All blocks of loop are to be masked to fold tail of scalar iterations.
  bool FoldTailByMasking = false;

  /// A map holding scalar costs for different vectorization factors. The
  /// presence of a cost for an instruction in the mapping indicates that the
  /// instruction will be scalarized when vectorizing with the associated
  /// vectorization factor. The entries are VF-ScalarCostTy pairs.
  DenseMap<unsigned, ScalarCostsTy> InstsToScalarize;

  /// Holds the instructions known to be uniform after vectorization.
  /// The data is collected per VF.
  DenseMap<unsigned, SmallPtrSet<Instruction *, 4>> Uniforms;

  /// Holds the instructions known to be scalar after vectorization.
  /// The data is collected per VF.
  DenseMap<unsigned, SmallPtrSet<Instruction *, 4>> Scalars;

  /// Holds the instructions (address computations) that are forced to be
  /// scalarized.
  DenseMap<unsigned, SmallPtrSet<Instruction *, 4>> ForcedScalars;

  /// Returns the expected difference in cost from scalarizing the expression
  /// feeding a predicated instruction \p PredInst. The instructions to
  /// scalarize and their scalar costs are collected in \p ScalarCosts. A
  /// non-negative return value implies the expression will be scalarized.
  /// Currently, only single-use chains are considered for scalarization.
  int computePredInstDiscount(Instruction *PredInst, ScalarCostsTy &ScalarCosts,
      unsigned VF);

  /// Collect the instructions that are uniform after vectorization. An
  /// instruction is uniform if we represent it with a single scalar value in
  /// the vectorized loop corresponding to each vector iteration. Examples of
  /// uniform instructions include pointer operands of consecutive or
  /// interleaved memory accesses. Note that although uniformity implies an
  /// instruction will be scalar, the reverse is not true. In general, a
  /// scalarized instruction will be represented by VF scalar values in the
  /// vectorized loop, each corresponding to an iteration of the original
  /// scalar loop.
  void collectLoopUniforms(unsigned VF);

  /// Collect the instructions that are scalar after vectorization. An
  /// instruction is scalar if it is known to be uniform or will be scalarized
  /// during vectorization. Non-uniform scalarized instructions will be
  /// represented by VF values in the vectorized loop, each corresponding to an
  /// iteration of the original scalar loop.
  void collectLoopScalars(unsigned VF);

  /// Keeps cost model vectorization decision and cost for instructions.
  /// Right now it is used for memory instructions only.
  using DecisionList = DenseMap<std::pair<Instruction *, unsigned>,
      std::pair<InstWidening, unsigned>>;

  DecisionList WideningDecisions;

  /// Returns true if \p V is expected to be vectorized and it needs to be
  /// extracted.
  bool needsExtract(Value *V, unsigned VF) const {
    Instruction *I = dyn_cast<Instruction>(V);
    if (VF == 1 || !I || !TheLoop->contains(I) || TheLoop->isLoopInvariant(I))
      return false;

    // Assume we can vectorize V (and hence we need extraction) if the
    // scalars are not computed yet. This can happen, because it is called
    // via getScalarizationOverhead from setCostBasedWideningDecision, before
    // the scalars are collected. That should be a safe assumption in most
    // cases, because we check if the operands have vectorizable types
    // beforehand in LoopVectorizationLegality.
    return Scalars.find(VF) == Scalars.end() ||
           !isScalarAfterVectorization(I, VF);
  }

  /// Returns a range containing only operands needing to be extracted.
  SmallVector<Value *, 4> filterExtractingOperands(Instruction::op_range Ops,
      unsigned VF) {
    return SmallVector<Value *, 4>(make_filter_range(
        Ops, [this, VF](Value *V) { return this->needsExtract(V, VF); }));
  }

public:
  /// The loop that we evaluate.
  Loop *TheLoop;

  /// Predicated scalar evolution analysis.
  PredicatedScalarEvolution &PSE;

  /// Loop Info analysis.
  LoopInfo *LI;

  /// Vectorization legality.
  LoopVectorizationLegality *Legal;

  /// Vector target information.
  const TargetTransformInfo &TTI;

  /// Target Library Info.
  const TargetLibraryInfo *TLI;

  /// Demanded bits analysis.
  DemandedBits *DB;

  /// Assumption cache.
  AssumptionCache *AC;

  /// Interface to emit optimization remarks.
  OptimizationRemarkEmitter *ORE;

  const Function *TheFunction;

  /// Loop Vectorize Hint.
  const LoopVectorizeHints *Hints;

  /// The interleave access information contains groups of interleaved accesses
  /// with the same stride and close to each other.
  InterleavedAccessInfo &InterleaveInfo;

  /// Values to ignore in the cost model.
  SmallPtrSet<const Value *, 16> ValuesToIgnore;

  /// Values to ignore in the cost model when VF > 1.
  SmallPtrSet<const Value *, 16> VecValuesToIgnore;
};

} // end namespace llvm

// Return true if \p OuterLp is an outer loop annotated with hints for explicit
// vectorization. The loop needs to be annotated with #pragma omp simd
// simdlen(#) or #pragma clang vectorize(enable) vectorize_width(#). If the
// vector length information is not provided, vectorization is not considered
// explicit. Interleave hints are not allowed either. These limitations will be
// relaxed in the future.
// Please, note that we are currently forced to abuse the pragma 'clang
// vectorize' semantics. This pragma provides *auto-vectorization hints*
// (i.e., LV must check that vectorization is legal) whereas pragma 'omp simd'
// provides *explicit vectorization hints* (LV can bypass legal checks and
// assume that vectorization is legal). However, both hints are implemented
// using the same metadata (llvm.loop.vectorize, processed by
// LoopVectorizeHints). This will be fixed in the future when the native IR
// representation for pragma 'omp simd' is introduced.
static bool isExplicitVecOuterLoop(Loop *OuterLp,
    OptimizationRemarkEmitter *ORE) {
  assert(!OuterLp->empty() && "This is not an outer loop");
  LoopVectorizeHints Hints(OuterLp, true /*DisableInterleaving*/, *ORE);

  // Only outer loops with an explicit vectorization hint are supported.
  // Unannotated outer loops are ignored.
  if (Hints.getForce() == LoopVectorizeHints::FK_Undefined)
    return false;

  Function *Fn = OuterLp->getHeader()->getParent();
  if (!Hints.allowVectorization(Fn, OuterLp,
                                true /*VectorizeOnlyWhenForced*/)) {
    LLVM_DEBUG(dbgs() << "LV: Loop hints prevent outer loop vectorization.\n");
    return false;
  }

  if (Hints.getInterleave() > 1) {
    // TODO: Interleave support is future work.
    LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Interleave is not supported for "
                         "outer loops.\n");
    Hints.emitRemarkWithHints();
    return false;
  }

  return true;
}

static void collectSupportedLoops(Loop &L, LoopInfo *LI,
    OptimizationRemarkEmitter *ORE, SmallVectorImpl<Loop *> &V) {
  // Collect inner loops and outer loops without irreducible control flow. For
  // now, only collect outer loops that have explicit vectorization hints. If we
  // are stress testing the VPlan H-CFG construction, we collect the outermost
  // loop of every loop nest.
  if (L.empty() || VPlanBuildStressTest ||
      (EnableVPlanNativePath && isExplicitVecOuterLoop(&L, ORE))) {
    LoopBlocksRPO RPOT(&L);
    RPOT.perform(LI);
    if (!containsIrreducibleCFG<const BasicBlock *>(RPOT, *LI)) {
      V.push_back(&L);
      // TODO: Collect inner loops inside marked outer loops in case
      // vectorization fails for the outer loop. Do not invoke
      // 'containsIrreducibleCFG' again for inner loops when the outer loop is
      // already known to be reducible. We can use an inherited attribute for
      // that.
      return;
    }
  }
  for (Loop *InnerL : L)
    collectSupportedLoops(*InnerL, LI, ORE, V);
}

namespace {

/// The LoopVectorize Pass.
struct LoopVectorize : public FunctionPass {
  /// Pass identification, replacement for typeid
  static char ID;

  LoopVectorizePass Impl;

  explicit LoopVectorize(bool InterleaveOnlyWhenForced = false,
      bool VectorizeOnlyWhenForced = false)
      : FunctionPass(ID) {
    Impl.InterleaveOnlyWhenForced = InterleaveOnlyWhenForced;
    Impl.VectorizeOnlyWhenForced = VectorizeOnlyWhenForced;
    initializeLoopVectorizePass(*PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override {
    if (skipFunction(F))
      return false;

    auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
    auto *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
    auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
    auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
    auto *BFI = &getAnalysis<BlockFrequencyInfoWrapperPass>().getBFI();
    auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
    auto *TLI = TLIP ? &TLIP->getTLI(F) : nullptr;
    auto *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
    auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
    auto *LAA = &getAnalysis<LoopAccessLegacyAnalysis>();
    auto *DB = &getAnalysis<DemandedBitsWrapperPass>().getDemandedBits();
    auto *ORE = &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE();
    auto *PSI = &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI();

    std::function<const LoopAccessInfo &(Loop &)> GetLAA =
        [&](Loop &L) -> const LoopAccessInfo & { return LAA->getInfo(&L); };

    return Impl.runImpl(F, *SE, *LI, *TTI, *DT, *BFI, TLI, *DB, *AA, *AC,
                        GetLAA, *ORE, PSI);
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<AssumptionCacheTracker>();
    AU.addRequired<BlockFrequencyInfoWrapperPass>();
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addRequired<LoopInfoWrapperPass>();
    AU.addRequired<ScalarEvolutionWrapperPass>();
    AU.addRequired<TargetTransformInfoWrapperPass>();
    AU.addRequired<AAResultsWrapperPass>();
    AU.addRequired<LoopAccessLegacyAnalysis>();
    AU.addRequired<DemandedBitsWrapperPass>();
    AU.addRequired<OptimizationRemarkEmitterWrapperPass>();

    // We currently do not preserve loopinfo/dominator analyses with outer loop
    // vectorization. Until this is addressed, mark these analyses as preserved
    // only for non-VPlan-native path.
    // TODO: Preserve Loop and Dominator analyses for VPlan-native path.
    if (!EnableVPlanNativePath) {
      AU.addPreserved<LoopInfoWrapperPass>();
      AU.addPreserved<DominatorTreeWrapperPass>();
    }
    AU.addPreserved<BasicAAWrapperPass>();
    AU.addPreserved<GlobalsAAWrapperPass>();
    AU.addRequired<ProfileSummaryInfoWrapperPass>();
  }
};

} // end anonymous namespace

//===----------------------------------------------------------------------===//
// Implementation of LoopVectorizationLegality, InnerLoopVectorizer and
// LoopVectorizationCostModel and LoopVectorizationPlanner.
//===----------------------------------------------------------------------===//

Value *InnerLoopVectorizer::getBroadcastInstrs(Value *V) {
  // We need to place the broadcast of invariant variables outside the loop,
  // but only if it's proven safe to do so. Else, broadcast will be inside
  // vector loop body.
  Instruction *Instr = dyn_cast<Instruction>(V);
  bool SafeToHoist = OrigLoop->isLoopInvariant(V) &&
      (!Instr || DT->dominates(Instr->getParent(), LoopVectorPreHeader));

  // Place the code for broadcasting invariant variables in the new preheader.
  IRBuilder<>::InsertPointGuard Guard(Builder);
  if (SafeToHoist)
    Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());

  // Broadcast the scalar into all locations in the vector.
  Value *Shuf = Builder.CreateVectorSplat(VF, V, "broadcast");
  return Shuf;
}

void InnerLoopVectorizer::createVectorIntOrFpInductionPHI(
    const InductionDescriptor &II, Value *Step, Instruction *EntryVal) {
  assert((isa<PHINode>(EntryVal) || isa<TruncInst>(EntryVal)) &&
         "Expected either an induction phi-node or a truncate of it!");
  Value *Start = II.getStartValue();

  // Construct the initial value of the vector IV in the vector loop preheader.
  auto CurrIP = Builder.saveIP();
  Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
  if (isa<TruncInst>(EntryVal)) {
    assert(Start->getType()->isIntegerTy() &&
           "Truncation requires an integer type");
    auto *TruncType = cast<IntegerType>(EntryVal->getType());
    Step = Builder.CreateTrunc(Step, TruncType);
    Start = Builder.CreateCast(Instruction::Trunc, Start, TruncType);
  }
  Value *SplatStart = Builder.CreateVectorSplat(VF, Start);
  Value *SteppedStart =
      getStepVector(SplatStart, 0, Step, II.getInductionOpcode());

  // We create vector phi nodes for both integer and floating-point induction
  // variables. Here, we determine the kind of arithmetic we will perform.
  Instruction::BinaryOps AddOp;
  Instruction::BinaryOps MulOp;
  if (Step->getType()->isIntegerTy()) {
    AddOp = Instruction::Add;
    MulOp = Instruction::Mul;
  } else {
    AddOp = II.getInductionOpcode();
    MulOp = Instruction::FMul;
  }

  // Multiply the vectorization factor by the step using integer or
  // floating-point arithmetic as appropriate.
  Value *ConstVF = getSignedIntOrFpConstant(Step->getType(), VF);
  Value *Mul = addFastMathFlag(Builder.CreateBinOp(MulOp, Step, ConstVF));

  // Create a vector splat to use in the induction update.
  //
  // FIXME: If the step is non-constant, we create the vector splat with
  //        IRBuilder. IRBuilder can constant-fold the multiply, but it doesn't
  //        handle a constant vector splat.
  Value *SplatVF = isa<Constant>(Mul)
      ? ConstantVector::getSplat(VF, cast<Constant>(Mul))
      : Builder.CreateVectorSplat(VF, Mul);
  Builder.restoreIP(CurrIP);

  // We may need to add the step a number of times, depending on the unroll
  // factor. The last of those goes into the PHI.
  PHINode *VecInd = PHINode::Create(SteppedStart->getType(), 2, "vec.ind",
                                    &*LoopVectorBody->getFirstInsertionPt());
  VecInd->setDebugLoc(EntryVal->getDebugLoc());
  Instruction *LastInduction = VecInd;
  for (unsigned Part = 0; Part < UF; ++Part) {
    VectorLoopValueMap.setVectorValue(EntryVal, Part, LastInduction);

    if (isa<TruncInst>(EntryVal))
      addMetadata(LastInduction, EntryVal);
    recordVectorLoopValueForInductionCast(II, EntryVal, LastInduction, Part);

    LastInduction = cast<Instruction>(addFastMathFlag(
        Builder.CreateBinOp(AddOp, LastInduction, SplatVF, "step.add")));
    LastInduction->setDebugLoc(EntryVal->getDebugLoc());
  }

  // Move the last step to the end of the latch block. This ensures consistent
  // placement of all induction updates.
  auto *LoopVectorLatch = LI->getLoopFor(LoopVectorBody)->getLoopLatch();
  auto *Br = cast<BranchInst>(LoopVectorLatch->getTerminator());
  auto *ICmp = cast<Instruction>(Br->getCondition());
  LastInduction->moveBefore(ICmp);
  LastInduction->setName("vec.ind.next");

  VecInd->addIncoming(SteppedStart, LoopVectorPreHeader);
  VecInd->addIncoming(LastInduction, LoopVectorLatch);
}
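// Illustrative note (not from the original source): for an integer IV with
// start 0, step 1, VF = 4 and UF = 2, SteppedStart is <0, 1, 2, 3> and
// SplatVF is <4, 4, 4, 4>, so part 0 of the widened IV is <0, 1, 2, 3>,
// part 1 ("step.add") is <4, 5, 6, 7>, and the "vec.ind.next" value fed back
// into the phi is <8, 9, 10, 11>.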

bool InnerLoopVectorizer::shouldScalarizeInstruction(Instruction *I) const {
  return Cost->isScalarAfterVectorization(I, VF) ||
         Cost->isProfitableToScalarize(I, VF);
}

bool InnerLoopVectorizer::needsScalarInduction(Instruction *IV) const {
  if (shouldScalarizeInstruction(IV))
    return true;
  auto isScalarInst = [&](User *U) -> bool {
    auto *I = cast<Instruction>(U);
    return (OrigLoop->contains(I) && shouldScalarizeInstruction(I));
  };
  return llvm::any_of(IV->users(), isScalarInst);
}

void InnerLoopVectorizer::recordVectorLoopValueForInductionCast(
    const InductionDescriptor &ID, const Instruction *EntryVal,
    Value *VectorLoopVal, unsigned Part, unsigned Lane) {
  assert((isa<PHINode>(EntryVal) || isa<TruncInst>(EntryVal)) &&
         "Expected either an induction phi-node or a truncate of it!");

  // This induction variable is not the phi from the original loop but the
  // newly-created IV based on the proof that casted Phi is equal to the
  // uncasted Phi in the vectorized loop (under a runtime guard possibly). It
  // re-uses the same InductionDescriptor that original IV uses but we don't
  // have to do any recording in this case - that is done when original IV is
  // processed.
  if (isa<TruncInst>(EntryVal))
    return;

  const SmallVectorImpl<Instruction *> &Casts = ID.getCastInsts();
  if (Casts.empty())
    return;

  // Only the first Cast instruction in the Casts vector is of interest.
  // The rest of the Casts (if exist) have no uses outside the
  // induction update chain itself.
  Instruction *CastInst = *Casts.begin();
  if (Lane < UINT_MAX)
    VectorLoopValueMap.setScalarValue(CastInst, {Part, Lane}, VectorLoopVal);
  else
    VectorLoopValueMap.setVectorValue(CastInst, Part, VectorLoopVal);
}
  1490. void InnerLoopVectorizer::widenIntOrFpInduction(PHINode *IV, TruncInst *Trunc) {
  1491. assert((IV->getType()->isIntegerTy() || IV != OldInduction) &&
  1492. "Primary induction variable must have an integer type");
  1493. auto II = Legal->getInductionVars()->find(IV);
  1494. assert(II != Legal->getInductionVars()->end() && "IV is not an induction");
  1495. auto ID = II->second;
  1496. assert(IV->getType() == ID.getStartValue()->getType() && "Types must match");
  1497. // The scalar value to broadcast. This will be derived from the canonical
  1498. // induction variable.
  1499. Value *ScalarIV = nullptr;
  1500. // The value from the original loop to which we are mapping the new induction
  1501. // variable.
  1502. Instruction *EntryVal = Trunc ? cast<Instruction>(Trunc) : IV;
  1503. // True if we have vectorized the induction variable.
  1504. auto VectorizedIV = false;
  1505. // Determine if we want a scalar version of the induction variable. This is
  1506. // true if the induction variable itself is not widened, or if it has at
  1507. // least one user in the loop that is not widened.
  1508. auto NeedsScalarIV = VF > 1 && needsScalarInduction(EntryVal);
  1509. // Generate code for the induction step. Note that induction steps are
  1510. // required to be loop-invariant
  1511. assert(PSE.getSE()->isLoopInvariant(ID.getStep(), OrigLoop) &&
  1512. "Induction step should be loop invariant");
  1513. auto &DL = OrigLoop->getHeader()->getModule()->getDataLayout();
  1514. Value *Step = nullptr;
  1515. if (PSE.getSE()->isSCEVable(IV->getType())) {
  1516. SCEVExpander Exp(*PSE.getSE(), DL, "induction");
  1517. Step = Exp.expandCodeFor(ID.getStep(), ID.getStep()->getType(),
  1518. LoopVectorPreHeader->getTerminator());
  1519. } else {
  1520. Step = cast<SCEVUnknown>(ID.getStep())->getValue();
  1521. }
  1522. // Try to create a new independent vector induction variable. If we can't
  1523. // create the phi node, we will splat the scalar induction variable in each
  1524. // loop iteration.
  1525. if (VF > 1 && !shouldScalarizeInstruction(EntryVal)) {
  1526. createVectorIntOrFpInductionPHI(ID, Step, EntryVal);
  1527. VectorizedIV = true;
  1528. }
  1529. // If we haven't yet vectorized the induction variable, or if we will create
  1530. // a scalar one, we need to define the scalar induction variable and step
  1531. // values. If we were given a truncation type, truncate the canonical
  1532. // induction variable and step. Otherwise, derive these values from the
  1533. // induction descriptor.
  1534. if (!VectorizedIV || NeedsScalarIV) {
  1535. ScalarIV = Induction;
  1536. if (IV != OldInduction) {
  1537. ScalarIV = IV->getType()->isIntegerTy()
  1538. ? Builder.CreateSExtOrTrunc(Induction, IV->getType())
  1539. : Builder.CreateCast(Instruction::SIToFP, Induction,
  1540. IV->getType());
  1541. ScalarIV = emitTransformedIndex(Builder, ScalarIV, PSE.getSE(), DL, ID);
  1542. ScalarIV->setName("offset.idx");
  1543. }
  1544. if (Trunc) {
  1545. auto *TruncType = cast<IntegerType>(Trunc->getType());
  1546. assert(Step->getType()->isIntegerTy() &&
  1547. "Truncation requires an integer step");
  1548. ScalarIV = Builder.CreateTrunc(ScalarIV, TruncType);
  1549. Step = Builder.CreateTrunc(Step, TruncType);
  1550. }
  1551. }
  1552. // If we haven't yet vectorized the induction variable, splat the scalar
  1553. // induction variable, and build the necessary step vectors.
  1554. // TODO: Don't do it unless the vectorized IV is really required.
  1555. if (!VectorizedIV) {
  1556. Value *Broadcasted = getBroadcastInstrs(ScalarIV);
  1557. for (unsigned Part = 0; Part < UF; ++Part) {
  1558. Value *EntryPart =
  1559. getStepVector(Broadcasted, VF * Part, Step, ID.getInductionOpcode());
  1560. VectorLoopValueMap.setVectorValue(EntryVal, Part, EntryPart);
  1561. if (Trunc)
  1562. addMetadata(EntryPart, Trunc);
  1563. recordVectorLoopValueForInductionCast(ID, EntryVal, EntryPart, Part);
  1564. }
  1565. }
  1566. // If an induction variable is only used for counting loop iterations or
  1567. // calculating addresses, it doesn't need to be widened. Create scalar steps
  1568. // that can be used by instructions we will later scalarize. Note that the
  1569. // addition of the scalar steps will not increase the number of instructions
  1570. // in the loop in the common case prior to InstCombine. We will be trading
  1571. // one vector extract for each scalar step.
  1572. if (NeedsScalarIV)
  1573. buildScalarSteps(ScalarIV, Step, EntryVal, ID);
  1574. }
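// Illustrative sketch (not part of the original source): for an i32 IV with
// start 0 and step 1, VF = 4 and UF = 1, the widened induction created above
// looks roughly like
//   %vec.ind      = phi <4 x i32> [ <i32 0, i32 1, i32 2, i32 3>, %vector.ph ],
//                                 [ %vec.ind.next, %vector.body ]
//   %vec.ind.next = add <4 x i32> %vec.ind, <i32 4, i32 4, i32 4, i32 4>
// while the scalar steps, if requested, are the per-lane values derived from
// the canonical induction variable by buildScalarSteps.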
  1575. Value *InnerLoopVectorizer::getStepVector(Value *Val, int StartIdx, Value *Step,
  1576. Instruction::BinaryOps BinOp) {
  1577. // Create and check the types.
  1578. assert(Val->getType()->isVectorTy() && "Must be a vector");
  1579. int VLen = Val->getType()->getVectorNumElements();
  1580. Type *STy = Val->getType()->getScalarType();
  1581. assert((STy->isIntegerTy() || STy->isFloatingPointTy()) &&
  1582. "Induction Step must be an integer or FP");
  1583. assert(Step->getType() == STy && "Step has wrong type");
  1584. SmallVector<Constant *, 8> Indices;
  1585. if (STy->isIntegerTy()) {
  1586. // Create a vector of consecutive numbers from zero to VF.
  1587. for (int i = 0; i < VLen; ++i)
  1588. Indices.push_back(ConstantInt::get(STy, StartIdx + i));
  1589. // Add the consecutive indices to the vector value.
  1590. Constant *Cv = ConstantVector::get(Indices);
  1591. assert(Cv->getType() == Val->getType() && "Invalid consecutive vec");
  1592. Step = Builder.CreateVectorSplat(VLen, Step);
  1593. assert(Step->getType() == Val->getType() && "Invalid step vec");
  1594. // FIXME: The newly created binary instructions should contain nsw/nuw flags,
  1595. // which can be found from the original scalar operations.
  1596. Step = Builder.CreateMul(Cv, Step);
  1597. return Builder.CreateAdd(Val, Step, "induction");
  1598. }
  1599. // Floating point induction.
  1600. assert((BinOp == Instruction::FAdd || BinOp == Instruction::FSub) &&
  1601. "Binary Opcode should be specified for FP induction");
  1602. // Create a vector of consecutive numbers from zero to VF.
  1603. for (int i = 0; i < VLen; ++i)
  1604. Indices.push_back(ConstantFP::get(STy, (double)(StartIdx + i)));
  1605. // Add the consecutive indices to the vector value.
  1606. Constant *Cv = ConstantVector::get(Indices);
  1607. Step = Builder.CreateVectorSplat(VLen, Step);
  1608. // Floating point operations had to be 'fast' to enable the induction.
  1609. FastMathFlags Flags;
  1610. Flags.setFast();
  1611. Value *MulOp = Builder.CreateFMul(Cv, Step);
  1612. if (isa<Instruction>(MulOp))
// Have to check because MulOp may be a constant.
  1614. cast<Instruction>(MulOp)->setFastMathFlags(Flags);
  1615. Value *BOp = Builder.CreateBinOp(BinOp, Val, MulOp, "induction");
  1616. if (isa<Instruction>(BOp))
  1617. cast<Instruction>(BOp)->setFastMathFlags(Flags);
  1618. return BOp;
  1619. }
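// Worked example (illustrative only): with VF = 4, StartIdx = 4 and an
// integer Step of 2, the code above builds
//   Cv   = <i32 4, i32 5, i32 6, i32 7>
//   Step = <i32 2, i32 2, i32 2, i32 2>   ; splat
// and returns Val + Cv * Step = Val + <i32 8, i32 10, i32 12, i32 14>,
// i.e. the step vector for the second unroll part of a stride-2 induction.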
  1620. void InnerLoopVectorizer::buildScalarSteps(Value *ScalarIV, Value *Step,
  1621. Instruction *EntryVal,
  1622. const InductionDescriptor &ID) {
  1623. // We shouldn't have to build scalar steps if we aren't vectorizing.
  1624. assert(VF > 1 && "VF should be greater than one");
  1625. // Get the value type and ensure it and the step have the same integer type.
  1626. Type *ScalarIVTy = ScalarIV->getType()->getScalarType();
  1627. assert(ScalarIVTy == Step->getType() &&
  1628. "Val and Step should have the same type");
  1629. // We build scalar steps for both integer and floating-point induction
  1630. // variables. Here, we determine the kind of arithmetic we will perform.
  1631. Instruction::BinaryOps AddOp;
  1632. Instruction::BinaryOps MulOp;
  1633. if (ScalarIVTy->isIntegerTy()) {
  1634. AddOp = Instruction::Add;
  1635. MulOp = Instruction::Mul;
  1636. } else {
  1637. AddOp = ID.getInductionOpcode();
  1638. MulOp = Instruction::FMul;
  1639. }
  1640. // Determine the number of scalars we need to generate for each unroll
  1641. // iteration. If EntryVal is uniform, we only need to generate the first
  1642. // lane. Otherwise, we generate all VF values.
  1643. unsigned Lanes =
  1644. Cost->isUniformAfterVectorization(cast<Instruction>(EntryVal), VF) ? 1
  1645. : VF;
  1646. // Compute the scalar steps and save the results in VectorLoopValueMap.
  1647. for (unsigned Part = 0; Part < UF; ++Part) {
  1648. for (unsigned Lane = 0; Lane < Lanes; ++Lane) {
  1649. auto *StartIdx = getSignedIntOrFpConstant(ScalarIVTy, VF * Part + Lane);
  1650. auto *Mul = addFastMathFlag(Builder.CreateBinOp(MulOp, StartIdx, Step));
  1651. auto *Add = addFastMathFlag(Builder.CreateBinOp(AddOp, ScalarIV, Mul));
  1652. VectorLoopValueMap.setScalarValue(EntryVal, {Part, Lane}, Add);
  1653. recordVectorLoopValueForInductionCast(ID, EntryVal, Add, Part, Lane);
  1654. }
  1655. }
  1656. }
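// Illustrative example (not from the original file): with UF = 2, VF = 4 and
// a non-uniform EntryVal, the scalars produced above are
//   ScalarIV + (VF * Part + Lane) * Step   for Part in [0,1], Lane in [0,3]
// whereas a uniform-after-vectorization EntryVal only needs lane 0 of each
// part, i.e. ScalarIV + 0 * Step and ScalarIV + 4 * Step.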
  1657. Value *InnerLoopVectorizer::getOrCreateVectorValue(Value *V, unsigned Part) {
  1658. assert(V != Induction && "The new induction variable should not be used.");
  1659. assert(!V->getType()->isVectorTy() && "Can't widen a vector");
  1660. assert(!V->getType()->isVoidTy() && "Type does not produce a value");
  1661. // If we have a stride that is replaced by one, do it here. Defer this for
  1662. // the VPlan-native path until we start running Legal checks in that path.
  1663. if (!EnableVPlanNativePath && Legal->hasStride(V))
  1664. V = ConstantInt::get(V->getType(), 1);
  1665. // If we have a vector mapped to this value, return it.
  1666. if (VectorLoopValueMap.hasVectorValue(V, Part))
  1667. return VectorLoopValueMap.getVectorValue(V, Part);
  1668. // If the value has not been vectorized, check if it has been scalarized
  1669. // instead. If it has been scalarized, and we actually need the value in
  1670. // vector form, we will construct the vector values on demand.
  1671. if (VectorLoopValueMap.hasAnyScalarValue(V)) {
  1672. Value *ScalarValue = VectorLoopValueMap.getScalarValue(V, {Part, 0});
  1673. // If we've scalarized a value, that value should be an instruction.
  1674. auto *I = cast<Instruction>(V);
  1675. // If we aren't vectorizing, we can just copy the scalar map values over to
  1676. // the vector map.
  1677. if (VF == 1) {
  1678. VectorLoopValueMap.setVectorValue(V, Part, ScalarValue);
  1679. return ScalarValue;
  1680. }
  1681. // Get the last scalar instruction we generated for V and Part. If the value
  1682. // is known to be uniform after vectorization, this corresponds to lane zero
  1683. // of the Part unroll iteration. Otherwise, the last instruction is the one
  1684. // we created for the last vector lane of the Part unroll iteration.
  1685. unsigned LastLane = Cost->isUniformAfterVectorization(I, VF) ? 0 : VF - 1;
  1686. auto *LastInst = cast<Instruction>(
  1687. VectorLoopValueMap.getScalarValue(V, {Part, LastLane}));
  1688. // Set the insert point after the last scalarized instruction. This ensures
  1689. // the insertelement sequence will directly follow the scalar definitions.
  1690. auto OldIP = Builder.saveIP();
  1691. auto NewIP = std::next(BasicBlock::iterator(LastInst));
  1692. Builder.SetInsertPoint(&*NewIP);
  1693. // However, if we are vectorizing, we need to construct the vector values.
  1694. // If the value is known to be uniform after vectorization, we can just
  1695. // broadcast the scalar value corresponding to lane zero for each unroll
  1696. // iteration. Otherwise, we construct the vector values using insertelement
  1697. // instructions. Since the resulting vectors are stored in
  1698. // VectorLoopValueMap, we will only generate the insertelements once.
  1699. Value *VectorValue = nullptr;
  1700. if (Cost->isUniformAfterVectorization(I, VF)) {
  1701. VectorValue = getBroadcastInstrs(ScalarValue);
  1702. VectorLoopValueMap.setVectorValue(V, Part, VectorValue);
  1703. } else {
  1704. // Initialize packing with insertelements to start from undef.
  1705. Value *Undef = UndefValue::get(VectorType::get(V->getType(), VF));
  1706. VectorLoopValueMap.setVectorValue(V, Part, Undef);
  1707. for (unsigned Lane = 0; Lane < VF; ++Lane)
  1708. packScalarIntoVectorValue(V, {Part, Lane});
  1709. VectorValue = VectorLoopValueMap.getVectorValue(V, Part);
  1710. }
  1711. Builder.restoreIP(OldIP);
  1712. return VectorValue;
  1713. }
  1714. // If this scalar is unknown, assume that it is a constant or that it is
  1715. // loop invariant. Broadcast V and save the value for future uses.
  1716. Value *B = getBroadcastInstrs(V);
  1717. VectorLoopValueMap.setVectorValue(V, Part, B);
  1718. return B;
  1719. }
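// Sketch of the on-demand packing above (illustrative only): if %v was
// scalarized into %v.0 ... %v.3 for VF = 4, the vector form is rebuilt as
//   %p0 = insertelement <4 x i32> undef, i32 %v.0, i32 0
//   %p1 = insertelement <4 x i32> %p0,   i32 %v.1, i32 1
//   ...
// right after the last scalar definition, and the result is cached in
// VectorLoopValueMap so the insertelement chain is only emitted once.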
  1720. Value *
  1721. InnerLoopVectorizer::getOrCreateScalarValue(Value *V,
  1722. const VPIteration &Instance) {
  1723. // If the value is not an instruction contained in the loop, it should
  1724. // already be scalar.
  1725. if (OrigLoop->isLoopInvariant(V))
  1726. return V;
  1727. assert(Instance.Lane > 0
  1728. ? !Cost->isUniformAfterVectorization(cast<Instruction>(V), VF)
  1729. : true && "Uniform values only have lane zero");
  1730. // If the value from the original loop has not been vectorized, it is
  1731. // represented by UF x VF scalar values in the new loop. Return the requested
  1732. // scalar value.
  1733. if (VectorLoopValueMap.hasScalarValue(V, Instance))
  1734. return VectorLoopValueMap.getScalarValue(V, Instance);
  1735. // If the value has not been scalarized, get its entry in VectorLoopValueMap
  1736. // for the given unroll part. If this entry is not a vector type (i.e., the
  1737. // vectorization factor is one), there is no need to generate an
  1738. // extractelement instruction.
  1739. auto *U = getOrCreateVectorValue(V, Instance.Part);
  1740. if (!U->getType()->isVectorTy()) {
  1741. assert(VF == 1 && "Value not scalarized has non-vector type");
  1742. return U;
  1743. }
  1744. // Otherwise, the value from the original loop has been vectorized and is
  1745. // represented by UF vector values. Extract and return the requested scalar
  1746. // value from the appropriate vector lane.
  1747. return Builder.CreateExtractElement(U, Builder.getInt32(Instance.Lane));
  1748. }
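// For example (illustrative): requesting {Part 1, Lane 2} of a value that was
// vectorized with VF = 4 yields
//   %lane = extractelement <4 x i32> %vec.part1, i32 2
// whereas a value that was scalarized is simply looked up in the scalar map.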
  1749. void InnerLoopVectorizer::packScalarIntoVectorValue(
  1750. Value *V, const VPIteration &Instance) {
  1751. assert(V != Induction && "The new induction variable should not be used.");
  1752. assert(!V->getType()->isVectorTy() && "Can't pack a vector");
  1753. assert(!V->getType()->isVoidTy() && "Type does not produce a value");
  1754. Value *ScalarInst = VectorLoopValueMap.getScalarValue(V, Instance);
  1755. Value *VectorValue = VectorLoopValueMap.getVectorValue(V, Instance.Part);
  1756. VectorValue = Builder.CreateInsertElement(VectorValue, ScalarInst,
  1757. Builder.getInt32(Instance.Lane));
  1758. VectorLoopValueMap.resetVectorValue(V, Instance.Part, VectorValue);
  1759. }
  1760. Value *InnerLoopVectorizer::reverseVector(Value *Vec) {
  1761. assert(Vec->getType()->isVectorTy() && "Invalid type");
  1762. SmallVector<Constant *, 8> ShuffleMask;
  1763. for (unsigned i = 0; i < VF; ++i)
  1764. ShuffleMask.push_back(Builder.getInt32(VF - i - 1));
  1765. return Builder.CreateShuffleVector(Vec, UndefValue::get(Vec->getType()),
  1766. ConstantVector::get(ShuffleMask),
  1767. "reverse");
  1768. }
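// Illustrative shuffle (not in the original source): for VF = 4 the reverse
// mask is <3, 2, 1, 0>, so the call above emits
//   %reverse = shufflevector <4 x i32> %vec, <4 x i32> undef,
//                            <4 x i32> <i32 3, i32 2, i32 1, i32 0>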
  1769. // Return whether we allow using masked interleave-groups (for dealing with
  1770. // strided loads/stores that reside in predicated blocks, or for dealing
  1771. // with gaps).
  1772. static bool useMaskedInterleavedAccesses(const TargetTransformInfo &TTI) {
  1773. // If an override option has been passed in for interleaved accesses, use it.
  1774. if (EnableMaskedInterleavedMemAccesses.getNumOccurrences() > 0)
  1775. return EnableMaskedInterleavedMemAccesses;
  1776. return TTI.enableMaskedInterleavedAccessVectorization();
  1777. }
  1778. // Try to vectorize the interleave group that \p Instr belongs to.
  1779. //
  1780. // E.g. Translate following interleaved load group (factor = 3):
  1781. // for (i = 0; i < N; i+=3) {
  1782. // R = Pic[i]; // Member of index 0
  1783. // G = Pic[i+1]; // Member of index 1
  1784. // B = Pic[i+2]; // Member of index 2
  1785. // ... // do something to R, G, B
  1786. // }
  1787. // To:
  1788. // %wide.vec = load <12 x i32> ; Read 4 tuples of R,G,B
  1789. // %R.vec = shuffle %wide.vec, undef, <0, 3, 6, 9> ; R elements
  1790. // %G.vec = shuffle %wide.vec, undef, <1, 4, 7, 10> ; G elements
  1791. // %B.vec = shuffle %wide.vec, undef, <2, 5, 8, 11> ; B elements
  1792. //
  1793. // Or translate following interleaved store group (factor = 3):
  1794. // for (i = 0; i < N; i+=3) {
  1795. // ... do something to R, G, B
  1796. // Pic[i] = R; // Member of index 0
  1797. // Pic[i+1] = G; // Member of index 1
  1798. // Pic[i+2] = B; // Member of index 2
  1799. // }
  1800. // To:
  1801. // %R_G.vec = shuffle %R.vec, %G.vec, <0, 1, 2, ..., 7>
  1802. // %B_U.vec = shuffle %B.vec, undef, <0, 1, 2, 3, u, u, u, u>
  1803. // %interleaved.vec = shuffle %R_G.vec, %B_U.vec,
  1804. // <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11> ; Interleave R,G,B elements
  1805. // store <12 x i32> %interleaved.vec ; Write 4 tuples of R,G,B
  1806. void InnerLoopVectorizer::vectorizeInterleaveGroup(Instruction *Instr,
  1807. VectorParts *BlockInMask) {
  1808. const InterleaveGroup<Instruction> *Group =
  1809. Cost->getInterleavedAccessGroup(Instr);
  1810. assert(Group && "Fail to get an interleaved access group.");
  1811. // Skip if current instruction is not the insert position.
  1812. if (Instr != Group->getInsertPos())
  1813. return;
  1814. const DataLayout &DL = Instr->getModule()->getDataLayout();
  1815. Value *Ptr = getLoadStorePointerOperand(Instr);
  1816. // Prepare for the vector type of the interleaved load/store.
  1817. Type *ScalarTy = getMemInstValueType(Instr);
  1818. unsigned InterleaveFactor = Group->getFactor();
  1819. Type *VecTy = VectorType::get(ScalarTy, InterleaveFactor * VF);
  1820. Type *PtrTy = VecTy->getPointerTo(getLoadStoreAddressSpace(Instr));
  1821. // Prepare for the new pointers.
  1822. setDebugLocFromInst(Builder, Ptr);
  1823. SmallVector<Value *, 2> NewPtrs;
  1824. unsigned Index = Group->getIndex(Instr);
  1825. VectorParts Mask;
  1826. bool IsMaskForCondRequired = BlockInMask;
  1827. if (IsMaskForCondRequired) {
  1828. Mask = *BlockInMask;
  1829. // TODO: extend the masked interleaved-group support to reversed access.
  1830. assert(!Group->isReverse() && "Reversed masked interleave-group "
  1831. "not supported.");
  1832. }
  1833. // If the group is reverse, adjust the index to refer to the last vector lane
  1834. // instead of the first. We adjust the index from the first vector lane,
  1835. // rather than directly getting the pointer for lane VF - 1, because the
  1836. // pointer operand of the interleaved access is supposed to be uniform. For
  1837. // uniform instructions, we're only required to generate a value for the
  1838. // first vector lane in each unroll iteration.
  1839. if (Group->isReverse())
  1840. Index += (VF - 1) * Group->getFactor();
  1841. bool InBounds = false;
  1842. if (auto *gep = dyn_cast<GetElementPtrInst>(Ptr->stripPointerCasts()))
  1843. InBounds = gep->isInBounds();
  1844. for (unsigned Part = 0; Part < UF; Part++) {
  1845. Value *NewPtr = getOrCreateScalarValue(Ptr, {Part, 0});
// Note that the current instruction could be at any index of the group, so we
// need to adjust the address back to the member of index 0.
  1848. //
  1849. // E.g. a = A[i+1]; // Member of index 1 (Current instruction)
  1850. // b = A[i]; // Member of index 0
  1851. // Current pointer is pointed to A[i+1], adjust it to A[i].
  1852. //
  1853. // E.g. A[i+1] = a; // Member of index 1
  1854. // A[i] = b; // Member of index 0
  1855. // A[i+2] = c; // Member of index 2 (Current instruction)
  1856. // Current pointer is pointed to A[i+2], adjust it to A[i].
  1857. NewPtr = Builder.CreateGEP(ScalarTy, NewPtr, Builder.getInt32(-Index));
  1858. if (InBounds)
  1859. cast<GetElementPtrInst>(NewPtr)->setIsInBounds(true);
  1860. // Cast to the vector pointer type.
  1861. NewPtrs.push_back(Builder.CreateBitCast(NewPtr, PtrTy));
  1862. }
  1863. setDebugLocFromInst(Builder, Instr);
  1864. Value *UndefVec = UndefValue::get(VecTy);
  1865. Value *MaskForGaps = nullptr;
  1866. if (Group->requiresScalarEpilogue() && !Cost->isScalarEpilogueAllowed()) {
  1867. MaskForGaps = createBitMaskForGaps(Builder, VF, *Group);
  1868. assert(MaskForGaps && "Mask for Gaps is required but it is null");
  1869. }
  1870. // Vectorize the interleaved load group.
  1871. if (isa<LoadInst>(Instr)) {
  1872. // For each unroll part, create a wide load for the group.
  1873. SmallVector<Value *, 2> NewLoads;
  1874. for (unsigned Part = 0; Part < UF; Part++) {
  1875. Instruction *NewLoad;
  1876. if (IsMaskForCondRequired || MaskForGaps) {
  1877. assert(useMaskedInterleavedAccesses(*TTI) &&
  1878. "masked interleaved groups are not allowed.");
  1879. Value *GroupMask = MaskForGaps;
  1880. if (IsMaskForCondRequired) {
  1881. auto *Undefs = UndefValue::get(Mask[Part]->getType());
  1882. auto *RepMask = createReplicatedMask(Builder, InterleaveFactor, VF);
  1883. Value *ShuffledMask = Builder.CreateShuffleVector(
  1884. Mask[Part], Undefs, RepMask, "interleaved.mask");
  1885. GroupMask = MaskForGaps
  1886. ? Builder.CreateBinOp(Instruction::And, ShuffledMask,
  1887. MaskForGaps)
  1888. : ShuffledMask;
  1889. }
  1890. NewLoad =
  1891. Builder.CreateMaskedLoad(NewPtrs[Part], Group->getAlignment(),
  1892. GroupMask, UndefVec, "wide.masked.vec");
  1893. }
  1894. else
  1895. NewLoad = Builder.CreateAlignedLoad(VecTy, NewPtrs[Part],
  1896. Group->getAlignment(), "wide.vec");
  1897. Group->addMetadata(NewLoad);
  1898. NewLoads.push_back(NewLoad);
  1899. }
  1900. // For each member in the group, shuffle out the appropriate data from the
  1901. // wide loads.
  1902. for (unsigned I = 0; I < InterleaveFactor; ++I) {
  1903. Instruction *Member = Group->getMember(I);
  1904. // Skip the gaps in the group.
  1905. if (!Member)
  1906. continue;
  1907. Constant *StrideMask = createStrideMask(Builder, I, InterleaveFactor, VF);
  1908. for (unsigned Part = 0; Part < UF; Part++) {
  1909. Value *StridedVec = Builder.CreateShuffleVector(
  1910. NewLoads[Part], UndefVec, StrideMask, "strided.vec");
// If this member has a different type, cast the result type.
  1912. if (Member->getType() != ScalarTy) {
  1913. VectorType *OtherVTy = VectorType::get(Member->getType(), VF);
  1914. StridedVec = createBitOrPointerCast(StridedVec, OtherVTy, DL);
  1915. }
  1916. if (Group->isReverse())
  1917. StridedVec = reverseVector(StridedVec);
  1918. VectorLoopValueMap.setVectorValue(Member, Part, StridedVec);
  1919. }
  1920. }
  1921. return;
  1922. }
  1923. // The sub vector type for current instruction.
  1924. VectorType *SubVT = VectorType::get(ScalarTy, VF);
  1925. // Vectorize the interleaved store group.
  1926. for (unsigned Part = 0; Part < UF; Part++) {
  1927. // Collect the stored vector from each member.
  1928. SmallVector<Value *, 4> StoredVecs;
  1929. for (unsigned i = 0; i < InterleaveFactor; i++) {
// An interleaved store group doesn't allow a gap, so each index has a member.
  1931. Instruction *Member = Group->getMember(i);
  1932. assert(Member && "Fail to get a member from an interleaved store group");
  1933. Value *StoredVec = getOrCreateVectorValue(
  1934. cast<StoreInst>(Member)->getValueOperand(), Part);
  1935. if (Group->isReverse())
  1936. StoredVec = reverseVector(StoredVec);
// If this member has a different type, cast it to a unified type.
  1938. if (StoredVec->getType() != SubVT)
  1939. StoredVec = createBitOrPointerCast(StoredVec, SubVT, DL);
  1940. StoredVecs.push_back(StoredVec);
  1941. }
  1942. // Concatenate all vectors into a wide vector.
  1943. Value *WideVec = concatenateVectors(Builder, StoredVecs);
  1944. // Interleave the elements in the wide vector.
  1945. Constant *IMask = createInterleaveMask(Builder, VF, InterleaveFactor);
  1946. Value *IVec = Builder.CreateShuffleVector(WideVec, UndefVec, IMask,
  1947. "interleaved.vec");
  1948. Instruction *NewStoreInstr;
  1949. if (IsMaskForCondRequired) {
  1950. auto *Undefs = UndefValue::get(Mask[Part]->getType());
  1951. auto *RepMask = createReplicatedMask(Builder, InterleaveFactor, VF);
  1952. Value *ShuffledMask = Builder.CreateShuffleVector(
  1953. Mask[Part], Undefs, RepMask, "interleaved.mask");
  1954. NewStoreInstr = Builder.CreateMaskedStore(
  1955. IVec, NewPtrs[Part], Group->getAlignment(), ShuffledMask);
  1956. }
  1957. else
  1958. NewStoreInstr = Builder.CreateAlignedStore(IVec, NewPtrs[Part],
  1959. Group->getAlignment());
  1960. Group->addMetadata(NewStoreInstr);
  1961. }
  1962. }
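// Note on the masks used above (illustrative): for a block-in mask
// <m0, m1, m2, m3> and an interleave factor of 3, the replicated mask is
//   <m0, m0, m0, m1, m1, m1, m2, m2, m2, m3, m3, m3>
// so each original lane's predicate guards every member of its tuple; if a
// gap mask is also needed, the two masks are combined with an 'and'.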
  1963. void InnerLoopVectorizer::vectorizeMemoryInstruction(Instruction *Instr,
  1964. VectorParts *BlockInMask) {
  1965. // Attempt to issue a wide load.
  1966. LoadInst *LI = dyn_cast<LoadInst>(Instr);
  1967. StoreInst *SI = dyn_cast<StoreInst>(Instr);
  1968. assert((LI || SI) && "Invalid Load/Store instruction");
  1969. LoopVectorizationCostModel::InstWidening Decision =
  1970. Cost->getWideningDecision(Instr, VF);
  1971. assert(Decision != LoopVectorizationCostModel::CM_Unknown &&
  1972. "CM decision should be taken at this point");
  1973. if (Decision == LoopVectorizationCostModel::CM_Interleave)
  1974. return vectorizeInterleaveGroup(Instr);
  1975. Type *ScalarDataTy = getMemInstValueType(Instr);
  1976. Type *DataTy = VectorType::get(ScalarDataTy, VF);
  1977. Value *Ptr = getLoadStorePointerOperand(Instr);
  1978. unsigned Alignment = getLoadStoreAlignment(Instr);
// An alignment of 0 means target ABI alignment. We need to use the scalar's
// target ABI alignment in such a case.
  1981. const DataLayout &DL = Instr->getModule()->getDataLayout();
  1982. if (!Alignment)
  1983. Alignment = DL.getABITypeAlignment(ScalarDataTy);
  1984. unsigned AddressSpace = getLoadStoreAddressSpace(Instr);
  1985. // Determine if the pointer operand of the access is either consecutive or
  1986. // reverse consecutive.
  1987. bool Reverse = (Decision == LoopVectorizationCostModel::CM_Widen_Reverse);
  1988. bool ConsecutiveStride =
  1989. Reverse || (Decision == LoopVectorizationCostModel::CM_Widen);
  1990. bool CreateGatherScatter =
  1991. (Decision == LoopVectorizationCostModel::CM_GatherScatter);
  1992. // Either Ptr feeds a vector load/store, or a vector GEP should feed a vector
  1993. // gather/scatter. Otherwise Decision should have been to Scalarize.
  1994. assert((ConsecutiveStride || CreateGatherScatter) &&
  1995. "The instruction should be scalarized");
  1996. // Handle consecutive loads/stores.
  1997. if (ConsecutiveStride)
  1998. Ptr = getOrCreateScalarValue(Ptr, {0, 0});
  1999. VectorParts Mask;
  2000. bool isMaskRequired = BlockInMask;
  2001. if (isMaskRequired)
  2002. Mask = *BlockInMask;
  2003. bool InBounds = false;
  2004. if (auto *gep = dyn_cast<GetElementPtrInst>(
  2005. getLoadStorePointerOperand(Instr)->stripPointerCasts()))
  2006. InBounds = gep->isInBounds();
  2007. const auto CreateVecPtr = [&](unsigned Part, Value *Ptr) -> Value * {
  2008. // Calculate the pointer for the specific unroll-part.
  2009. GetElementPtrInst *PartPtr = nullptr;
  2010. if (Reverse) {
  2011. // If the address is consecutive but reversed, then the
  2012. // wide store needs to start at the last vector element.
  2013. PartPtr = cast<GetElementPtrInst>(
  2014. Builder.CreateGEP(ScalarDataTy, Ptr, Builder.getInt32(-Part * VF)));
  2015. PartPtr->setIsInBounds(InBounds);
  2016. PartPtr = cast<GetElementPtrInst>(
  2017. Builder.CreateGEP(ScalarDataTy, PartPtr, Builder.getInt32(1 - VF)));
  2018. PartPtr->setIsInBounds(InBounds);
  2019. if (isMaskRequired) // Reverse of a null all-one mask is a null mask.
  2020. Mask[Part] = reverseVector(Mask[Part]);
  2021. } else {
  2022. PartPtr = cast<GetElementPtrInst>(
  2023. Builder.CreateGEP(ScalarDataTy, Ptr, Builder.getInt32(Part * VF)));
  2024. PartPtr->setIsInBounds(InBounds);
  2025. }
  2026. return Builder.CreateBitCast(PartPtr, DataTy->getPointerTo(AddressSpace));
  2027. };
  2028. // Handle Stores:
  2029. if (SI) {
  2030. setDebugLocFromInst(Builder, SI);
  2031. for (unsigned Part = 0; Part < UF; ++Part) {
  2032. Instruction *NewSI = nullptr;
  2033. Value *StoredVal = getOrCreateVectorValue(SI->getValueOperand(), Part);
  2034. if (CreateGatherScatter) {
  2035. Value *MaskPart = isMaskRequired ? Mask[Part] : nullptr;
  2036. Value *VectorGep = getOrCreateVectorValue(Ptr, Part);
  2037. NewSI = Builder.CreateMaskedScatter(StoredVal, VectorGep, Alignment,
  2038. MaskPart);
  2039. } else {
  2040. if (Reverse) {
  2041. // If we store to reverse consecutive memory locations, then we need
  2042. // to reverse the order of elements in the stored value.
  2043. StoredVal = reverseVector(StoredVal);
  2044. // We don't want to update the value in the map as it might be used in
  2045. // another expression. So don't call resetVectorValue(StoredVal).
  2046. }
  2047. auto *VecPtr = CreateVecPtr(Part, Ptr);
  2048. if (isMaskRequired)
  2049. NewSI = Builder.CreateMaskedStore(StoredVal, VecPtr, Alignment,
  2050. Mask[Part]);
  2051. else
  2052. NewSI = Builder.CreateAlignedStore(StoredVal, VecPtr, Alignment);
  2053. }
  2054. addMetadata(NewSI, SI);
  2055. }
  2056. return;
  2057. }
  2058. // Handle loads.
  2059. assert(LI && "Must have a load instruction");
  2060. setDebugLocFromInst(Builder, LI);
  2061. for (unsigned Part = 0; Part < UF; ++Part) {
  2062. Value *NewLI;
  2063. if (CreateGatherScatter) {
  2064. Value *MaskPart = isMaskRequired ? Mask[Part] : nullptr;
  2065. Value *VectorGep = getOrCreateVectorValue(Ptr, Part);
  2066. NewLI = Builder.CreateMaskedGather(VectorGep, Alignment, MaskPart,
  2067. nullptr, "wide.masked.gather");
  2068. addMetadata(NewLI, LI);
  2069. } else {
  2070. auto *VecPtr = CreateVecPtr(Part, Ptr);
  2071. if (isMaskRequired)
  2072. NewLI = Builder.CreateMaskedLoad(VecPtr, Alignment, Mask[Part],
  2073. UndefValue::get(DataTy),
  2074. "wide.masked.load");
  2075. else
  2076. NewLI =
  2077. Builder.CreateAlignedLoad(DataTy, VecPtr, Alignment, "wide.load");
  2078. // Add metadata to the load, but setVectorValue to the reverse shuffle.
  2079. addMetadata(NewLI, LI);
  2080. if (Reverse)
  2081. NewLI = reverseVector(NewLI);
  2082. }
  2083. VectorLoopValueMap.setVectorValue(Instr, Part, NewLI);
  2084. }
  2085. }
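// Illustrative reverse-consecutive load for VF = 4 (not from the original
// source): for unroll part 0 the pointer is moved back to the first element
// of the reversed range and the loaded value is then reversed, roughly
//   %gep  = getelementptr i32, i32* %ptr, i32 -3      ; -Part * VF + (1 - VF)
//   %vptr = bitcast i32* %gep to <4 x i32>*
//   %wide = load <4 x i32>, <4 x i32>* %vptr
//   %rev  = shufflevector <4 x i32> %wide, <4 x i32> undef,
//                         <4 x i32> <i32 3, i32 2, i32 1, i32 0>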
  2086. void InnerLoopVectorizer::scalarizeInstruction(Instruction *Instr,
  2087. const VPIteration &Instance,
  2088. bool IfPredicateInstr) {
  2089. assert(!Instr->getType()->isAggregateType() && "Can't handle vectors");
  2090. setDebugLocFromInst(Builder, Instr);
// Does this instruction return a value?
  2092. bool IsVoidRetTy = Instr->getType()->isVoidTy();
  2093. Instruction *Cloned = Instr->clone();
  2094. if (!IsVoidRetTy)
  2095. Cloned->setName(Instr->getName() + ".cloned");
  2096. // Replace the operands of the cloned instructions with their scalar
  2097. // equivalents in the new loop.
  2098. for (unsigned op = 0, e = Instr->getNumOperands(); op != e; ++op) {
  2099. auto *NewOp = getOrCreateScalarValue(Instr->getOperand(op), Instance);
  2100. Cloned->setOperand(op, NewOp);
  2101. }
  2102. addNewMetadata(Cloned, Instr);
  2103. // Place the cloned scalar in the new loop.
  2104. Builder.Insert(Cloned);
  2105. // Add the cloned scalar to the scalar map entry.
  2106. VectorLoopValueMap.setScalarValue(Instr, Instance, Cloned);
  2107. // If we just cloned a new assumption, add it the assumption cache.
  2108. if (auto *II = dyn_cast<IntrinsicInst>(Cloned))
  2109. if (II->getIntrinsicID() == Intrinsic::assume)
  2110. AC->registerAssumption(II);
  2111. // End if-block.
  2112. if (IfPredicateInstr)
  2113. PredicatedInstructions.push_back(Cloned);
  2114. }
  2115. PHINode *InnerLoopVectorizer::createInductionVariable(Loop *L, Value *Start,
  2116. Value *End, Value *Step,
  2117. Instruction *DL) {
  2118. BasicBlock *Header = L->getHeader();
  2119. BasicBlock *Latch = L->getLoopLatch();
  2120. // As we're just creating this loop, it's possible no latch exists
  2121. // yet. If so, use the header as this will be a single block loop.
  2122. if (!Latch)
  2123. Latch = Header;
  2124. IRBuilder<> Builder(&*Header->getFirstInsertionPt());
  2125. Instruction *OldInst = getDebugLocFromInstOrOperands(OldInduction);
  2126. setDebugLocFromInst(Builder, OldInst);
  2127. auto *Induction = Builder.CreatePHI(Start->getType(), 2, "index");
  2128. Builder.SetInsertPoint(Latch->getTerminator());
  2129. setDebugLocFromInst(Builder, OldInst);
  2130. // Create i+1 and fill the PHINode.
  2131. Value *Next = Builder.CreateAdd(Induction, Step, "index.next");
  2132. Induction->addIncoming(Start, L->getLoopPreheader());
  2133. Induction->addIncoming(Next, Latch);
  2134. // Create the compare.
  2135. Value *ICmp = Builder.CreateICmpEQ(Next, End);
  2136. Builder.CreateCondBr(ICmp, L->getExitBlock(), Header);
  2137. // Now we have two terminators. Remove the old one from the block.
  2138. Latch->getTerminator()->eraseFromParent();
  2139. return Induction;
  2140. }
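// The canonical induction built above looks roughly like (illustrative):
//   %index      = phi i64 [ %start, %vector.ph ], [ %index.next, %vector.body ]
//   %index.next = add i64 %index, %step        ; Step is VF * UF in the skeleton
//   %cmp        = icmp eq i64 %index.next, %end
//   br i1 %cmp, label %exit, label %vector.body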
  2141. Value *InnerLoopVectorizer::getOrCreateTripCount(Loop *L) {
  2142. if (TripCount)
  2143. return TripCount;
  2144. assert(L && "Create Trip Count for null loop.");
  2145. IRBuilder<> Builder(L->getLoopPreheader()->getTerminator());
  2146. // Find the loop boundaries.
  2147. ScalarEvolution *SE = PSE.getSE();
  2148. const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount();
  2149. assert(BackedgeTakenCount != SE->getCouldNotCompute() &&
  2150. "Invalid loop count");
  2151. Type *IdxTy = Legal->getWidestInductionType();
  2152. assert(IdxTy && "No type for induction");
  2153. // The exit count might have the type of i64 while the phi is i32. This can
  2154. // happen if we have an induction variable that is sign extended before the
  2155. // compare. The only way that we get a backedge taken count is that the
  2156. // induction variable was signed and as such will not overflow. In such a case
  2157. // truncation is legal.
  2158. if (BackedgeTakenCount->getType()->getPrimitiveSizeInBits() >
  2159. IdxTy->getPrimitiveSizeInBits())
  2160. BackedgeTakenCount = SE->getTruncateOrNoop(BackedgeTakenCount, IdxTy);
  2161. BackedgeTakenCount = SE->getNoopOrZeroExtend(BackedgeTakenCount, IdxTy);
  2162. // Get the total trip count from the count by adding 1.
  2163. const SCEV *ExitCount = SE->getAddExpr(
  2164. BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType()));
  2165. const DataLayout &DL = L->getHeader()->getModule()->getDataLayout();
  2166. // Expand the trip count and place the new instructions in the preheader.
  2167. // Notice that the pre-header does not change, only the loop body.
  2168. SCEVExpander Exp(*SE, DL, "induction");
  2169. // Count holds the overall loop count (N).
  2170. TripCount = Exp.expandCodeFor(ExitCount, ExitCount->getType(),
  2171. L->getLoopPreheader()->getTerminator());
  2172. if (TripCount->getType()->isPointerTy())
  2173. TripCount =
  2174. CastInst::CreatePointerCast(TripCount, IdxTy, "exitcount.ptrcnt.to.int",
  2175. L->getLoopPreheader()->getTerminator());
  2176. return TripCount;
  2177. }
  2178. Value *InnerLoopVectorizer::getOrCreateVectorTripCount(Loop *L) {
  2179. if (VectorTripCount)
  2180. return VectorTripCount;
  2181. Value *TC = getOrCreateTripCount(L);
  2182. IRBuilder<> Builder(L->getLoopPreheader()->getTerminator());
  2183. Type *Ty = TC->getType();
  2184. Constant *Step = ConstantInt::get(Ty, VF * UF);
  2185. // If the tail is to be folded by masking, round the number of iterations N
  2186. // up to a multiple of Step instead of rounding down. This is done by first
  2187. // adding Step-1 and then rounding down. Note that it's ok if this addition
  2188. // overflows: the vector induction variable will eventually wrap to zero given
  2189. // that it starts at zero and its Step is a power of two; the loop will then
  2190. // exit, with the last early-exit vector comparison also producing all-true.
  2191. if (Cost->foldTailByMasking()) {
  2192. assert(isPowerOf2_32(VF * UF) &&
  2193. "VF*UF must be a power of 2 when folding tail by masking");
  2194. TC = Builder.CreateAdd(TC, ConstantInt::get(Ty, VF * UF - 1), "n.rnd.up");
  2195. }
  2196. // Now we need to generate the expression for the part of the loop that the
  2197. // vectorized body will execute. This is equal to N - (N % Step) if scalar
  2198. // iterations are not required for correctness, or N - Step, otherwise. Step
  2199. // is equal to the vectorization factor (number of SIMD elements) times the
  2200. // unroll factor (number of SIMD instructions).
  2201. Value *R = Builder.CreateURem(TC, Step, "n.mod.vf");
  2202. // If there is a non-reversed interleaved group that may speculatively access
  2203. // memory out-of-bounds, we need to ensure that there will be at least one
  2204. // iteration of the scalar epilogue loop. Thus, if the step evenly divides
  2205. // the trip count, we set the remainder to be equal to the step. If the step
  2206. // does not evenly divide the trip count, no adjustment is necessary since
  2207. // there will already be scalar iterations. Note that the minimum iterations
  2208. // check ensures that N >= Step.
  2209. if (VF > 1 && Cost->requiresScalarEpilogue()) {
  2210. auto *IsZero = Builder.CreateICmpEQ(R, ConstantInt::get(R->getType(), 0));
  2211. R = Builder.CreateSelect(IsZero, Step, R);
  2212. }
  2213. VectorTripCount = Builder.CreateSub(TC, R, "n.vec");
  2214. return VectorTripCount;
  2215. }
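// Worked example (illustrative): with a trip count of 17 and VF * UF = 8,
// n.mod.vf = 1 and n.vec = 16, leaving one scalar tail iteration. If the
// tail is folded by masking, the count is first rounded up (17 + 7 = 24),
// so n.mod.vf = 0 and n.vec = 24 and the masked vector loop covers all
// iterations.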
  2216. Value *InnerLoopVectorizer::createBitOrPointerCast(Value *V, VectorType *DstVTy,
  2217. const DataLayout &DL) {
  2218. // Verify that V is a vector type with same number of elements as DstVTy.
  2219. unsigned VF = DstVTy->getNumElements();
  2220. VectorType *SrcVecTy = cast<VectorType>(V->getType());
  2221. assert((VF == SrcVecTy->getNumElements()) && "Vector dimensions do not match");
  2222. Type *SrcElemTy = SrcVecTy->getElementType();
  2223. Type *DstElemTy = DstVTy->getElementType();
  2224. assert((DL.getTypeSizeInBits(SrcElemTy) == DL.getTypeSizeInBits(DstElemTy)) &&
  2225. "Vector elements must have same size");
  2226. // Do a direct cast if element types are castable.
  2227. if (CastInst::isBitOrNoopPointerCastable(SrcElemTy, DstElemTy, DL)) {
  2228. return Builder.CreateBitOrPointerCast(V, DstVTy);
  2229. }
  2230. // V cannot be directly casted to desired vector type.
  2231. // May happen when V is a floating point vector but DstVTy is a vector of
  2232. // pointers or vice-versa. Handle this using a two-step bitcast using an
  2233. // intermediate Integer type for the bitcast i.e. Ptr <-> Int <-> Float.
  2234. assert((DstElemTy->isPointerTy() != SrcElemTy->isPointerTy()) &&
  2235. "Only one type should be a pointer type");
  2236. assert((DstElemTy->isFloatingPointTy() != SrcElemTy->isFloatingPointTy()) &&
  2237. "Only one type should be a floating point type");
  2238. Type *IntTy =
  2239. IntegerType::getIntNTy(V->getContext(), DL.getTypeSizeInBits(SrcElemTy));
  2240. VectorType *VecIntTy = VectorType::get(IntTy, VF);
  2241. Value *CastVal = Builder.CreateBitOrPointerCast(V, VecIntTy);
  2242. return Builder.CreateBitOrPointerCast(CastVal, DstVTy);
  2243. }
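// Example of the two-step cast above (illustrative, assuming 32-bit
// pointers): <4 x float> cannot be bitcast directly to <4 x i32*>, so it is
// first cast to <4 x i32> and then from <4 x i32> to the pointer vector.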
  2244. void InnerLoopVectorizer::emitMinimumIterationCountCheck(Loop *L,
  2245. BasicBlock *Bypass) {
  2246. Value *Count = getOrCreateTripCount(L);
  2247. BasicBlock *BB = L->getLoopPreheader();
  2248. IRBuilder<> Builder(BB->getTerminator());
  2249. // Generate code to check if the loop's trip count is less than VF * UF, or
  2250. // equal to it in case a scalar epilogue is required; this implies that the
  2251. // vector trip count is zero. This check also covers the case where adding one
  2252. // to the backedge-taken count overflowed leading to an incorrect trip count
  2253. // of zero. In this case we will also jump to the scalar loop.
  2254. auto P = Cost->requiresScalarEpilogue() ? ICmpInst::ICMP_ULE
  2255. : ICmpInst::ICMP_ULT;
  2256. // If tail is to be folded, vector loop takes care of all iterations.
  2257. Value *CheckMinIters = Builder.getFalse();
  2258. if (!Cost->foldTailByMasking())
  2259. CheckMinIters = Builder.CreateICmp(
  2260. P, Count, ConstantInt::get(Count->getType(), VF * UF),
  2261. "min.iters.check");
  2262. BasicBlock *NewBB = BB->splitBasicBlock(BB->getTerminator(), "vector.ph");
  2263. // Update dominator tree immediately if the generated block is a
  2264. // LoopBypassBlock because SCEV expansions to generate loop bypass
  2265. // checks may query it before the current function is finished.
  2266. DT->addNewBlock(NewBB, BB);
  2267. if (L->getParentLoop())
  2268. L->getParentLoop()->addBasicBlockToLoop(NewBB, *LI);
  2269. ReplaceInstWithInst(BB->getTerminator(),
  2270. BranchInst::Create(Bypass, NewBB, CheckMinIters));
  2271. LoopBypassBlocks.push_back(BB);
  2272. }
  2273. void InnerLoopVectorizer::emitSCEVChecks(Loop *L, BasicBlock *Bypass) {
  2274. BasicBlock *BB = L->getLoopPreheader();
// Generate the code to check the SCEV assumptions that we made.
// We want the new basic block to start at the first instruction in a
// sequence of instructions that form a check.
  2278. SCEVExpander Exp(*PSE.getSE(), Bypass->getModule()->getDataLayout(),
  2279. "scev.check");
  2280. Value *SCEVCheck =
  2281. Exp.expandCodeForPredicate(&PSE.getUnionPredicate(), BB->getTerminator());
  2282. if (auto *C = dyn_cast<ConstantInt>(SCEVCheck))
  2283. if (C->isZero())
  2284. return;
  2285. assert(!BB->getParent()->hasOptSize() &&
  2286. "Cannot SCEV check stride or overflow when optimizing for size");
  2287. // Create a new block containing the stride check.
  2288. BB->setName("vector.scevcheck");
  2289. auto *NewBB = BB->splitBasicBlock(BB->getTerminator(), "vector.ph");
  2290. // Update dominator tree immediately if the generated block is a
  2291. // LoopBypassBlock because SCEV expansions to generate loop bypass
  2292. // checks may query it before the current function is finished.
  2293. DT->addNewBlock(NewBB, BB);
  2294. if (L->getParentLoop())
  2295. L->getParentLoop()->addBasicBlockToLoop(NewBB, *LI);
  2296. ReplaceInstWithInst(BB->getTerminator(),
  2297. BranchInst::Create(Bypass, NewBB, SCEVCheck));
  2298. LoopBypassBlocks.push_back(BB);
  2299. AddedSafetyChecks = true;
  2300. }
  2301. void InnerLoopVectorizer::emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass) {
  2302. // VPlan-native path does not do any analysis for runtime checks currently.
  2303. if (EnableVPlanNativePath)
  2304. return;
  2305. BasicBlock *BB = L->getLoopPreheader();
// Generate the code that checks at run time whether arrays overlap. We put
// the checks into a separate block to make the more common case of few
// elements faster.
  2309. Instruction *FirstCheckInst;
  2310. Instruction *MemRuntimeCheck;
  2311. std::tie(FirstCheckInst, MemRuntimeCheck) =
  2312. Legal->getLAI()->addRuntimeChecks(BB->getTerminator());
  2313. if (!MemRuntimeCheck)
  2314. return;
  2315. assert(!BB->getParent()->hasOptSize() &&
  2316. "Cannot emit memory checks when optimizing for size");
  2317. // Create a new block containing the memory check.
  2318. BB->setName("vector.memcheck");
  2319. auto *NewBB = BB->splitBasicBlock(BB->getTerminator(), "vector.ph");
  2320. // Update dominator tree immediately if the generated block is a
  2321. // LoopBypassBlock because SCEV expansions to generate loop bypass
  2322. // checks may query it before the current function is finished.
  2323. DT->addNewBlock(NewBB, BB);
  2324. if (L->getParentLoop())
  2325. L->getParentLoop()->addBasicBlockToLoop(NewBB, *LI);
  2326. ReplaceInstWithInst(BB->getTerminator(),
  2327. BranchInst::Create(Bypass, NewBB, MemRuntimeCheck));
  2328. LoopBypassBlocks.push_back(BB);
  2329. AddedSafetyChecks = true;
  2330. // We currently don't use LoopVersioning for the actual loop cloning but we
  2331. // still use it to add the noalias metadata.
  2332. LVer = std::make_unique<LoopVersioning>(*Legal->getLAI(), OrigLoop, LI, DT,
  2333. PSE.getSE());
  2334. LVer->prepareNoAliasMetadata();
  2335. }
  2336. Value *InnerLoopVectorizer::emitTransformedIndex(
  2337. IRBuilder<> &B, Value *Index, ScalarEvolution *SE, const DataLayout &DL,
  2338. const InductionDescriptor &ID) const {
  2339. SCEVExpander Exp(*SE, DL, "induction");
  2340. auto Step = ID.getStep();
  2341. auto StartValue = ID.getStartValue();
  2342. assert(Index->getType() == Step->getType() &&
  2343. "Index type does not match StepValue type");
  2344. // Note: the IR at this point is broken. We cannot use SE to create any new
  2345. // SCEV and then expand it, hoping that SCEV's simplification will give us
  2346. // a more optimal code. Unfortunately, attempt of doing so on invalid IR may
  2347. // lead to various SCEV crashes. So all we can do is to use builder and rely
  2348. // on InstCombine for future simplifications. Here we handle some trivial
  2349. // cases only.
  2350. auto CreateAdd = [&B](Value *X, Value *Y) {
  2351. assert(X->getType() == Y->getType() && "Types don't match!");
  2352. if (auto *CX = dyn_cast<ConstantInt>(X))
  2353. if (CX->isZero())
  2354. return Y;
  2355. if (auto *CY = dyn_cast<ConstantInt>(Y))
  2356. if (CY->isZero())
  2357. return X;
  2358. return B.CreateAdd(X, Y);
  2359. };
  2360. auto CreateMul = [&B](Value *X, Value *Y) {
  2361. assert(X->getType() == Y->getType() && "Types don't match!");
  2362. if (auto *CX = dyn_cast<ConstantInt>(X))
  2363. if (CX->isOne())
  2364. return Y;
  2365. if (auto *CY = dyn_cast<ConstantInt>(Y))
  2366. if (CY->isOne())
  2367. return X;
  2368. return B.CreateMul(X, Y);
  2369. };
  2370. switch (ID.getKind()) {
  2371. case InductionDescriptor::IK_IntInduction: {
  2372. assert(Index->getType() == StartValue->getType() &&
  2373. "Index type does not match StartValue type");
  2374. if (ID.getConstIntStepValue() && ID.getConstIntStepValue()->isMinusOne())
  2375. return B.CreateSub(StartValue, Index);
  2376. auto *Offset = CreateMul(
  2377. Index, Exp.expandCodeFor(Step, Index->getType(), &*B.GetInsertPoint()));
  2378. return CreateAdd(StartValue, Offset);
  2379. }
  2380. case InductionDescriptor::IK_PtrInduction: {
  2381. assert(isa<SCEVConstant>(Step) &&
  2382. "Expected constant step for pointer induction");
  2383. return B.CreateGEP(
  2384. StartValue->getType()->getPointerElementType(), StartValue,
  2385. CreateMul(Index, Exp.expandCodeFor(Step, Index->getType(),
  2386. &*B.GetInsertPoint())));
  2387. }
  2388. case InductionDescriptor::IK_FpInduction: {
  2389. assert(Step->getType()->isFloatingPointTy() && "Expected FP Step value");
  2390. auto InductionBinOp = ID.getInductionBinOp();
  2391. assert(InductionBinOp &&
  2392. (InductionBinOp->getOpcode() == Instruction::FAdd ||
  2393. InductionBinOp->getOpcode() == Instruction::FSub) &&
  2394. "Original bin op should be defined for FP induction");
  2395. Value *StepValue = cast<SCEVUnknown>(Step)->getValue();
  2396. // Floating point operations had to be 'fast' to enable the induction.
  2397. FastMathFlags Flags;
  2398. Flags.setFast();
  2399. Value *MulExp = B.CreateFMul(StepValue, Index);
  2400. if (isa<Instruction>(MulExp))
// We have to check because the MulExp may be a constant.
  2402. cast<Instruction>(MulExp)->setFastMathFlags(Flags);
  2403. Value *BOp = B.CreateBinOp(InductionBinOp->getOpcode(), StartValue, MulExp,
  2404. "induction");
  2405. if (isa<Instruction>(BOp))
  2406. cast<Instruction>(BOp)->setFastMathFlags(Flags);
  2407. return BOp;
  2408. }
  2409. case InductionDescriptor::IK_NoInduction:
  2410. return nullptr;
  2411. }
  2412. llvm_unreachable("invalid enum");
  2413. }
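// For an integer induction the transform above reduces to
// StartValue + Index * Step, with multiplications by a constant 1 and
// additions of a constant 0 folded away by the helpers, and a constant step
// of -1 becoming StartValue - Index. Pointer inductions use a GEP instead of
// an add, and FP inductions use the recorded fadd/fsub with fast-math flags.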
  2414. BasicBlock *InnerLoopVectorizer::createVectorizedLoopSkeleton() {
  /*
   In this function we generate a new loop. The new loop will contain
   the vectorized instructions while the old loop will continue to run the
   scalar remainder.

       [ ] <-- loop iteration number check.
    /   |
   /    v
  |    [ ] <-- vector loop bypass (may consist of multiple blocks).
  |  /  |
  | /   v
  ||   [ ]     <-- vector pre header.
  |/    |
  |     v
  |    [ ] \
  |    [ ]_|   <-- vector loop.
  |     |
  |     v
  |   -[ ]   <--- middle-block.
  |  /  |
  | /   v
  -|- >[ ]     <--- new preheader.
   |    |
   |    v
   |   [ ] \
   |   [ ]_|   <-- old scalar loop to handle remainder.
    \   |
     \  v
      >[ ]     <-- exit block.
   ...
   */
  2445. BasicBlock *OldBasicBlock = OrigLoop->getHeader();
  2446. BasicBlock *VectorPH = OrigLoop->getLoopPreheader();
  2447. BasicBlock *ExitBlock = OrigLoop->getExitBlock();
  2448. MDNode *OrigLoopID = OrigLoop->getLoopID();
  2449. assert(VectorPH && "Invalid loop structure");
  2450. assert(ExitBlock && "Must have an exit block");
  2451. // Some loops have a single integer induction variable, while other loops
  2452. // don't. One example is c++ iterators that often have multiple pointer
  2453. // induction variables. In the code below we also support a case where we
  2454. // don't have a single induction variable.
  2455. //
  2456. // We try to obtain an induction variable from the original loop as hard
  2457. // as possible. However if we don't find one that:
  2458. // - is an integer
  2459. // - counts from zero, stepping by one
  2460. // - is the size of the widest induction variable type
  2461. // then we create a new one.
  2462. OldInduction = Legal->getPrimaryInduction();
  2463. Type *IdxTy = Legal->getWidestInductionType();
  2464. // Split the single block loop into the two loop structure described above.
  2465. BasicBlock *VecBody =
  2466. VectorPH->splitBasicBlock(VectorPH->getTerminator(), "vector.body");
  2467. BasicBlock *MiddleBlock =
  2468. VecBody->splitBasicBlock(VecBody->getTerminator(), "middle.block");
  2469. BasicBlock *ScalarPH =
  2470. MiddleBlock->splitBasicBlock(MiddleBlock->getTerminator(), "scalar.ph");
  2471. // Create and register the new vector loop.
  2472. Loop *Lp = LI->AllocateLoop();
  2473. Loop *ParentLoop = OrigLoop->getParentLoop();
  2474. // Insert the new loop into the loop nest and register the new basic blocks
  2475. // before calling any utilities such as SCEV that require valid LoopInfo.
  2476. if (ParentLoop) {
  2477. ParentLoop->addChildLoop(Lp);
  2478. ParentLoop->addBasicBlockToLoop(ScalarPH, *LI);
  2479. ParentLoop->addBasicBlockToLoop(MiddleBlock, *LI);
  2480. } else {
  2481. LI->addTopLevelLoop(Lp);
  2482. }
  2483. Lp->addBasicBlockToLoop(VecBody, *LI);
  2484. // Find the loop boundaries.
  2485. Value *Count = getOrCreateTripCount(Lp);
  2486. Value *StartIdx = ConstantInt::get(IdxTy, 0);
  2487. // Now, compare the new count to zero. If it is zero skip the vector loop and
  2488. // jump to the scalar loop. This check also covers the case where the
  2489. // backedge-taken count is uint##_max: adding one to it will overflow leading
  2490. // to an incorrect trip count of zero. In this (rare) case we will also jump
  2491. // to the scalar loop.
  2492. emitMinimumIterationCountCheck(Lp, ScalarPH);
  2493. // Generate the code to check any assumptions that we've made for SCEV
  2494. // expressions.
  2495. emitSCEVChecks(Lp, ScalarPH);
// Generate the code that checks at run time whether arrays overlap. We put
// the checks into a separate block to make the more common case of few
// elements faster.
  2499. emitMemRuntimeChecks(Lp, ScalarPH);
  2500. // Generate the induction variable.
  2501. // The loop step is equal to the vectorization factor (num of SIMD elements)
  2502. // times the unroll factor (num of SIMD instructions).
  2503. Value *CountRoundDown = getOrCreateVectorTripCount(Lp);
  2504. Constant *Step = ConstantInt::get(IdxTy, VF * UF);
  2505. Induction =
  2506. createInductionVariable(Lp, StartIdx, CountRoundDown, Step,
  2507. getDebugLocFromInstOrOperands(OldInduction));
  2508. // We are going to resume the execution of the scalar loop.
  2509. // Go over all of the induction variables that we found and fix the
  2510. // PHIs that are left in the scalar version of the loop.
  2511. // The starting values of PHI nodes depend on the counter of the last
  2512. // iteration in the vectorized loop.
  2513. // If we come from a bypass edge then we need to start from the original
  2514. // start value.
  2515. // This variable saves the new starting index for the scalar loop. It is used
  2516. // to test if there are any tail iterations left once the vector loop has
  2517. // completed.
  2518. LoopVectorizationLegality::InductionList *List = Legal->getInductionVars();
  2519. for (auto &InductionEntry : *List) {
  2520. PHINode *OrigPhi = InductionEntry.first;
  2521. InductionDescriptor II = InductionEntry.second;
  2522. // Create phi nodes to merge from the backedge-taken check block.
  2523. PHINode *BCResumeVal = PHINode::Create(
  2524. OrigPhi->getType(), 3, "bc.resume.val", ScalarPH->getTerminator());
  2525. // Copy original phi DL over to the new one.
  2526. BCResumeVal->setDebugLoc(OrigPhi->getDebugLoc());
  2527. Value *&EndValue = IVEndValues[OrigPhi];
  2528. if (OrigPhi == OldInduction) {
  2529. // We know what the end value is.
  2530. EndValue = CountRoundDown;
  2531. } else {
  2532. IRBuilder<> B(Lp->getLoopPreheader()->getTerminator());
  2533. Type *StepType = II.getStep()->getType();
  2534. Instruction::CastOps CastOp =
  2535. CastInst::getCastOpcode(CountRoundDown, true, StepType, true);
  2536. Value *CRD = B.CreateCast(CastOp, CountRoundDown, StepType, "cast.crd");
  2537. const DataLayout &DL = OrigLoop->getHeader()->getModule()->getDataLayout();
  2538. EndValue = emitTransformedIndex(B, CRD, PSE.getSE(), DL, II);
  2539. EndValue->setName("ind.end");
  2540. }
  2541. // The new PHI merges the original incoming value, in case of a bypass,
  2542. // or the value at the end of the vectorized loop.
  2543. BCResumeVal->addIncoming(EndValue, MiddleBlock);
  2544. // Fix the scalar body counter (PHI node).
  2545. // The old induction's phi node in the scalar body needs the truncated
  2546. // value.
  2547. for (BasicBlock *BB : LoopBypassBlocks)
  2548. BCResumeVal->addIncoming(II.getStartValue(), BB);
  2549. OrigPhi->setIncomingValueForBlock(ScalarPH, BCResumeVal);
  2550. }
  2551. // We need the OrigLoop (scalar loop part) latch terminator to help
  2552. // produce correct debug info for the middle block BB instructions.
  2553. // The legality check stage guarantees that the loop will have a single
  2554. // latch.
  2555. assert(isa<BranchInst>(OrigLoop->getLoopLatch()->getTerminator()) &&
  2556. "Scalar loop latch terminator isn't a branch");
  2557. BranchInst *ScalarLatchBr =
  2558. cast<BranchInst>(OrigLoop->getLoopLatch()->getTerminator());
  2559. // Add a check in the middle block to see if we have completed
  2560. // all of the iterations in the first vector loop.
  2561. // If (N - N%VF) == N, then we *don't* need to run the remainder.
  2562. // If tail is to be folded, we know we don't need to run the remainder.
  2563. Value *CmpN = Builder.getTrue();
  2564. if (!Cost->foldTailByMasking()) {
  2565. CmpN =
  2566. CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_EQ, Count,
  2567. CountRoundDown, "cmp.n", MiddleBlock->getTerminator());
  2568. // Here we use the same DebugLoc as the scalar loop latch branch instead
  2569. // of the corresponding compare because they may have ended up with
  2570. // different line numbers and we want to avoid awkward line stepping while
  2571. // debugging. Eg. if the compare has got a line number inside the loop.
  2572. cast<Instruction>(CmpN)->setDebugLoc(ScalarLatchBr->getDebugLoc());
  2573. }
  2574. BranchInst *BrInst = BranchInst::Create(ExitBlock, ScalarPH, CmpN);
  2575. BrInst->setDebugLoc(ScalarLatchBr->getDebugLoc());
  2576. ReplaceInstWithInst(MiddleBlock->getTerminator(), BrInst);
  2577. // Get ready to start creating new instructions into the vectorized body.
  2578. Builder.SetInsertPoint(&*VecBody->getFirstInsertionPt());
  2579. // Save the state.
  2580. LoopVectorPreHeader = Lp->getLoopPreheader();
  2581. LoopScalarPreHeader = ScalarPH;
  2582. LoopMiddleBlock = MiddleBlock;
  2583. LoopExitBlock = ExitBlock;
  2584. LoopVectorBody = VecBody;
  2585. LoopScalarBody = OldBasicBlock;
  2586. Optional<MDNode *> VectorizedLoopID =
  2587. makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll,
  2588. LLVMLoopVectorizeFollowupVectorized});
  2589. if (VectorizedLoopID.hasValue()) {
  2590. Lp->setLoopID(VectorizedLoopID.getValue());
  2591. // Do not setAlreadyVectorized if loop attributes have been defined
  2592. // explicitly.
  2593. return LoopVectorPreHeader;
  2594. }
  2595. // Keep all loop hints from the original loop on the vector loop (we'll
  2596. // replace the vectorizer-specific hints below).
  2597. if (MDNode *LID = OrigLoop->getLoopID())
  2598. Lp->setLoopID(LID);
  2599. LoopVectorizeHints Hints(Lp, true, *ORE);
  2600. Hints.setAlreadyVectorized();
  2601. return LoopVectorPreHeader;
  2602. }
  2603. // Fix up external users of the induction variable. At this point, we are
  2604. // in LCSSA form, with all external PHIs that use the IV having one input value,
  2605. // coming from the remainder loop. We need those PHIs to also have a correct
  2606. // value for the IV when arriving directly from the middle block.
  2607. void InnerLoopVectorizer::fixupIVUsers(PHINode *OrigPhi,
  2608. const InductionDescriptor &II,
  2609. Value *CountRoundDown, Value *EndValue,
  2610. BasicBlock *MiddleBlock) {
  2611. // There are two kinds of external IV usages - those that use the value
  2612. // computed in the last iteration (the PHI) and those that use the penultimate
  2613. // value (the value that feeds into the phi from the loop latch).
  2614. // We allow both, but they, obviously, have different values.
  2615. assert(OrigLoop->getExitBlock() && "Expected a single exit block");
  2616. DenseMap<Value *, Value *> MissingVals;
  2617. // An external user of the last iteration's value should see the value that
  2618. // the remainder loop uses to initialize its own IV.
  2619. Value *PostInc = OrigPhi->getIncomingValueForBlock(OrigLoop->getLoopLatch());
  2620. for (User *U : PostInc->users()) {
  2621. Instruction *UI = cast<Instruction>(U);
  2622. if (!OrigLoop->contains(UI)) {
  2623. assert(isa<PHINode>(UI) && "Expected LCSSA form");
  2624. MissingVals[UI] = EndValue;
  2625. }
  2626. }
// An external user of the penultimate value needs to see EndValue - Step.
// The simplest way to get this is to recompute it from the constituent SCEVs,
// that is Start + (Step * (CRD - 1)).
  2630. for (User *U : OrigPhi->users()) {
  2631. auto *UI = cast<Instruction>(U);
  2632. if (!OrigLoop->contains(UI)) {
  2633. const DataLayout &DL =
  2634. OrigLoop->getHeader()->getModule()->getDataLayout();
  2635. assert(isa<PHINode>(UI) && "Expected LCSSA form");
  2636. IRBuilder<> B(MiddleBlock->getTerminator());
  2637. Value *CountMinusOne = B.CreateSub(
  2638. CountRoundDown, ConstantInt::get(CountRoundDown->getType(), 1));
  2639. Value *CMO =
  2640. !II.getStep()->getType()->isIntegerTy()
  2641. ? B.CreateCast(Instruction::SIToFP, CountMinusOne,
  2642. II.getStep()->getType())
  2643. : B.CreateSExtOrTrunc(CountMinusOne, II.getStep()->getType());
  2644. CMO->setName("cast.cmo");
  2645. Value *Escape = emitTransformedIndex(B, CMO, PSE.getSE(), DL, II);
  2646. Escape->setName("ind.escape");
  2647. MissingVals[UI] = Escape;
  2648. }
  2649. }
  2650. for (auto &I : MissingVals) {
  2651. PHINode *PHI = cast<PHINode>(I.first);
  2652. // One corner case we have to handle is two IVs "chasing" each-other,
  2653. // that is %IV2 = phi [...], [ %IV1, %latch ]
  2654. // In this case, if IV1 has an external use, we need to avoid adding both
  2655. // "last value of IV1" and "penultimate value of IV2". So, verify that we
  2656. // don't already have an incoming value for the middle block.
  2657. if (PHI->getBasicBlockIndex(MiddleBlock) == -1)
  2658. PHI->addIncoming(I.second, MiddleBlock);
  2659. }
  2660. }
  2661. namespace {
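// DenseMap traits that let structurally identical instructions (insert/extract
// element, shufflevector, and GEP) hash to the same bucket, so that cse() below
// can replace duplicates with a single representative.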
  2662. struct CSEDenseMapInfo {
  2663. static bool canHandle(const Instruction *I) {
  2664. return isa<InsertElementInst>(I) || isa<ExtractElementInst>(I) ||
  2665. isa<ShuffleVectorInst>(I) || isa<GetElementPtrInst>(I);
  2666. }
  2667. static inline Instruction *getEmptyKey() {
  2668. return DenseMapInfo<Instruction *>::getEmptyKey();
  2669. }
  2670. static inline Instruction *getTombstoneKey() {
  2671. return DenseMapInfo<Instruction *>::getTombstoneKey();
  2672. }
  2673. static unsigned getHashValue(const Instruction *I) {
  2674. assert(canHandle(I) && "Unknown instruction!");
  2675. return hash_combine(I->getOpcode(), hash_combine_range(I->value_op_begin(),
  2676. I->value_op_end()));
  2677. }
  2678. static bool isEqual(const Instruction *LHS, const Instruction *RHS) {
  2679. if (LHS == getEmptyKey() || RHS == getEmptyKey() ||
  2680. LHS == getTombstoneKey() || RHS == getTombstoneKey())
  2681. return LHS == RHS;
  2682. return LHS->isIdenticalTo(RHS);
  2683. }
  2684. };
  2685. } // end anonymous namespace
2686. /// Perform CSE of induction variable instructions.
  2687. static void cse(BasicBlock *BB) {
  2688. // Perform simple cse.
  2689. SmallDenseMap<Instruction *, Instruction *, 4, CSEDenseMapInfo> CSEMap;
  2690. for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;) {
  2691. Instruction *In = &*I++;
  2692. if (!CSEDenseMapInfo::canHandle(In))
  2693. continue;
  2694. // Check if we can replace this instruction with any of the
  2695. // visited instructions.
  2696. if (Instruction *V = CSEMap.lookup(In)) {
  2697. In->replaceAllUsesWith(V);
  2698. In->eraseFromParent();
  2699. continue;
  2700. }
  2701. CSEMap[In] = In;
  2702. }
  2703. }
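// Estimate the cost of vectorizing the call CI at the given VF: either the
// cost of VF scalar calls plus the extract/insert overhead, or the cost of a
// vector library call if one is available. NeedToScalarize reports which
// variant the returned cost corresponds to.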
  2704. unsigned LoopVectorizationCostModel::getVectorCallCost(CallInst *CI,
  2705. unsigned VF,
  2706. bool &NeedToScalarize) {
  2707. Function *F = CI->getCalledFunction();
  2708. StringRef FnName = CI->getCalledFunction()->getName();
  2709. Type *ScalarRetTy = CI->getType();
  2710. SmallVector<Type *, 4> Tys, ScalarTys;
  2711. for (auto &ArgOp : CI->arg_operands())
  2712. ScalarTys.push_back(ArgOp->getType());
  2713. // Estimate cost of scalarized vector call. The source operands are assumed
  2714. // to be vectors, so we need to extract individual elements from there,
  2715. // execute VF scalar calls, and then gather the result into the vector return
  2716. // value.
  2717. unsigned ScalarCallCost = TTI.getCallInstrCost(F, ScalarRetTy, ScalarTys);
  2718. if (VF == 1)
  2719. return ScalarCallCost;
  2720. // Compute corresponding vector type for return value and arguments.
  2721. Type *RetTy = ToVectorTy(ScalarRetTy, VF);
  2722. for (Type *ScalarTy : ScalarTys)
  2723. Tys.push_back(ToVectorTy(ScalarTy, VF));
  2724. // Compute costs of unpacking argument values for the scalar calls and
  2725. // packing the return values to a vector.
  2726. unsigned ScalarizationCost = getScalarizationOverhead(CI, VF);
  2727. unsigned Cost = ScalarCallCost * VF + ScalarizationCost;
  2728. // If we can't emit a vector call for this function, then the currently found
  2729. // cost is the cost we need to return.
  2730. NeedToScalarize = true;
  2731. if (!TLI || !TLI->isFunctionVectorizable(FnName, VF) || CI->isNoBuiltin())
  2732. return Cost;
  2733. // If the corresponding vector cost is cheaper, return its cost.
  2734. unsigned VectorCallCost = TTI.getCallInstrCost(nullptr, RetTy, Tys);
  2735. if (VectorCallCost < Cost) {
  2736. NeedToScalarize = false;
  2737. return VectorCallCost;
  2738. }
  2739. return Cost;
  2740. }
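// Return the cost of widening CI into a call of the corresponding vector
// intrinsic at the given VF, taking the call's fast-math flags into account.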
  2741. unsigned LoopVectorizationCostModel::getVectorIntrinsicCost(CallInst *CI,
  2742. unsigned VF) {
  2743. Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
  2744. assert(ID && "Expected intrinsic call!");
  2745. FastMathFlags FMF;
  2746. if (auto *FPMO = dyn_cast<FPMathOperator>(CI))
  2747. FMF = FPMO->getFastMathFlags();
  2748. SmallVector<Value *, 4> Operands(CI->arg_operands());
  2749. return TTI.getIntrinsicInstrCost(ID, CI->getType(), Operands, FMF, VF);
  2750. }
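// Helpers used when shrinking to minimal bitwidths: pick whichever of the two
// integer vector types has the narrower (respectively wider) element type.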
  2751. static Type *smallestIntegerVectorType(Type *T1, Type *T2) {
  2752. auto *I1 = cast<IntegerType>(T1->getVectorElementType());
  2753. auto *I2 = cast<IntegerType>(T2->getVectorElementType());
  2754. return I1->getBitWidth() < I2->getBitWidth() ? T1 : T2;
  2755. }
  2756. static Type *largestIntegerVectorType(Type *T1, Type *T2) {
  2757. auto *I1 = cast<IntegerType>(T1->getVectorElementType());
  2758. auto *I2 = cast<IntegerType>(T2->getVectorElementType());
  2759. return I1->getBitWidth() > I2->getBitWidth() ? T1 : T2;
  2760. }
  2761. void InnerLoopVectorizer::truncateToMinimalBitwidths() {
  2762. // For every instruction `I` in MinBWs, truncate the operands, create a
  2763. // truncated version of `I` and reextend its result. InstCombine runs
  2764. // later and will remove any ext/trunc pairs.
  2765. SmallPtrSet<Value *, 4> Erased;
  2766. for (const auto &KV : Cost->getMinimalBitwidths()) {
  2767. // If the value wasn't vectorized, we must maintain the original scalar
  2768. // type. The absence of the value from VectorLoopValueMap indicates that it
  2769. // wasn't vectorized.
  2770. if (!VectorLoopValueMap.hasAnyVectorValue(KV.first))
  2771. continue;
  2772. for (unsigned Part = 0; Part < UF; ++Part) {
  2773. Value *I = getOrCreateVectorValue(KV.first, Part);
  2774. if (Erased.find(I) != Erased.end() || I->use_empty() ||
  2775. !isa<Instruction>(I))
  2776. continue;
  2777. Type *OriginalTy = I->getType();
  2778. Type *ScalarTruncatedTy =
  2779. IntegerType::get(OriginalTy->getContext(), KV.second);
  2780. Type *TruncatedTy = VectorType::get(ScalarTruncatedTy,
  2781. OriginalTy->getVectorNumElements());
  2782. if (TruncatedTy == OriginalTy)
  2783. continue;
  2784. IRBuilder<> B(cast<Instruction>(I));
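// Shrink an operand to the truncated type. If the operand is a zext from that
// exact type, reuse its source instead of emitting a new cast.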
  2785. auto ShrinkOperand = [&](Value *V) -> Value * {
  2786. if (auto *ZI = dyn_cast<ZExtInst>(V))
  2787. if (ZI->getSrcTy() == TruncatedTy)
  2788. return ZI->getOperand(0);
  2789. return B.CreateZExtOrTrunc(V, TruncatedTy);
  2790. };
  2791. // The actual instruction modification depends on the instruction type,
  2792. // unfortunately.
  2793. Value *NewI = nullptr;
  2794. if (auto *BO = dyn_cast<BinaryOperator>(I)) {
  2795. NewI = B.CreateBinOp(BO->getOpcode(), ShrinkOperand(BO->getOperand(0)),
  2796. ShrinkOperand(BO->getOperand(1)));
  2797. // Any wrapping introduced by shrinking this operation shouldn't be
  2798. // considered undefined behavior. So, we can't unconditionally copy
  2799. // arithmetic wrapping flags to NewI.
  2800. cast<BinaryOperator>(NewI)->copyIRFlags(I, /*IncludeWrapFlags=*/false);
  2801. } else if (auto *CI = dyn_cast<ICmpInst>(I)) {
  2802. NewI =
  2803. B.CreateICmp(CI->getPredicate(), ShrinkOperand(CI->getOperand(0)),
  2804. ShrinkOperand(CI->getOperand(1)));
  2805. } else if (auto *SI = dyn_cast<SelectInst>(I)) {
  2806. NewI = B.CreateSelect(SI->getCondition(),
  2807. ShrinkOperand(SI->getTrueValue()),
  2808. ShrinkOperand(SI->getFalseValue()));
  2809. } else if (auto *CI = dyn_cast<CastInst>(I)) {
  2810. switch (CI->getOpcode()) {
  2811. default:
  2812. llvm_unreachable("Unhandled cast!");
  2813. case Instruction::Trunc:
  2814. NewI = ShrinkOperand(CI->getOperand(0));
  2815. break;
  2816. case Instruction::SExt:
  2817. NewI = B.CreateSExtOrTrunc(
  2818. CI->getOperand(0),
  2819. smallestIntegerVectorType(OriginalTy, TruncatedTy));
  2820. break;
  2821. case Instruction::ZExt:
  2822. NewI = B.CreateZExtOrTrunc(
  2823. CI->getOperand(0),
  2824. smallestIntegerVectorType(OriginalTy, TruncatedTy));
  2825. break;
  2826. }
  2827. } else if (auto *SI = dyn_cast<ShuffleVectorInst>(I)) {
  2828. auto Elements0 = SI->getOperand(0)->getType()->getVectorNumElements();
  2829. auto *O0 = B.CreateZExtOrTrunc(
  2830. SI->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements0));
  2831. auto Elements1 = SI->getOperand(1)->getType()->getVectorNumElements();
  2832. auto *O1 = B.CreateZExtOrTrunc(
  2833. SI->getOperand(1), VectorType::get(ScalarTruncatedTy, Elements1));
  2834. NewI = B.CreateShuffleVector(O0, O1, SI->getMask());
  2835. } else if (isa<LoadInst>(I) || isa<PHINode>(I)) {
  2836. // Don't do anything with the operands, just extend the result.
  2837. continue;
  2838. } else if (auto *IE = dyn_cast<InsertElementInst>(I)) {
  2839. auto Elements = IE->getOperand(0)->getType()->getVectorNumElements();
  2840. auto *O0 = B.CreateZExtOrTrunc(
  2841. IE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements));
  2842. auto *O1 = B.CreateZExtOrTrunc(IE->getOperand(1), ScalarTruncatedTy);
  2843. NewI = B.CreateInsertElement(O0, O1, IE->getOperand(2));
  2844. } else if (auto *EE = dyn_cast<ExtractElementInst>(I)) {
  2845. auto Elements = EE->getOperand(0)->getType()->getVectorNumElements();
  2846. auto *O0 = B.CreateZExtOrTrunc(
  2847. EE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements));
  2848. NewI = B.CreateExtractElement(O0, EE->getOperand(2));
  2849. } else {
  2850. // If we don't know what to do, be conservative and don't do anything.
  2851. continue;
  2852. }
  2853. // Lastly, extend the result.
  2854. NewI->takeName(cast<Instruction>(I));
  2855. Value *Res = B.CreateZExtOrTrunc(NewI, OriginalTy);
  2856. I->replaceAllUsesWith(Res);
  2857. cast<Instruction>(I)->eraseFromParent();
  2858. Erased.insert(I);
  2859. VectorLoopValueMap.resetVectorValue(KV.first, Part, Res);
  2860. }
  2861. }
2862. // The shrinking above may have left some of the ZExts we created dead. Clean them up.
  2863. for (const auto &KV : Cost->getMinimalBitwidths()) {
  2864. // If the value wasn't vectorized, we must maintain the original scalar
  2865. // type. The absence of the value from VectorLoopValueMap indicates that it
  2866. // wasn't vectorized.
  2867. if (!VectorLoopValueMap.hasAnyVectorValue(KV.first))
  2868. continue;
  2869. for (unsigned Part = 0; Part < UF; ++Part) {
  2870. Value *I = getOrCreateVectorValue(KV.first, Part);
  2871. ZExtInst *Inst = dyn_cast<ZExtInst>(I);
  2872. if (Inst && Inst->use_empty()) {
  2873. Value *NewI = Inst->getOperand(0);
  2874. Inst->eraseFromParent();
  2875. VectorLoopValueMap.resetVectorValue(KV.first, Part, NewI);
  2876. }
  2877. }
  2878. }
  2879. }
  2880. void InnerLoopVectorizer::fixVectorizedLoop() {
  2881. // Insert truncates and extends for any truncated instructions as hints to
  2882. // InstCombine.
  2883. if (VF > 1)
  2884. truncateToMinimalBitwidths();
  2885. // Fix widened non-induction PHIs by setting up the PHI operands.
  2886. if (OrigPHIsToFix.size()) {
  2887. assert(EnableVPlanNativePath &&
  2888. "Unexpected non-induction PHIs for fixup in non VPlan-native path");
  2889. fixNonInductionPHIs();
  2890. }
  2891. // At this point every instruction in the original loop is widened to a
  2892. // vector form. Now we need to fix the recurrences in the loop. These PHI
  2893. // nodes are currently empty because we did not want to introduce cycles.
  2894. // This is the second stage of vectorizing recurrences.
  2895. fixCrossIterationPHIs();
  2896. // Update the dominator tree.
  2897. //
  2898. // FIXME: After creating the structure of the new loop, the dominator tree is
  2899. // no longer up-to-date, and it remains that way until we update it
  2900. // here. An out-of-date dominator tree is problematic for SCEV,
  2901. // because SCEVExpander uses it to guide code generation. The
2902. // vectorizer uses SCEVExpander in several places. Instead, we should
  2903. // keep the dominator tree up-to-date as we go.
  2904. updateAnalysis();
  2905. // Fix-up external users of the induction variables.
  2906. for (auto &Entry : *Legal->getInductionVars())
  2907. fixupIVUsers(Entry.first, Entry.second,
  2908. getOrCreateVectorTripCount(LI->getLoopFor(LoopVectorBody)),
  2909. IVEndValues[Entry.first], LoopMiddleBlock);
  2910. fixLCSSAPHIs();
  2911. for (Instruction *PI : PredicatedInstructions)
  2912. sinkScalarOperands(&*PI);
  2913. // Remove redundant induction instructions.
  2914. cse(LoopVectorBody);
  2915. }
  2916. void InnerLoopVectorizer::fixCrossIterationPHIs() {
  2917. // In order to support recurrences we need to be able to vectorize Phi nodes.
  2918. // Phi nodes have cycles, so we need to vectorize them in two stages. This is
  2919. // stage #2: We now need to fix the recurrences by adding incoming edges to
  2920. // the currently empty PHI nodes. At this point every instruction in the
  2921. // original loop is widened to a vector form so we can use them to construct
  2922. // the incoming edges.
  2923. for (PHINode &Phi : OrigLoop->getHeader()->phis()) {
  2924. // Handle first-order recurrences and reductions that need to be fixed.
  2925. if (Legal->isFirstOrderRecurrence(&Phi))
  2926. fixFirstOrderRecurrence(&Phi);
  2927. else if (Legal->isReductionVariable(&Phi))
  2928. fixReduction(&Phi);
  2929. }
  2930. }
  2931. void InnerLoopVectorizer::fixFirstOrderRecurrence(PHINode *Phi) {
  2932. // This is the second phase of vectorizing first-order recurrences. An
  2933. // overview of the transformation is described below. Suppose we have the
  2934. // following loop.
  2935. //
  2936. // for (int i = 0; i < n; ++i)
  2937. // b[i] = a[i] - a[i - 1];
  2938. //
  2939. // There is a first-order recurrence on "a". For this loop, the shorthand
  2940. // scalar IR looks like:
  2941. //
  2942. // scalar.ph:
  2943. // s_init = a[-1]
  2944. // br scalar.body
  2945. //
  2946. // scalar.body:
  2947. // i = phi [0, scalar.ph], [i+1, scalar.body]
  2948. // s1 = phi [s_init, scalar.ph], [s2, scalar.body]
  2949. // s2 = a[i]
  2950. // b[i] = s2 - s1
  2951. // br cond, scalar.body, ...
  2952. //
2953. // In this example, s1 is a recurrence because its value depends on the
  2954. // previous iteration. In the first phase of vectorization, we created a
  2955. // temporary value for s1. We now complete the vectorization and produce the
  2956. // shorthand vector IR shown below (for VF = 4, UF = 1).
  2957. //
  2958. // vector.ph:
  2959. // v_init = vector(..., ..., ..., a[-1])
  2960. // br vector.body
  2961. //
  2962. // vector.body
  2963. // i = phi [0, vector.ph], [i+4, vector.body]
  2964. // v1 = phi [v_init, vector.ph], [v2, vector.body]
  2965. // v2 = a[i, i+1, i+2, i+3];
  2966. // v3 = vector(v1(3), v2(0, 1, 2))
  2967. // b[i, i+1, i+2, i+3] = v2 - v3
  2968. // br cond, vector.body, middle.block
  2969. //
  2970. // middle.block:
  2971. // x = v2(3)
  2972. // br scalar.ph
  2973. //
  2974. // scalar.ph:
  2975. // s_init = phi [x, middle.block], [a[-1], otherwise]
  2976. // br scalar.body
  2977. //
2978. // After the vector loop completes execution, we extract the next value of
  2979. // the recurrence (x) to use as the initial value in the scalar loop.
  2980. // Get the original loop preheader and single loop latch.
  2981. auto *Preheader = OrigLoop->getLoopPreheader();
  2982. auto *Latch = OrigLoop->getLoopLatch();
  2983. // Get the initial and previous values of the scalar recurrence.
  2984. auto *ScalarInit = Phi->getIncomingValueForBlock(Preheader);
  2985. auto *Previous = Phi->getIncomingValueForBlock(Latch);
  2986. // Create a vector from the initial value.
  2987. auto *VectorInit = ScalarInit;
  2988. if (VF > 1) {
  2989. Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
  2990. VectorInit = Builder.CreateInsertElement(
  2991. UndefValue::get(VectorType::get(VectorInit->getType(), VF)), VectorInit,
  2992. Builder.getInt32(VF - 1), "vector.recur.init");
  2993. }
  2994. // We constructed a temporary phi node in the first phase of vectorization.
  2995. // This phi node will eventually be deleted.
  2996. Builder.SetInsertPoint(
  2997. cast<Instruction>(VectorLoopValueMap.getVectorValue(Phi, 0)));
  2998. // Create a phi node for the new recurrence. The current value will either be
  2999. // the initial value inserted into a vector or loop-varying vector value.
  3000. auto *VecPhi = Builder.CreatePHI(VectorInit->getType(), 2, "vector.recur");
  3001. VecPhi->addIncoming(VectorInit, LoopVectorPreHeader);
  3002. // Get the vectorized previous value of the last part UF - 1. It appears last
  3003. // among all unrolled iterations, due to the order of their construction.
  3004. Value *PreviousLastPart = getOrCreateVectorValue(Previous, UF - 1);
  3005. // Set the insertion point after the previous value if it is an instruction.
  3006. // Note that the previous value may have been constant-folded so it is not
  3007. // guaranteed to be an instruction in the vector loop. Also, if the previous
  3008. // value is a phi node, we should insert after all the phi nodes to avoid
  3009. // breaking basic block verification.
  3010. if (LI->getLoopFor(LoopVectorBody)->isLoopInvariant(PreviousLastPart) ||
  3011. isa<PHINode>(PreviousLastPart))
  3012. Builder.SetInsertPoint(&*LoopVectorBody->getFirstInsertionPt());
  3013. else
  3014. Builder.SetInsertPoint(
  3015. &*++BasicBlock::iterator(cast<Instruction>(PreviousLastPart)));
  3016. // We will construct a vector for the recurrence by combining the values for
  3017. // the current and previous iterations. This is the required shuffle mask.
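// For example, with VF = 4 the mask is <3, 4, 5, 6>: the last lane of the
// incoming vector followed by the first three lanes of the previous part.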
  3018. SmallVector<Constant *, 8> ShuffleMask(VF);
  3019. ShuffleMask[0] = Builder.getInt32(VF - 1);
  3020. for (unsigned I = 1; I < VF; ++I)
  3021. ShuffleMask[I] = Builder.getInt32(I + VF - 1);
  3022. // The vector from which to take the initial value for the current iteration
  3023. // (actual or unrolled). Initially, this is the vector phi node.
  3024. Value *Incoming = VecPhi;
  3025. // Shuffle the current and previous vector and update the vector parts.
  3026. for (unsigned Part = 0; Part < UF; ++Part) {
  3027. Value *PreviousPart = getOrCreateVectorValue(Previous, Part);
  3028. Value *PhiPart = VectorLoopValueMap.getVectorValue(Phi, Part);
  3029. auto *Shuffle =
  3030. VF > 1 ? Builder.CreateShuffleVector(Incoming, PreviousPart,
  3031. ConstantVector::get(ShuffleMask))
  3032. : Incoming;
  3033. PhiPart->replaceAllUsesWith(Shuffle);
  3034. cast<Instruction>(PhiPart)->eraseFromParent();
  3035. VectorLoopValueMap.resetVectorValue(Phi, Part, Shuffle);
  3036. Incoming = PreviousPart;
  3037. }
  3038. // Fix the latch value of the new recurrence in the vector loop.
  3039. VecPhi->addIncoming(Incoming, LI->getLoopFor(LoopVectorBody)->getLoopLatch());
  3040. // Extract the last vector element in the middle block. This will be the
  3041. // initial value for the recurrence when jumping to the scalar loop.
  3042. auto *ExtractForScalar = Incoming;
  3043. if (VF > 1) {
  3044. Builder.SetInsertPoint(LoopMiddleBlock->getTerminator());
  3045. ExtractForScalar = Builder.CreateExtractElement(
  3046. ExtractForScalar, Builder.getInt32(VF - 1), "vector.recur.extract");
  3047. }
  3048. // Extract the second last element in the middle block if the
  3049. // Phi is used outside the loop. We need to extract the phi itself
  3050. // and not the last element (the phi update in the current iteration). This
  3051. // will be the value when jumping to the exit block from the LoopMiddleBlock,
  3052. // when the scalar loop is not run at all.
  3053. Value *ExtractForPhiUsedOutsideLoop = nullptr;
  3054. if (VF > 1)
  3055. ExtractForPhiUsedOutsideLoop = Builder.CreateExtractElement(
  3056. Incoming, Builder.getInt32(VF - 2), "vector.recur.extract.for.phi");
3057. // When the loop is unrolled without vectorizing, initialize
3058. // ExtractForPhiUsedOutsideLoop with the value just prior to the unrolled value of
  3059. // `Incoming`. This is analogous to the vectorized case above: extracting the
  3060. // second last element when VF > 1.
  3061. else if (UF > 1)
  3062. ExtractForPhiUsedOutsideLoop = getOrCreateVectorValue(Previous, UF - 2);
  3063. // Fix the initial value of the original recurrence in the scalar loop.
  3064. Builder.SetInsertPoint(&*LoopScalarPreHeader->begin());
  3065. auto *Start = Builder.CreatePHI(Phi->getType(), 2, "scalar.recur.init");
  3066. for (auto *BB : predecessors(LoopScalarPreHeader)) {
  3067. auto *Incoming = BB == LoopMiddleBlock ? ExtractForScalar : ScalarInit;
  3068. Start->addIncoming(Incoming, BB);
  3069. }
  3070. Phi->setIncomingValueForBlock(LoopScalarPreHeader, Start);
  3071. Phi->setName("scalar.recur");
  3072. // Finally, fix users of the recurrence outside the loop. The users will need
  3073. // either the last value of the scalar recurrence or the last value of the
  3074. // vector recurrence we extracted in the middle block. Since the loop is in
  3075. // LCSSA form, we just need to find all the phi nodes for the original scalar
  3076. // recurrence in the exit block, and then add an edge for the middle block.
  3077. for (PHINode &LCSSAPhi : LoopExitBlock->phis()) {
  3078. if (LCSSAPhi.getIncomingValue(0) == Phi) {
  3079. LCSSAPhi.addIncoming(ExtractForPhiUsedOutsideLoop, LoopMiddleBlock);
  3080. }
  3081. }
  3082. }
  3083. void InnerLoopVectorizer::fixReduction(PHINode *Phi) {
  3084. Constant *Zero = Builder.getInt32(0);
3085. // Get its reduction variable descriptor.
  3086. assert(Legal->isReductionVariable(Phi) &&
  3087. "Unable to find the reduction variable");
  3088. RecurrenceDescriptor RdxDesc = (*Legal->getReductionVars())[Phi];
  3089. RecurrenceDescriptor::RecurrenceKind RK = RdxDesc.getRecurrenceKind();
  3090. TrackingVH<Value> ReductionStartValue = RdxDesc.getRecurrenceStartValue();
  3091. Instruction *LoopExitInst = RdxDesc.getLoopExitInstr();
  3092. RecurrenceDescriptor::MinMaxRecurrenceKind MinMaxKind =
  3093. RdxDesc.getMinMaxRecurrenceKind();
  3094. setDebugLocFromInst(Builder, ReductionStartValue);
  3095. // We need to generate a reduction vector from the incoming scalar.
  3096. // To do so, we need to generate the 'identity' vector and override
  3097. // one of the elements with the incoming scalar reduction. We need
  3098. // to do it in the vector-loop preheader.
  3099. Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
  3100. // This is the vector-clone of the value that leaves the loop.
  3101. Type *VecTy = getOrCreateVectorValue(LoopExitInst, 0)->getType();
3102. // Find the reduction identity value: zero for addition, or, and xor;
3103. // one for multiplication; -1 (all ones) for and.
  3104. Value *Identity;
  3105. Value *VectorStart;
  3106. if (RK == RecurrenceDescriptor::RK_IntegerMinMax ||
  3107. RK == RecurrenceDescriptor::RK_FloatMinMax) {
3108. // MinMax reductions have the start value as their identity.
  3109. if (VF == 1) {
  3110. VectorStart = Identity = ReductionStartValue;
  3111. } else {
  3112. VectorStart = Identity =
  3113. Builder.CreateVectorSplat(VF, ReductionStartValue, "minmax.ident");
  3114. }
  3115. } else {
  3116. // Handle other reduction kinds:
  3117. Constant *Iden = RecurrenceDescriptor::getRecurrenceIdentity(
  3118. RK, VecTy->getScalarType());
  3119. if (VF == 1) {
  3120. Identity = Iden;
  3121. // This vector is the Identity vector where the first element is the
  3122. // incoming scalar reduction.
  3123. VectorStart = ReductionStartValue;
  3124. } else {
  3125. Identity = ConstantVector::getSplat(VF, Iden);
  3126. // This vector is the Identity vector where the first element is the
  3127. // incoming scalar reduction.
  3128. VectorStart =
  3129. Builder.CreateInsertElement(Identity, ReductionStartValue, Zero);
  3130. }
  3131. }
  3132. // Fix the vector-loop phi.
  3133. // Reductions do not have to start at zero. They can start with
  3134. // any loop invariant values.
  3135. BasicBlock *Latch = OrigLoop->getLoopLatch();
  3136. Value *LoopVal = Phi->getIncomingValueForBlock(Latch);
  3137. for (unsigned Part = 0; Part < UF; ++Part) {
  3138. Value *VecRdxPhi = getOrCreateVectorValue(Phi, Part);
  3139. Value *Val = getOrCreateVectorValue(LoopVal, Part);
3140. // Make sure to add the reduction start value only to the
  3141. // first unroll part.
  3142. Value *StartVal = (Part == 0) ? VectorStart : Identity;
  3143. cast<PHINode>(VecRdxPhi)->addIncoming(StartVal, LoopVectorPreHeader);
  3144. cast<PHINode>(VecRdxPhi)
  3145. ->addIncoming(Val, LI->getLoopFor(LoopVectorBody)->getLoopLatch());
  3146. }
  3147. // Before each round, move the insertion point right between
  3148. // the PHIs and the values we are going to write.
  3149. // This allows us to write both PHINodes and the extractelement
  3150. // instructions.
  3151. Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt());
  3152. setDebugLocFromInst(Builder, LoopExitInst);
  3153. // If tail is folded by masking, the vector value to leave the loop should be
  3154. // a Select choosing between the vectorized LoopExitInst and vectorized Phi,
  3155. // instead of the former.
  3156. if (Cost->foldTailByMasking()) {
  3157. for (unsigned Part = 0; Part < UF; ++Part) {
  3158. Value *VecLoopExitInst =
  3159. VectorLoopValueMap.getVectorValue(LoopExitInst, Part);
  3160. Value *Sel = nullptr;
  3161. for (User *U : VecLoopExitInst->users()) {
  3162. if (isa<SelectInst>(U)) {
  3163. assert(!Sel && "Reduction exit feeding two selects");
  3164. Sel = U;
  3165. } else
  3166. assert(isa<PHINode>(U) && "Reduction exit must feed Phi's or select");
  3167. }
  3168. assert(Sel && "Reduction exit feeds no select");
  3169. VectorLoopValueMap.resetVectorValue(LoopExitInst, Part, Sel);
  3170. }
  3171. }
  3172. // If the vector reduction can be performed in a smaller type, we truncate
  3173. // then extend the loop exit value to enable InstCombine to evaluate the
  3174. // entire expression in the smaller type.
  3175. if (VF > 1 && Phi->getType() != RdxDesc.getRecurrenceType()) {
  3176. Type *RdxVecTy = VectorType::get(RdxDesc.getRecurrenceType(), VF);
  3177. Builder.SetInsertPoint(
  3178. LI->getLoopFor(LoopVectorBody)->getLoopLatch()->getTerminator());
  3179. VectorParts RdxParts(UF);
  3180. for (unsigned Part = 0; Part < UF; ++Part) {
  3181. RdxParts[Part] = VectorLoopValueMap.getVectorValue(LoopExitInst, Part);
  3182. Value *Trunc = Builder.CreateTrunc(RdxParts[Part], RdxVecTy);
  3183. Value *Extnd = RdxDesc.isSigned() ? Builder.CreateSExt(Trunc, VecTy)
  3184. : Builder.CreateZExt(Trunc, VecTy);
  3185. for (Value::user_iterator UI = RdxParts[Part]->user_begin();
  3186. UI != RdxParts[Part]->user_end();)
  3187. if (*UI != Trunc) {
  3188. (*UI++)->replaceUsesOfWith(RdxParts[Part], Extnd);
  3189. RdxParts[Part] = Extnd;
  3190. } else {
  3191. ++UI;
  3192. }
  3193. }
  3194. Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt());
  3195. for (unsigned Part = 0; Part < UF; ++Part) {
  3196. RdxParts[Part] = Builder.CreateTrunc(RdxParts[Part], RdxVecTy);
  3197. VectorLoopValueMap.resetVectorValue(LoopExitInst, Part, RdxParts[Part]);
  3198. }
  3199. }
  3200. // Reduce all of the unrolled parts into a single vector.
  3201. Value *ReducedPartRdx = VectorLoopValueMap.getVectorValue(LoopExitInst, 0);
  3202. unsigned Op = RecurrenceDescriptor::getRecurrenceBinOp(RK);
  3203. // The middle block terminator has already been assigned a DebugLoc here (the
  3204. // OrigLoop's single latch terminator). We want the whole middle block to
  3205. // appear to execute on this line because: (a) it is all compiler generated,
  3206. // (b) these instructions are always executed after evaluating the latch
  3207. // conditional branch, and (c) other passes may add new predecessors which
  3208. // terminate on this line. This is the easiest way to ensure we don't
  3209. // accidentally cause an extra step back into the loop while debugging.
  3210. setDebugLocFromInst(Builder, LoopMiddleBlock->getTerminator());
  3211. for (unsigned Part = 1; Part < UF; ++Part) {
  3212. Value *RdxPart = VectorLoopValueMap.getVectorValue(LoopExitInst, Part);
  3213. if (Op != Instruction::ICmp && Op != Instruction::FCmp)
  3214. // Floating point operations had to be 'fast' to enable the reduction.
  3215. ReducedPartRdx = addFastMathFlag(
  3216. Builder.CreateBinOp((Instruction::BinaryOps)Op, RdxPart,
  3217. ReducedPartRdx, "bin.rdx"),
  3218. RdxDesc.getFastMathFlags());
  3219. else
  3220. ReducedPartRdx = createMinMaxOp(Builder, MinMaxKind, ReducedPartRdx,
  3221. RdxPart);
  3222. }
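// Reduce the combined vector to a single scalar. createTargetReduction picks a
// target-preferred form (typically a reduction intrinsic or a shuffle-based
// tree), and the result is extended back to the phi type if the reduction was
// performed in a narrower type.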
  3223. if (VF > 1) {
  3224. bool NoNaN = Legal->hasFunNoNaNAttr();
  3225. ReducedPartRdx =
  3226. createTargetReduction(Builder, TTI, RdxDesc, ReducedPartRdx, NoNaN);
  3227. // If the reduction can be performed in a smaller type, we need to extend
  3228. // the reduction to the wider type before we branch to the original loop.
  3229. if (Phi->getType() != RdxDesc.getRecurrenceType())
  3230. ReducedPartRdx =
  3231. RdxDesc.isSigned()
  3232. ? Builder.CreateSExt(ReducedPartRdx, Phi->getType())
  3233. : Builder.CreateZExt(ReducedPartRdx, Phi->getType());
  3234. }
  3235. // Create a phi node that merges control-flow from the backedge-taken check
  3236. // block and the middle block.
  3237. PHINode *BCBlockPhi = PHINode::Create(Phi->getType(), 2, "bc.merge.rdx",
  3238. LoopScalarPreHeader->getTerminator());
  3239. for (unsigned I = 0, E = LoopBypassBlocks.size(); I != E; ++I)
  3240. BCBlockPhi->addIncoming(ReductionStartValue, LoopBypassBlocks[I]);
  3241. BCBlockPhi->addIncoming(ReducedPartRdx, LoopMiddleBlock);
  3242. // Now, we need to fix the users of the reduction variable
  3243. // inside and outside of the scalar remainder loop.
  3244. // We know that the loop is in LCSSA form. We need to update the
  3245. // PHI nodes in the exit blocks.
  3246. for (PHINode &LCSSAPhi : LoopExitBlock->phis()) {
  3247. // All PHINodes need to have a single entry edge, or two if
  3248. // we already fixed them.
  3249. assert(LCSSAPhi.getNumIncomingValues() < 3 && "Invalid LCSSA PHI");
  3250. // We found a reduction value exit-PHI. Update it with the
  3251. // incoming bypass edge.
  3252. if (LCSSAPhi.getIncomingValue(0) == LoopExitInst)
  3253. LCSSAPhi.addIncoming(ReducedPartRdx, LoopMiddleBlock);
  3254. } // end of the LCSSA phi scan.
  3255. // Fix the scalar loop reduction variable with the incoming reduction sum
  3256. // from the vector body and from the backedge value.
  3257. int IncomingEdgeBlockIdx =
  3258. Phi->getBasicBlockIndex(OrigLoop->getLoopLatch());
  3259. assert(IncomingEdgeBlockIdx >= 0 && "Invalid block index");
  3260. // Pick the other block.
  3261. int SelfEdgeBlockIdx = (IncomingEdgeBlockIdx ? 0 : 1);
  3262. Phi->setIncomingValue(SelfEdgeBlockIdx, BCBlockPhi);
  3263. Phi->setIncomingValue(IncomingEdgeBlockIdx, LoopExitInst);
  3264. }
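// Give every LCSSA phi in the exit block that still has a single incoming
// value an additional incoming value from the middle block, using the
// appropriate scalar lane (the last lane, or lane zero for values that are
// uniform after vectorization) of the final unroll part.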
  3265. void InnerLoopVectorizer::fixLCSSAPHIs() {
  3266. for (PHINode &LCSSAPhi : LoopExitBlock->phis()) {
  3267. if (LCSSAPhi.getNumIncomingValues() == 1) {
  3268. auto *IncomingValue = LCSSAPhi.getIncomingValue(0);
3269. // Non-instruction incoming values have a single value, so lane zero suffices.
  3270. unsigned LastLane = 0;
  3271. if (isa<Instruction>(IncomingValue))
  3272. LastLane = Cost->isUniformAfterVectorization(
  3273. cast<Instruction>(IncomingValue), VF)
  3274. ? 0
  3275. : VF - 1;
  3276. // Can be a loop invariant incoming value or the last scalar value to be
  3277. // extracted from the vectorized loop.
  3278. Builder.SetInsertPoint(LoopMiddleBlock->getTerminator());
  3279. Value *lastIncomingValue =
  3280. getOrCreateScalarValue(IncomingValue, { UF - 1, LastLane });
  3281. LCSSAPhi.addIncoming(lastIncomingValue, LoopMiddleBlock);
  3282. }
  3283. }
  3284. }
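// Sink scalarized operands of the predicated instruction into its predicated
// block, so the sunk code only executes when the predicate holds. An operand
// is sunk only when all of its uses are inside that block, and sinking is
// repeated until a full pass over the worklist makes no change.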
  3285. void InnerLoopVectorizer::sinkScalarOperands(Instruction *PredInst) {
  3286. // The basic block and loop containing the predicated instruction.
  3287. auto *PredBB = PredInst->getParent();
  3288. auto *VectorLoop = LI->getLoopFor(PredBB);
  3289. // Initialize a worklist with the operands of the predicated instruction.
  3290. SetVector<Value *> Worklist(PredInst->op_begin(), PredInst->op_end());
  3291. // Holds instructions that we need to analyze again. An instruction may be
  3292. // reanalyzed if we don't yet know if we can sink it or not.
  3293. SmallVector<Instruction *, 8> InstsToReanalyze;
  3294. // Returns true if a given use occurs in the predicated block. Phi nodes use
  3295. // their operands in their corresponding predecessor blocks.
  3296. auto isBlockOfUsePredicated = [&](Use &U) -> bool {
  3297. auto *I = cast<Instruction>(U.getUser());
  3298. BasicBlock *BB = I->getParent();
  3299. if (auto *Phi = dyn_cast<PHINode>(I))
  3300. BB = Phi->getIncomingBlock(
  3301. PHINode::getIncomingValueNumForOperand(U.getOperandNo()));
  3302. return BB == PredBB;
  3303. };
  3304. // Iteratively sink the scalarized operands of the predicated instruction
3305. // into the block we created for it. When an instruction is sunk, its
  3306. // operands are then added to the worklist. The algorithm ends after one pass
  3307. // through the worklist doesn't sink a single instruction.
  3308. bool Changed;
  3309. do {
  3310. // Add the instructions that need to be reanalyzed to the worklist, and
  3311. // reset the changed indicator.
  3312. Worklist.insert(InstsToReanalyze.begin(), InstsToReanalyze.end());
  3313. InstsToReanalyze.clear();
  3314. Changed = false;
  3315. while (!Worklist.empty()) {
  3316. auto *I = dyn_cast<Instruction>(Worklist.pop_back_val());
  3317. // We can't sink an instruction if it is a phi node, is already in the
  3318. // predicated block, is not in the loop, or may have side effects.
  3319. if (!I || isa<PHINode>(I) || I->getParent() == PredBB ||
  3320. !VectorLoop->contains(I) || I->mayHaveSideEffects())
  3321. continue;
  3322. // It's legal to sink the instruction if all its uses occur in the
  3323. // predicated block. Otherwise, there's nothing to do yet, and we may
  3324. // need to reanalyze the instruction.
  3325. if (!llvm::all_of(I->uses(), isBlockOfUsePredicated)) {
  3326. InstsToReanalyze.push_back(I);
  3327. continue;
  3328. }
  3329. // Move the instruction to the beginning of the predicated block, and add
3330. // its operands to the worklist.
  3331. I->moveBefore(&*PredBB->getFirstInsertionPt());
  3332. Worklist.insert(I->op_begin(), I->op_end());
  3333. // The sinking may have enabled other instructions to be sunk, so we will
  3334. // need to iterate.
  3335. Changed = true;
  3336. }
  3337. } while (Changed);
  3338. }
  3339. void InnerLoopVectorizer::fixNonInductionPHIs() {
  3340. for (PHINode *OrigPhi : OrigPHIsToFix) {
  3341. PHINode *NewPhi =
  3342. cast<PHINode>(VectorLoopValueMap.getVectorValue(OrigPhi, 0));
  3343. unsigned NumIncomingValues = OrigPhi->getNumIncomingValues();
  3344. SmallVector<BasicBlock *, 2> ScalarBBPredecessors(
  3345. predecessors(OrigPhi->getParent()));
  3346. SmallVector<BasicBlock *, 2> VectorBBPredecessors(
  3347. predecessors(NewPhi->getParent()));
  3348. assert(ScalarBBPredecessors.size() == VectorBBPredecessors.size() &&
  3349. "Scalar and Vector BB should have the same number of predecessors");
  3350. // The insertion point in Builder may be invalidated by the time we get
  3351. // here. Force the Builder insertion point to something valid so that we do
  3352. // not run into issues during insertion point restore in
  3353. // getOrCreateVectorValue calls below.
  3354. Builder.SetInsertPoint(NewPhi);
  3355. // The predecessor order is preserved and we can rely on mapping between
  3356. // scalar and vector block predecessors.
  3357. for (unsigned i = 0; i < NumIncomingValues; ++i) {
  3358. BasicBlock *NewPredBB = VectorBBPredecessors[i];
  3359. // When looking up the new scalar/vector values to fix up, use incoming
  3360. // values from original phi.
  3361. Value *ScIncV =
  3362. OrigPhi->getIncomingValueForBlock(ScalarBBPredecessors[i]);
3363. // The scalar incoming value may need a broadcast.
  3364. Value *NewIncV = getOrCreateVectorValue(ScIncV, 0);
  3365. NewPhi->addIncoming(NewIncV, NewPredBB);
  3366. }
  3367. }
  3368. }
  3369. void InnerLoopVectorizer::widenPHIInstruction(Instruction *PN, unsigned UF,
  3370. unsigned VF) {
  3371. PHINode *P = cast<PHINode>(PN);
  3372. if (EnableVPlanNativePath) {
  3373. // Currently we enter here in the VPlan-native path for non-induction
  3374. // PHIs where all control flow is uniform. We simply widen these PHIs.
  3375. // Create a vector phi with no operands - the vector phi operands will be
  3376. // set at the end of vector code generation.
  3377. Type *VecTy =
  3378. (VF == 1) ? PN->getType() : VectorType::get(PN->getType(), VF);
  3379. Value *VecPhi = Builder.CreatePHI(VecTy, PN->getNumOperands(), "vec.phi");
  3380. VectorLoopValueMap.setVectorValue(P, 0, VecPhi);
  3381. OrigPHIsToFix.push_back(P);
  3382. return;
  3383. }
  3384. assert(PN->getParent() == OrigLoop->getHeader() &&
  3385. "Non-header phis should have been handled elsewhere");
  3386. // In order to support recurrences we need to be able to vectorize Phi nodes.
  3387. // Phi nodes have cycles, so we need to vectorize them in two stages. This is
  3388. // stage #1: We create a new vector PHI node with no incoming edges. We'll use
  3389. // this value when we vectorize all of the instructions that use the PHI.
  3390. if (Legal->isReductionVariable(P) || Legal->isFirstOrderRecurrence(P)) {
  3391. for (unsigned Part = 0; Part < UF; ++Part) {
  3392. // This is phase one of vectorizing PHIs.
  3393. Type *VecTy =
  3394. (VF == 1) ? PN->getType() : VectorType::get(PN->getType(), VF);
  3395. Value *EntryPart = PHINode::Create(
  3396. VecTy, 2, "vec.phi", &*LoopVectorBody->getFirstInsertionPt());
  3397. VectorLoopValueMap.setVectorValue(P, Part, EntryPart);
  3398. }
  3399. return;
  3400. }
  3401. setDebugLocFromInst(Builder, P);
  3402. // This PHINode must be an induction variable.
  3403. // Make sure that we know about it.
  3404. assert(Legal->getInductionVars()->count(P) && "Not an induction variable");
  3405. InductionDescriptor II = Legal->getInductionVars()->lookup(P);
  3406. const DataLayout &DL = OrigLoop->getHeader()->getModule()->getDataLayout();
  3407. // FIXME: The newly created binary instructions should contain nsw/nuw flags,
  3408. // which can be found from the original scalar operations.
  3409. switch (II.getKind()) {
  3410. case InductionDescriptor::IK_NoInduction:
  3411. llvm_unreachable("Unknown induction");
  3412. case InductionDescriptor::IK_IntInduction:
  3413. case InductionDescriptor::IK_FpInduction:
  3414. llvm_unreachable("Integer/fp induction is handled elsewhere.");
  3415. case InductionDescriptor::IK_PtrInduction: {
  3416. // Handle the pointer induction variable case.
  3417. assert(P->getType()->isPointerTy() && "Unexpected type.");
  3418. // This is the normalized GEP that starts counting at zero.
  3419. Value *PtrInd = Induction;
  3420. PtrInd = Builder.CreateSExtOrTrunc(PtrInd, II.getStep()->getType());
  3421. // Determine the number of scalars we need to generate for each unroll
  3422. // iteration. If the instruction is uniform, we only need to generate the
  3423. // first lane. Otherwise, we generate all VF values.
  3424. unsigned Lanes = Cost->isUniformAfterVectorization(P, VF) ? 1 : VF;
  3425. // These are the scalar results. Notice that we don't generate vector GEPs
  3426. // because scalar GEPs result in better code.
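// For instance, with VF = 4, UF = 2 and a non-uniform pointer IV, this emits
// eight scalar pointers ("next.gep" values): one for each index PtrInd + 0..3
// in part 0 and PtrInd + 4..7 in part 1.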
  3427. for (unsigned Part = 0; Part < UF; ++Part) {
  3428. for (unsigned Lane = 0; Lane < Lanes; ++Lane) {
  3429. Constant *Idx = ConstantInt::get(PtrInd->getType(), Lane + Part * VF);
  3430. Value *GlobalIdx = Builder.CreateAdd(PtrInd, Idx);
  3431. Value *SclrGep =
  3432. emitTransformedIndex(Builder, GlobalIdx, PSE.getSE(), DL, II);
  3433. SclrGep->setName("next.gep");
  3434. VectorLoopValueMap.setScalarValue(P, {Part, Lane}, SclrGep);
  3435. }
  3436. }
  3437. return;
  3438. }
  3439. }
  3440. }
  3441. /// A helper function for checking whether an integer division-related
  3442. /// instruction may divide by zero (in which case it must be predicated if
  3443. /// executed conditionally in the scalar code).
  3444. /// TODO: It may be worthwhile to generalize and check isKnownNonZero().
3445. /// Non-zero divisors that are not compile-time constants will not be
3446. /// converted into multiplication, so we will still end up scalarizing
3447. /// the division, but can do so without predication.
  3448. static bool mayDivideByZero(Instruction &I) {
  3449. assert((I.getOpcode() == Instruction::UDiv ||
  3450. I.getOpcode() == Instruction::SDiv ||
  3451. I.getOpcode() == Instruction::URem ||
  3452. I.getOpcode() == Instruction::SRem) &&
  3453. "Unexpected instruction");
  3454. Value *Divisor = I.getOperand(1);
  3455. auto *CInt = dyn_cast<ConstantInt>(Divisor);
  3456. return !CInt || CInt->isZero();
  3457. }
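// Widen a single scalar instruction into its vector form, producing one vector
// value per unroll part and recording each in VectorLoopValueMap.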
  3458. void InnerLoopVectorizer::widenInstruction(Instruction &I) {
  3459. switch (I.getOpcode()) {
  3460. case Instruction::Br:
  3461. case Instruction::PHI:
  3462. llvm_unreachable("This instruction is handled by a different recipe.");
  3463. case Instruction::GetElementPtr: {
  3464. // Construct a vector GEP by widening the operands of the scalar GEP as
  3465. // necessary. We mark the vector GEP 'inbounds' if appropriate. A GEP
  3466. // results in a vector of pointers when at least one operand of the GEP
  3467. // is vector-typed. Thus, to keep the representation compact, we only use
  3468. // vector-typed operands for loop-varying values.
  3469. auto *GEP = cast<GetElementPtrInst>(&I);
  3470. if (VF > 1 && OrigLoop->hasLoopInvariantOperands(GEP)) {
  3471. // If we are vectorizing, but the GEP has only loop-invariant operands,
  3472. // the GEP we build (by only using vector-typed operands for
  3473. // loop-varying values) would be a scalar pointer. Thus, to ensure we
  3474. // produce a vector of pointers, we need to either arbitrarily pick an
  3475. // operand to broadcast, or broadcast a clone of the original GEP.
  3476. // Here, we broadcast a clone of the original.
  3477. //
  3478. // TODO: If at some point we decide to scalarize instructions having
  3479. // loop-invariant operands, this special case will no longer be
  3480. // required. We would add the scalarization decision to
  3481. // collectLoopScalars() and teach getVectorValue() to broadcast
  3482. // the lane-zero scalar value.
  3483. auto *Clone = Builder.Insert(GEP->clone());
  3484. for (unsigned Part = 0; Part < UF; ++Part) {
  3485. Value *EntryPart = Builder.CreateVectorSplat(VF, Clone);
  3486. VectorLoopValueMap.setVectorValue(&I, Part, EntryPart);
  3487. addMetadata(EntryPart, GEP);
  3488. }
  3489. } else {
  3490. // If the GEP has at least one loop-varying operand, we are sure to
  3491. // produce a vector of pointers. But if we are only unrolling, we want
  3492. // to produce a scalar GEP for each unroll part. Thus, the GEP we
  3493. // produce with the code below will be scalar (if VF == 1) or vector
  3494. // (otherwise). Note that for the unroll-only case, we still maintain
  3495. // values in the vector mapping with initVector, as we do for other
  3496. // instructions.
  3497. for (unsigned Part = 0; Part < UF; ++Part) {
  3498. // The pointer operand of the new GEP. If it's loop-invariant, we
  3499. // won't broadcast it.
  3500. auto *Ptr =
  3501. OrigLoop->isLoopInvariant(GEP->getPointerOperand())
  3502. ? GEP->getPointerOperand()
  3503. : getOrCreateVectorValue(GEP->getPointerOperand(), Part);
  3504. // Collect all the indices for the new GEP. If any index is
  3505. // loop-invariant, we won't broadcast it.
  3506. SmallVector<Value *, 4> Indices;
  3507. for (auto &U : make_range(GEP->idx_begin(), GEP->idx_end())) {
  3508. if (OrigLoop->isLoopInvariant(U.get()))
  3509. Indices.push_back(U.get());
  3510. else
  3511. Indices.push_back(getOrCreateVectorValue(U.get(), Part));
  3512. }
  3513. // Create the new GEP. Note that this GEP may be a scalar if VF == 1,
  3514. // but it should be a vector, otherwise.
  3515. auto *NewGEP =
  3516. GEP->isInBounds()
  3517. ? Builder.CreateInBoundsGEP(GEP->getSourceElementType(), Ptr,
  3518. Indices)
  3519. : Builder.CreateGEP(GEP->getSourceElementType(), Ptr, Indices);
  3520. assert((VF == 1 || NewGEP->getType()->isVectorTy()) &&
  3521. "NewGEP is not a pointer vector");
  3522. VectorLoopValueMap.setVectorValue(&I, Part, NewGEP);
  3523. addMetadata(NewGEP, GEP);
  3524. }
  3525. }
  3526. break;
  3527. }
  3528. case Instruction::UDiv:
  3529. case Instruction::SDiv:
  3530. case Instruction::SRem:
  3531. case Instruction::URem:
  3532. case Instruction::Add:
  3533. case Instruction::FAdd:
  3534. case Instruction::Sub:
  3535. case Instruction::FSub:
  3536. case Instruction::FNeg:
  3537. case Instruction::Mul:
  3538. case Instruction::FMul:
  3539. case Instruction::FDiv:
  3540. case Instruction::FRem:
  3541. case Instruction::Shl:
  3542. case Instruction::LShr:
  3543. case Instruction::AShr:
  3544. case Instruction::And:
  3545. case Instruction::Or:
  3546. case Instruction::Xor: {
  3547. // Just widen unops and binops.
  3548. setDebugLocFromInst(Builder, &I);
  3549. for (unsigned Part = 0; Part < UF; ++Part) {
  3550. SmallVector<Value *, 2> Ops;
  3551. for (Value *Op : I.operands())
  3552. Ops.push_back(getOrCreateVectorValue(Op, Part));
  3553. Value *V = Builder.CreateNAryOp(I.getOpcode(), Ops);
  3554. if (auto *VecOp = dyn_cast<Instruction>(V))
  3555. VecOp->copyIRFlags(&I);
  3556. // Use this vector value for all users of the original instruction.
  3557. VectorLoopValueMap.setVectorValue(&I, Part, V);
  3558. addMetadata(V, &I);
  3559. }
  3560. break;
  3561. }
  3562. case Instruction::Select: {
  3563. // Widen selects.
  3564. // If the selector is loop invariant we can create a select
  3565. // instruction with a scalar condition. Otherwise, use vector-select.
  3566. auto *SE = PSE.getSE();
  3567. bool InvariantCond =
  3568. SE->isLoopInvariant(PSE.getSCEV(I.getOperand(0)), OrigLoop);
  3569. setDebugLocFromInst(Builder, &I);
  3570. // The condition can be loop invariant but still defined inside the
  3571. // loop. This means that we can't just use the original 'cond' value.
  3572. // We have to take the 'vectorized' value and pick the first lane.
  3573. // Instcombine will make this a no-op.
  3574. auto *ScalarCond = getOrCreateScalarValue(I.getOperand(0), {0, 0});
  3575. for (unsigned Part = 0; Part < UF; ++Part) {
  3576. Value *Cond = getOrCreateVectorValue(I.getOperand(0), Part);
  3577. Value *Op0 = getOrCreateVectorValue(I.getOperand(1), Part);
  3578. Value *Op1 = getOrCreateVectorValue(I.getOperand(2), Part);
  3579. Value *Sel =
  3580. Builder.CreateSelect(InvariantCond ? ScalarCond : Cond, Op0, Op1);
  3581. VectorLoopValueMap.setVectorValue(&I, Part, Sel);
  3582. addMetadata(Sel, &I);
  3583. }
  3584. break;
  3585. }
  3586. case Instruction::ICmp:
  3587. case Instruction::FCmp: {
  3588. // Widen compares. Generate vector compares.
  3589. bool FCmp = (I.getOpcode() == Instruction::FCmp);
  3590. auto *Cmp = dyn_cast<CmpInst>(&I);
  3591. setDebugLocFromInst(Builder, Cmp);
  3592. for (unsigned Part = 0; Part < UF; ++Part) {
  3593. Value *A = getOrCreateVectorValue(Cmp->getOperand(0), Part);
  3594. Value *B = getOrCreateVectorValue(Cmp->getOperand(1), Part);
  3595. Value *C = nullptr;
  3596. if (FCmp) {
  3597. // Propagate fast math flags.
  3598. IRBuilder<>::FastMathFlagGuard FMFG(Builder);
  3599. Builder.setFastMathFlags(Cmp->getFastMathFlags());
  3600. C = Builder.CreateFCmp(Cmp->getPredicate(), A, B);
  3601. } else {
  3602. C = Builder.CreateICmp(Cmp->getPredicate(), A, B);
  3603. }
  3604. VectorLoopValueMap.setVectorValue(&I, Part, C);
  3605. addMetadata(C, &I);
  3606. }
  3607. break;
  3608. }
  3609. case Instruction::ZExt:
  3610. case Instruction::SExt:
  3611. case Instruction::FPToUI:
  3612. case Instruction::FPToSI:
  3613. case Instruction::FPExt:
  3614. case Instruction::PtrToInt:
  3615. case Instruction::IntToPtr:
  3616. case Instruction::SIToFP:
  3617. case Instruction::UIToFP:
  3618. case Instruction::Trunc:
  3619. case Instruction::FPTrunc:
  3620. case Instruction::BitCast: {
  3621. auto *CI = dyn_cast<CastInst>(&I);
  3622. setDebugLocFromInst(Builder, CI);
  3623. /// Vectorize casts.
  3624. Type *DestTy =
  3625. (VF == 1) ? CI->getType() : VectorType::get(CI->getType(), VF);
  3626. for (unsigned Part = 0; Part < UF; ++Part) {
  3627. Value *A = getOrCreateVectorValue(CI->getOperand(0), Part);
  3628. Value *Cast = Builder.CreateCast(CI->getOpcode(), A, DestTy);
  3629. VectorLoopValueMap.setVectorValue(&I, Part, Cast);
  3630. addMetadata(Cast, &I);
  3631. }
  3632. break;
  3633. }
  3634. case Instruction::Call: {
  3635. // Ignore dbg intrinsics.
  3636. if (isa<DbgInfoIntrinsic>(I))
  3637. break;
  3638. setDebugLocFromInst(Builder, &I);
  3639. Module *M = I.getParent()->getParent()->getParent();
  3640. auto *CI = cast<CallInst>(&I);
  3641. StringRef FnName = CI->getCalledFunction()->getName();
  3642. Function *F = CI->getCalledFunction();
  3643. Type *RetTy = ToVectorTy(CI->getType(), VF);
  3644. SmallVector<Type *, 4> Tys;
  3645. for (Value *ArgOperand : CI->arg_operands())
  3646. Tys.push_back(ToVectorTy(ArgOperand->getType(), VF));
  3647. Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
3648. // The flag shows whether we use an intrinsic or an ordinary call for the
3649. // vectorized version of the instruction, i.e., whether an intrinsic call
3650. // is more beneficial than a library call.
  3651. bool NeedToScalarize;
  3652. unsigned CallCost = Cost->getVectorCallCost(CI, VF, NeedToScalarize);
  3653. bool UseVectorIntrinsic =
  3654. ID && Cost->getVectorIntrinsicCost(CI, VF) <= CallCost;
  3655. assert((UseVectorIntrinsic || !NeedToScalarize) &&
  3656. "Instruction should be scalarized elsewhere.");
  3657. for (unsigned Part = 0; Part < UF; ++Part) {
  3658. SmallVector<Value *, 4> Args;
  3659. for (unsigned i = 0, ie = CI->getNumArgOperands(); i != ie; ++i) {
  3660. Value *Arg = CI->getArgOperand(i);
  3661. // Some intrinsics have a scalar argument - don't replace it with a
  3662. // vector.
  3663. if (!UseVectorIntrinsic || !hasVectorInstrinsicScalarOpd(ID, i))
  3664. Arg = getOrCreateVectorValue(CI->getArgOperand(i), Part);
  3665. Args.push_back(Arg);
  3666. }
  3667. Function *VectorF;
  3668. if (UseVectorIntrinsic) {
  3669. // Use vector version of the intrinsic.
  3670. Type *TysForDecl[] = {CI->getType()};
  3671. if (VF > 1)
  3672. TysForDecl[0] = VectorType::get(CI->getType()->getScalarType(), VF);
  3673. VectorF = Intrinsic::getDeclaration(M, ID, TysForDecl);
  3674. } else {
  3675. // Use vector version of the library call.
  3676. StringRef VFnName = TLI->getVectorizedFunction(FnName, VF);
  3677. assert(!VFnName.empty() && "Vector function name is empty.");
  3678. VectorF = M->getFunction(VFnName);
  3679. if (!VectorF) {
  3680. // Generate a declaration
  3681. FunctionType *FTy = FunctionType::get(RetTy, Tys, false);
  3682. VectorF =
  3683. Function::Create(FTy, Function::ExternalLinkage, VFnName, M);
  3684. VectorF->copyAttributesFrom(F);
  3685. }
  3686. }
  3687. assert(VectorF && "Can't create vector function.");
  3688. SmallVector<OperandBundleDef, 1> OpBundles;
  3689. CI->getOperandBundlesAsDefs(OpBundles);
  3690. CallInst *V = Builder.CreateCall(VectorF, Args, OpBundles);
  3691. if (isa<FPMathOperator>(V))
  3692. V->copyFastMathFlags(CI);
  3693. VectorLoopValueMap.setVectorValue(&I, Part, V);
  3694. addMetadata(V, &I);
  3695. }
  3696. break;
  3697. }
  3698. default:
  3699. // This instruction is not vectorized by simple widening.
  3700. LLVM_DEBUG(dbgs() << "LV: Found an unhandled instruction: " << I);
  3701. llvm_unreachable("Unhandled instruction!");
  3702. } // end of switch.
  3703. }
  3704. void InnerLoopVectorizer::updateAnalysis() {
  3705. // Forget the original basic block.
  3706. PSE.getSE()->forgetLoop(OrigLoop);
  3707. // DT is not kept up-to-date for outer loop vectorization
  3708. if (EnableVPlanNativePath)
  3709. return;
  3710. // Update the dominator tree information.
  3711. assert(DT->properlyDominates(LoopBypassBlocks.front(), LoopExitBlock) &&
  3712. "Entry does not dominate exit.");
  3713. DT->addNewBlock(LoopMiddleBlock,
  3714. LI->getLoopFor(LoopVectorBody)->getLoopLatch());
  3715. DT->addNewBlock(LoopScalarPreHeader, LoopBypassBlocks[0]);
  3716. DT->changeImmediateDominator(LoopScalarBody, LoopScalarPreHeader);
  3717. DT->changeImmediateDominator(LoopExitBlock, LoopBypassBlocks[0]);
  3718. assert(DT->verify(DominatorTree::VerificationLevel::Fast));
  3719. }
  3720. void LoopVectorizationCostModel::collectLoopScalars(unsigned VF) {
  3721. // We should not collect Scalars more than once per VF. Right now, this
  3722. // function is called from collectUniformsAndScalars(), which already does
  3723. // this check. Collecting Scalars for VF=1 does not make any sense.
  3724. assert(VF >= 2 && Scalars.find(VF) == Scalars.end() &&
  3725. "This function should not be visited twice for the same VF");
  3726. SmallSetVector<Instruction *, 8> Worklist;
  3727. // These sets are used to seed the analysis with pointers used by memory
  3728. // accesses that will remain scalar.
  3729. SmallSetVector<Instruction *, 8> ScalarPtrs;
  3730. SmallPtrSet<Instruction *, 8> PossibleNonScalarPtrs;
  3731. // A helper that returns true if the use of Ptr by MemAccess will be scalar.
  3732. // The pointer operands of loads and stores will be scalar as long as the
  3733. // memory access is not a gather or scatter operation. The value operand of a
  3734. // store will remain scalar if the store is scalarized.
  3735. auto isScalarUse = [&](Instruction *MemAccess, Value *Ptr) {
  3736. InstWidening WideningDecision = getWideningDecision(MemAccess, VF);
  3737. assert(WideningDecision != CM_Unknown &&
  3738. "Widening decision should be ready at this moment");
  3739. if (auto *Store = dyn_cast<StoreInst>(MemAccess))
  3740. if (Ptr == Store->getValueOperand())
  3741. return WideningDecision == CM_Scalarize;
  3742. assert(Ptr == getLoadStorePointerOperand(MemAccess) &&
  3743. "Ptr is neither a value or pointer operand");
  3744. return WideningDecision != CM_GatherScatter;
  3745. };
  3746. // A helper that returns true if the given value is a bitcast or
  3747. // getelementptr instruction contained in the loop.
  3748. auto isLoopVaryingBitCastOrGEP = [&](Value *V) {
  3749. return ((isa<BitCastInst>(V) && V->getType()->isPointerTy()) ||
  3750. isa<GetElementPtrInst>(V)) &&
  3751. !TheLoop->isLoopInvariant(V);
  3752. };
  3753. // A helper that evaluates a memory access's use of a pointer. If the use
  3754. // will be a scalar use, and the pointer is only used by memory accesses, we
  3755. // place the pointer in ScalarPtrs. Otherwise, the pointer is placed in
  3756. // PossibleNonScalarPtrs.
  3757. auto evaluatePtrUse = [&](Instruction *MemAccess, Value *Ptr) {
  3758. // We only care about bitcast and getelementptr instructions contained in
  3759. // the loop.
  3760. if (!isLoopVaryingBitCastOrGEP(Ptr))
  3761. return;
  3762. // If the pointer has already been identified as scalar (e.g., if it was
  3763. // also identified as uniform), there's nothing to do.
  3764. auto *I = cast<Instruction>(Ptr);
  3765. if (Worklist.count(I))
  3766. return;
  3767. // If the use of the pointer will be a scalar use, and all users of the
  3768. // pointer are memory accesses, place the pointer in ScalarPtrs. Otherwise,
  3769. // place the pointer in PossibleNonScalarPtrs.
  3770. if (isScalarUse(MemAccess, Ptr) && llvm::all_of(I->users(), [&](User *U) {
  3771. return isa<LoadInst>(U) || isa<StoreInst>(U);
  3772. }))
  3773. ScalarPtrs.insert(I);
  3774. else
  3775. PossibleNonScalarPtrs.insert(I);
  3776. };
  3777. // We seed the scalars analysis with three classes of instructions: (1)
  3778. // instructions marked uniform-after-vectorization, (2) bitcast and
  3779. // getelementptr instructions used by memory accesses requiring a scalar use,
  3780. // and (3) pointer induction variables and their update instructions (we
  3781. // currently only scalarize these).
  3782. //
  3783. // (1) Add to the worklist all instructions that have been identified as
  3784. // uniform-after-vectorization.
  3785. Worklist.insert(Uniforms[VF].begin(), Uniforms[VF].end());
  3786. // (2) Add to the worklist all bitcast and getelementptr instructions used by
  3787. // memory accesses requiring a scalar use. The pointer operands of loads and
3788. // stores will be scalar as long as the memory access is not a gather or
  3789. // scatter operation. The value operand of a store will remain scalar if the
  3790. // store is scalarized.
  3791. for (auto *BB : TheLoop->blocks())
  3792. for (auto &I : *BB) {
  3793. if (auto *Load = dyn_cast<LoadInst>(&I)) {
  3794. evaluatePtrUse(Load, Load->getPointerOperand());
  3795. } else if (auto *Store = dyn_cast<StoreInst>(&I)) {
  3796. evaluatePtrUse(Store, Store->getPointerOperand());
  3797. evaluatePtrUse(Store, Store->getValueOperand());
  3798. }
  3799. }
  3800. for (auto *I : ScalarPtrs)
  3801. if (PossibleNonScalarPtrs.find(I) == PossibleNonScalarPtrs.end()) {
  3802. LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *I << "\n");
  3803. Worklist.insert(I);
  3804. }
  3805. // (3) Add to the worklist all pointer induction variables and their update
  3806. // instructions.
  3807. //
  3808. // TODO: Once we are able to vectorize pointer induction variables we should
  3809. // no longer insert them into the worklist here.
  3810. auto *Latch = TheLoop->getLoopLatch();
  3811. for (auto &Induction : *Legal->getInductionVars()) {
  3812. auto *Ind = Induction.first;
  3813. auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
  3814. if (Induction.second.getKind() != InductionDescriptor::IK_PtrInduction)
  3815. continue;
  3816. Worklist.insert(Ind);
  3817. Worklist.insert(IndUpdate);
  3818. LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Ind << "\n");
  3819. LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *IndUpdate
  3820. << "\n");
  3821. }
  3822. // Insert the forced scalars.
  3823. // FIXME: Currently widenPHIInstruction() often creates a dead vector
  3824. // induction variable when the PHI user is scalarized.
  3825. auto ForcedScalar = ForcedScalars.find(VF);
  3826. if (ForcedScalar != ForcedScalars.end())
  3827. for (auto *I : ForcedScalar->second)
  3828. Worklist.insert(I);
  3829. // Expand the worklist by looking through any bitcasts and getelementptr
  3830. // instructions we've already identified as scalar. This is similar to the
  3831. // expansion step in collectLoopUniforms(); however, here we're only
  3832. // expanding to include additional bitcasts and getelementptr instructions.
  3833. unsigned Idx = 0;
  3834. while (Idx != Worklist.size()) {
  3835. Instruction *Dst = Worklist[Idx++];
  3836. if (!isLoopVaryingBitCastOrGEP(Dst->getOperand(0)))
  3837. continue;
  3838. auto *Src = cast<Instruction>(Dst->getOperand(0));
  3839. if (llvm::all_of(Src->users(), [&](User *U) -> bool {
  3840. auto *J = cast<Instruction>(U);
  3841. return !TheLoop->contains(J) || Worklist.count(J) ||
  3842. ((isa<LoadInst>(J) || isa<StoreInst>(J)) &&
  3843. isScalarUse(J, Src));
  3844. })) {
  3845. Worklist.insert(Src);
  3846. LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Src << "\n");
  3847. }
  3848. }
  3849. // An induction variable will remain scalar if all users of the induction
  3850. // variable and induction variable update remain scalar.
  3851. for (auto &Induction : *Legal->getInductionVars()) {
  3852. auto *Ind = Induction.first;
  3853. auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
  3854. // We already considered pointer induction variables, so there's no reason
  3855. // to look at their users again.
  3856. //
  3857. // TODO: Once we are able to vectorize pointer induction variables we
  3858. // should no longer skip over them here.
  3859. if (Induction.second.getKind() == InductionDescriptor::IK_PtrInduction)
  3860. continue;
  3861. // Determine if all users of the induction variable are scalar after
  3862. // vectorization.
  3863. auto ScalarInd = llvm::all_of(Ind->users(), [&](User *U) -> bool {
  3864. auto *I = cast<Instruction>(U);
  3865. return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I);
  3866. });
  3867. if (!ScalarInd)
  3868. continue;
  3869. // Determine if all users of the induction variable update instruction are
  3870. // scalar after vectorization.
  3871. auto ScalarIndUpdate =
  3872. llvm::all_of(IndUpdate->users(), [&](User *U) -> bool {
  3873. auto *I = cast<Instruction>(U);
  3874. return I == Ind || !TheLoop->contains(I) || Worklist.count(I);
  3875. });
  3876. if (!ScalarIndUpdate)
  3877. continue;
  3878. // The induction variable and its update instruction will remain scalar.
  3879. Worklist.insert(Ind);
  3880. Worklist.insert(IndUpdate);
  3881. LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Ind << "\n");
  3882. LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *IndUpdate
  3883. << "\n");
  3884. }
  3885. Scalars[VF].insert(Worklist.begin(), Worklist.end());
  3886. }
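// A minimal standalone sketch (not part of the original source) of the
// induction-variable test performed above: the phi (Ind) and its update
// (IndUpdate) stay scalar only if every in-loop user of either one, other
// than the other member of the pair, is already known to be scalar. The
// std::set of illustrative instruction IDs stands in for the Worklist of
// Instruction pointers; all names here are hypothetical.
#include <set>
static bool inductionPairStaysScalar(int Ind, int IndUpdate,
                                     const std::set<int> &KnownScalar,
                                     const std::set<int> &IndUsersInLoop,
                                     const std::set<int> &UpdateUsersInLoop) {
  for (int U : IndUsersInLoop)
    if (U != IndUpdate && KnownScalar.count(U) == 0)
      return false; // a user of the phi will be vectorized
  for (int U : UpdateUsersInLoop)
    if (U != Ind && KnownScalar.count(U) == 0)
      return false; // a user of the update will be vectorized
  return true;      // both can be added to the scalar worklist
}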
  3887. bool LoopVectorizationCostModel::isScalarWithPredication(Instruction *I, unsigned VF) {
  3888. if (!blockNeedsPredication(I->getParent()))
  3889. return false;
3890. switch (I->getOpcode()) {
  3891. default:
  3892. break;
  3893. case Instruction::Load:
  3894. case Instruction::Store: {
  3895. if (!Legal->isMaskRequired(I))
  3896. return false;
  3897. auto *Ptr = getLoadStorePointerOperand(I);
  3898. auto *Ty = getMemInstValueType(I);
  3899. // We have already decided how to vectorize this instruction, get that
  3900. // result.
  3901. if (VF > 1) {
  3902. InstWidening WideningDecision = getWideningDecision(I, VF);
  3903. assert(WideningDecision != CM_Unknown &&
  3904. "Widening decision should be ready at this moment");
  3905. return WideningDecision == CM_Scalarize;
  3906. }
  3907. return isa<LoadInst>(I) ?
  3908. !(isLegalMaskedLoad(Ty, Ptr) || isLegalMaskedGather(Ty))
  3909. : !(isLegalMaskedStore(Ty, Ptr) || isLegalMaskedScatter(Ty));
  3910. }
  3911. case Instruction::UDiv:
  3912. case Instruction::SDiv:
  3913. case Instruction::SRem:
  3914. case Instruction::URem:
  3915. return mayDivideByZero(*I);
  3916. }
  3917. return false;
  3918. }
  3919. bool LoopVectorizationCostModel::interleavedAccessCanBeWidened(Instruction *I,
  3920. unsigned VF) {
  3921. assert(isAccessInterleaved(I) && "Expecting interleaved access.");
  3922. assert(getWideningDecision(I, VF) == CM_Unknown &&
  3923. "Decision should not be set yet.");
  3924. auto *Group = getInterleavedAccessGroup(I);
  3925. assert(Group && "Must have a group.");
3926. // If the instruction's allocated size doesn't equal its type size, it
  3927. // requires padding and will be scalarized.
  3928. auto &DL = I->getModule()->getDataLayout();
  3929. auto *ScalarTy = getMemInstValueType(I);
  3930. if (hasIrregularType(ScalarTy, DL, VF))
  3931. return false;
  3932. // Check if masking is required.
  3933. // A Group may need masking for one of two reasons: it resides in a block that
  3934. // needs predication, or it was decided to use masking to deal with gaps.
  3935. bool PredicatedAccessRequiresMasking =
  3936. Legal->blockNeedsPredication(I->getParent()) && Legal->isMaskRequired(I);
  3937. bool AccessWithGapsRequiresMasking =
  3938. Group->requiresScalarEpilogue() && !isScalarEpilogueAllowed();
  3939. if (!PredicatedAccessRequiresMasking && !AccessWithGapsRequiresMasking)
  3940. return true;
  3941. // If masked interleaving is required, we expect that the user/target had
  3942. // enabled it, because otherwise it either wouldn't have been created or
  3943. // it should have been invalidated by the CostModel.
  3944. assert(useMaskedInterleavedAccesses(TTI) &&
  3945. "Masked interleave-groups for predicated accesses are not enabled.");
  3946. auto *Ty = getMemInstValueType(I);
  3947. return isa<LoadInst>(I) ? TTI.isLegalMaskedLoad(Ty)
  3948. : TTI.isLegalMaskedStore(Ty);
  3949. }
  3950. bool LoopVectorizationCostModel::memoryInstructionCanBeWidened(Instruction *I,
  3951. unsigned VF) {
  3952. // Get and ensure we have a valid memory instruction.
  3953. LoadInst *LI = dyn_cast<LoadInst>(I);
  3954. StoreInst *SI = dyn_cast<StoreInst>(I);
  3955. assert((LI || SI) && "Invalid memory instruction");
  3956. auto *Ptr = getLoadStorePointerOperand(I);
3957. // To be widened, the pointer must first of all be consecutive.
  3958. if (!Legal->isConsecutivePtr(Ptr))
  3959. return false;
  3960. // If the instruction is a store located in a predicated block, it will be
  3961. // scalarized.
  3962. if (isScalarWithPredication(I))
  3963. return false;
3964. // If the instruction's allocated size doesn't equal its type size, it
  3965. // requires padding and will be scalarized.
  3966. auto &DL = I->getModule()->getDataLayout();
  3967. auto *ScalarTy = LI ? LI->getType() : SI->getValueOperand()->getType();
  3968. if (hasIrregularType(ScalarTy, DL, VF))
  3969. return false;
  3970. return true;
  3971. }
  3972. void LoopVectorizationCostModel::collectLoopUniforms(unsigned VF) {
  3973. // We should not collect Uniforms more than once per VF. Right now,
  3974. // this function is called from collectUniformsAndScalars(), which
  3975. // already does this check. Collecting Uniforms for VF=1 does not make any
  3976. // sense.
  3977. assert(VF >= 2 && Uniforms.find(VF) == Uniforms.end() &&
  3978. "This function should not be visited twice for the same VF");
3979. // Create an entry for this VF up front so that, even if no uniform values
3980. // are found, we will not analyze it again; Uniforms.count(VF) will return 1.
  3981. Uniforms[VF].clear();
  3982. // We now know that the loop is vectorizable!
  3983. // Collect instructions inside the loop that will remain uniform after
  3984. // vectorization.
3985. // Global values, parameters, and instructions outside of the current loop
3986. // are out of scope.
  3987. auto isOutOfScope = [&](Value *V) -> bool {
  3988. Instruction *I = dyn_cast<Instruction>(V);
  3989. return (!I || !TheLoop->contains(I));
  3990. };
  3991. SetVector<Instruction *> Worklist;
  3992. BasicBlock *Latch = TheLoop->getLoopLatch();
  3993. // Start with the conditional branch. If the branch condition is an
  3994. // instruction contained in the loop that is only used by the branch, it is
  3995. // uniform.
  3996. auto *Cmp = dyn_cast<Instruction>(Latch->getTerminator()->getOperand(0));
  3997. if (Cmp && TheLoop->contains(Cmp) && Cmp->hasOneUse()) {
  3998. Worklist.insert(Cmp);
  3999. LLVM_DEBUG(dbgs() << "LV: Found uniform instruction: " << *Cmp << "\n");
  4000. }
  4001. // Holds consecutive and consecutive-like pointers. Consecutive-like pointers
  4002. // are pointers that are treated like consecutive pointers during
  4003. // vectorization. The pointer operands of interleaved accesses are an
  4004. // example.
  4005. SmallSetVector<Instruction *, 8> ConsecutiveLikePtrs;
  4006. // Holds pointer operands of instructions that are possibly non-uniform.
  4007. SmallPtrSet<Instruction *, 8> PossibleNonUniformPtrs;
  4008. auto isUniformDecision = [&](Instruction *I, unsigned VF) {
  4009. InstWidening WideningDecision = getWideningDecision(I, VF);
  4010. assert(WideningDecision != CM_Unknown &&
  4011. "Widening decision should be ready at this moment");
  4012. return (WideningDecision == CM_Widen ||
  4013. WideningDecision == CM_Widen_Reverse ||
  4014. WideningDecision == CM_Interleave);
  4015. };
  4016. // Iterate over the instructions in the loop, and collect all
  4017. // consecutive-like pointer operands in ConsecutiveLikePtrs. If it's possible
  4018. // that a consecutive-like pointer operand will be scalarized, we collect it
  4019. // in PossibleNonUniformPtrs instead. We use two sets here because a single
  4020. // getelementptr instruction can be used by both vectorized and scalarized
  4021. // memory instructions. For example, if a loop loads and stores from the same
  4022. // location, but the store is conditional, the store will be scalarized, and
  4023. // the getelementptr won't remain uniform.
  4024. for (auto *BB : TheLoop->blocks())
  4025. for (auto &I : *BB) {
  4026. // If there's no pointer operand, there's nothing to do.
  4027. auto *Ptr = dyn_cast_or_null<Instruction>(getLoadStorePointerOperand(&I));
  4028. if (!Ptr)
  4029. continue;
  4030. // True if all users of Ptr are memory accesses that have Ptr as their
  4031. // pointer operand.
  4032. auto UsersAreMemAccesses =
  4033. llvm::all_of(Ptr->users(), [&](User *U) -> bool {
  4034. return getLoadStorePointerOperand(U) == Ptr;
  4035. });
  4036. // Ensure the memory instruction will not be scalarized or used by
  4037. // gather/scatter, making its pointer operand non-uniform. If the pointer
  4038. // operand is used by any instruction other than a memory access, we
  4039. // conservatively assume the pointer operand may be non-uniform.
  4040. if (!UsersAreMemAccesses || !isUniformDecision(&I, VF))
  4041. PossibleNonUniformPtrs.insert(Ptr);
  4042. // If the memory instruction will be vectorized and its pointer operand
4043. // is consecutive-like or part of an interleaved group, the pointer operand
4044. // should remain uniform.
  4045. else
  4046. ConsecutiveLikePtrs.insert(Ptr);
  4047. }
  4048. // Add to the Worklist all consecutive and consecutive-like pointers that
  4049. // aren't also identified as possibly non-uniform.
  4050. for (auto *V : ConsecutiveLikePtrs)
  4051. if (PossibleNonUniformPtrs.find(V) == PossibleNonUniformPtrs.end()) {
  4052. LLVM_DEBUG(dbgs() << "LV: Found uniform instruction: " << *V << "\n");
  4053. Worklist.insert(V);
  4054. }
  4055. // Expand Worklist in topological order: whenever a new instruction
4056. // is added, its users should already be inside the Worklist. This ensures
4057. // that a uniform instruction will only be used by uniform instructions.
  4058. unsigned idx = 0;
  4059. while (idx != Worklist.size()) {
  4060. Instruction *I = Worklist[idx++];
  4061. for (auto OV : I->operand_values()) {
  4062. // isOutOfScope operands cannot be uniform instructions.
  4063. if (isOutOfScope(OV))
  4064. continue;
4065. // First-order recurrence phis should typically be considered
  4066. // non-uniform.
  4067. auto *OP = dyn_cast<PHINode>(OV);
  4068. if (OP && Legal->isFirstOrderRecurrence(OP))
  4069. continue;
  4070. // If all the users of the operand are uniform, then add the
  4071. // operand into the uniform worklist.
  4072. auto *OI = cast<Instruction>(OV);
  4073. if (llvm::all_of(OI->users(), [&](User *U) -> bool {
  4074. auto *J = cast<Instruction>(U);
  4075. return Worklist.count(J) ||
  4076. (OI == getLoadStorePointerOperand(J) &&
  4077. isUniformDecision(J, VF));
  4078. })) {
  4079. Worklist.insert(OI);
  4080. LLVM_DEBUG(dbgs() << "LV: Found uniform instruction: " << *OI << "\n");
  4081. }
  4082. }
  4083. }
  4084. // Returns true if Ptr is the pointer operand of a memory access instruction
  4085. // I, and I is known to not require scalarization.
  4086. auto isVectorizedMemAccessUse = [&](Instruction *I, Value *Ptr) -> bool {
  4087. return getLoadStorePointerOperand(I) == Ptr && isUniformDecision(I, VF);
  4088. };
  4089. // For an instruction to be added into Worklist above, all its users inside
  4090. // the loop should also be in Worklist. However, this condition cannot be
  4091. // true for phi nodes that form a cyclic dependence. We must process phi
  4092. // nodes separately. An induction variable will remain uniform if all users
  4093. // of the induction variable and induction variable update remain uniform.
  4094. // The code below handles both pointer and non-pointer induction variables.
  4095. for (auto &Induction : *Legal->getInductionVars()) {
  4096. auto *Ind = Induction.first;
  4097. auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
  4098. // Determine if all users of the induction variable are uniform after
  4099. // vectorization.
  4100. auto UniformInd = llvm::all_of(Ind->users(), [&](User *U) -> bool {
  4101. auto *I = cast<Instruction>(U);
  4102. return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I) ||
  4103. isVectorizedMemAccessUse(I, Ind);
  4104. });
  4105. if (!UniformInd)
  4106. continue;
  4107. // Determine if all users of the induction variable update instruction are
  4108. // uniform after vectorization.
  4109. auto UniformIndUpdate =
  4110. llvm::all_of(IndUpdate->users(), [&](User *U) -> bool {
  4111. auto *I = cast<Instruction>(U);
  4112. return I == Ind || !TheLoop->contains(I) || Worklist.count(I) ||
  4113. isVectorizedMemAccessUse(I, IndUpdate);
  4114. });
  4115. if (!UniformIndUpdate)
  4116. continue;
  4117. // The induction variable and its update instruction will remain uniform.
  4118. Worklist.insert(Ind);
  4119. Worklist.insert(IndUpdate);
  4120. LLVM_DEBUG(dbgs() << "LV: Found uniform instruction: " << *Ind << "\n");
  4121. LLVM_DEBUG(dbgs() << "LV: Found uniform instruction: " << *IndUpdate
  4122. << "\n");
  4123. }
  4124. Uniforms[VF].insert(Worklist.begin(), Worklist.end());
  4125. }
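// A simplified fixed-point sketch (not from the original source) of the
// worklist expansion used by both collectLoopScalars and collectLoopUniforms
// above: starting from a seed set, an operand is pulled in only once every
// one of its users is already a member, so the final set is closed under
// "all users are members". Integer IDs and the user/operand maps are
// illustrative stand-ins for Instruction pointers and use lists.
#include <map>
#include <set>
#include <vector>
static std::set<int>
expandWorklist(std::set<int> Members,
               const std::map<int, std::vector<int>> &UsersOf,
               const std::map<int, std::vector<int>> &OperandsOf) {
  bool Changed = true;
  while (Changed) {
    Changed = false;
    // Snapshot the current members so the set can grow while iterating.
    std::set<int> Snapshot = Members;
    for (int I : Snapshot) {
      auto OpsIt = OperandsOf.find(I);
      if (OpsIt == OperandsOf.end())
        continue;
      for (int Op : OpsIt->second) {
        if (Members.count(Op))
          continue;
        bool AllUsersAreMembers = true;
        auto UsersIt = UsersOf.find(Op);
        if (UsersIt != UsersOf.end())
          for (int U : UsersIt->second)
            if (Members.count(U) == 0)
              AllUsersAreMembers = false;
        if (AllUsersAreMembers) { // closed under users: safe to add
          Members.insert(Op);
          Changed = true;
        }
      }
    }
  }
  return Members;
}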
  4126. bool LoopVectorizationCostModel::runtimeChecksRequired() {
  4127. LLVM_DEBUG(dbgs() << "LV: Performing code size checks.\n");
  4128. if (Legal->getRuntimePointerChecking()->Need) {
  4129. reportVectorizationFailure("Runtime ptr check is required with -Os/-Oz",
  4130. "runtime pointer checks needed. Enable vectorization of this "
  4131. "loop with '#pragma clang loop vectorize(enable)' when "
  4132. "compiling with -Os/-Oz",
  4133. "CantVersionLoopWithOptForSize", ORE, TheLoop);
  4134. return true;
  4135. }
  4136. if (!PSE.getUnionPredicate().getPredicates().empty()) {
  4137. reportVectorizationFailure("Runtime SCEV check is required with -Os/-Oz",
  4138. "runtime SCEV checks needed. Enable vectorization of this "
  4139. "loop with '#pragma clang loop vectorize(enable)' when "
  4140. "compiling with -Os/-Oz",
  4141. "CantVersionLoopWithOptForSize", ORE, TheLoop);
  4142. return true;
  4143. }
  4144. // FIXME: Avoid specializing for stride==1 instead of bailing out.
  4145. if (!Legal->getLAI()->getSymbolicStrides().empty()) {
  4146. reportVectorizationFailure("Runtime stride check is required with -Os/-Oz",
  4147. "runtime stride == 1 checks needed. Enable vectorization of "
  4148. "this loop with '#pragma clang loop vectorize(enable)' when "
  4149. "compiling with -Os/-Oz",
  4150. "CantVersionLoopWithOptForSize", ORE, TheLoop);
  4151. return true;
  4152. }
  4153. return false;
  4154. }
  4155. Optional<unsigned> LoopVectorizationCostModel::computeMaxVF() {
  4156. if (Legal->getRuntimePointerChecking()->Need && TTI.hasBranchDivergence()) {
4157. // TODO: It may be useful to do this, since the check is still likely to be
4158. // dynamically uniform if the target can skip it.
  4159. reportVectorizationFailure(
  4160. "Not inserting runtime ptr check for divergent target",
  4161. "runtime pointer checks needed. Not enabled for divergent target",
  4162. "CantVersionLoopWithDivergentTarget", ORE, TheLoop);
  4163. return None;
  4164. }
  4165. unsigned TC = PSE.getSE()->getSmallConstantTripCount(TheLoop);
  4166. LLVM_DEBUG(dbgs() << "LV: Found trip count: " << TC << '\n');
  4167. if (TC == 1) {
  4168. reportVectorizationFailure("Single iteration (non) loop",
  4169. "loop trip count is one, irrelevant for vectorization",
  4170. "SingleIterationLoop", ORE, TheLoop);
  4171. return None;
  4172. }
  4173. switch (ScalarEpilogueStatus) {
  4174. case CM_ScalarEpilogueAllowed:
  4175. return computeFeasibleMaxVF(TC);
  4176. case CM_ScalarEpilogueNotNeededUsePredicate:
  4177. LLVM_DEBUG(
  4178. dbgs() << "LV: vector predicate hint/switch found.\n"
  4179. << "LV: Not allowing scalar epilogue, creating predicated "
  4180. << "vector loop.\n");
  4181. break;
  4182. case CM_ScalarEpilogueNotAllowedLowTripLoop:
  4183. // fallthrough as a special case of OptForSize
  4184. case CM_ScalarEpilogueNotAllowedOptSize:
  4185. if (ScalarEpilogueStatus == CM_ScalarEpilogueNotAllowedOptSize)
  4186. LLVM_DEBUG(
  4187. dbgs() << "LV: Not allowing scalar epilogue due to -Os/-Oz.\n");
  4188. else
  4189. LLVM_DEBUG(dbgs() << "LV: Not allowing scalar epilogue due to low trip "
  4190. << "count.\n");
4191. // Bail if runtime checks are required, which are not good when optimizing
  4192. // for size.
  4193. if (runtimeChecksRequired())
  4194. return None;
  4195. break;
  4196. }
4197. // Now try to fold the tail by masking.
  4198. // Invalidate interleave groups that require an epilogue if we can't mask
  4199. // the interleave-group.
  4200. if (!useMaskedInterleavedAccesses(TTI))
  4201. InterleaveInfo.invalidateGroupsRequiringScalarEpilogue();
  4202. unsigned MaxVF = computeFeasibleMaxVF(TC);
  4203. if (TC > 0 && TC % MaxVF == 0) {
  4204. // Accept MaxVF if we do not have a tail.
  4205. LLVM_DEBUG(dbgs() << "LV: No tail will remain for any chosen VF.\n");
  4206. return MaxVF;
  4207. }
  4208. // If we don't know the precise trip count, or if the trip count that we
  4209. // found modulo the vectorization factor is not zero, try to fold the tail
  4210. // by masking.
  4211. // FIXME: look for a smaller MaxVF that does divide TC rather than masking.
  4212. if (Legal->prepareToFoldTailByMasking()) {
  4213. FoldTailByMasking = true;
  4214. return MaxVF;
  4215. }
  4216. if (TC == 0) {
  4217. reportVectorizationFailure(
  4218. "Unable to calculate the loop count due to complex control flow",
  4219. "unable to calculate the loop count due to complex control flow",
  4220. "UnknownLoopCountComplexCFG", ORE, TheLoop);
  4221. return None;
  4222. }
  4223. reportVectorizationFailure(
  4224. "Cannot optimize for size and vectorize at the same time.",
  4225. "cannot optimize for size and vectorize at the same time. "
  4226. "Enable vectorization of this loop with '#pragma clang loop "
  4227. "vectorize(enable)' when compiling with -Os/-Oz",
  4228. "NoTailLoopWithOptForSize", ORE, TheLoop);
  4229. return None;
  4230. }
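// A small sketch (illustrative, not part of the original source) of the tail
// decision above: accept MaxVF outright when a known trip count divides
// evenly, otherwise fold the tail by masking if legality allows, and
// otherwise give up (e.g. under -Os/-Oz, where a scalar tail is not allowed).
enum class TailPolicy { NoTail, FoldByMasking, CannotVectorize };
static TailPolicy classifyTail(unsigned TripCount, unsigned MaxVF,
                               bool CanFoldTailByMasking) {
  if (TripCount > 0 && TripCount % MaxVF == 0)
    return TailPolicy::NoTail;        // no remainder iterations
  if (CanFoldTailByMasking)
    return TailPolicy::FoldByMasking; // predicate the last vector iteration
  return TailPolicy::CannotVectorize;
}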
  4231. unsigned
  4232. LoopVectorizationCostModel::computeFeasibleMaxVF(unsigned ConstTripCount) {
  4233. MinBWs = computeMinimumValueSizes(TheLoop->getBlocks(), *DB, &TTI);
  4234. unsigned SmallestType, WidestType;
  4235. std::tie(SmallestType, WidestType) = getSmallestAndWidestTypes();
  4236. unsigned WidestRegister = TTI.getRegisterBitWidth(true);
  4237. // Get the maximum safe dependence distance in bits computed by LAA.
  4238. // It is computed by MaxVF * sizeOf(type) * 8, where type is taken from
4239. // the memory access that is most restrictive (involved in the smallest
  4240. // dependence distance).
  4241. unsigned MaxSafeRegisterWidth = Legal->getMaxSafeRegisterWidth();
  4242. WidestRegister = std::min(WidestRegister, MaxSafeRegisterWidth);
  4243. unsigned MaxVectorSize = WidestRegister / WidestType;
  4244. LLVM_DEBUG(dbgs() << "LV: The Smallest and Widest types: " << SmallestType
  4245. << " / " << WidestType << " bits.\n");
  4246. LLVM_DEBUG(dbgs() << "LV: The Widest register safe to use is: "
  4247. << WidestRegister << " bits.\n");
  4248. assert(MaxVectorSize <= 256 && "Did not expect to pack so many elements"
  4249. " into one vector!");
  4250. if (MaxVectorSize == 0) {
  4251. LLVM_DEBUG(dbgs() << "LV: The target has no vector registers.\n");
  4252. MaxVectorSize = 1;
  4253. return MaxVectorSize;
  4254. } else if (ConstTripCount && ConstTripCount < MaxVectorSize &&
  4255. isPowerOf2_32(ConstTripCount)) {
  4256. // We need to clamp the VF to be the ConstTripCount. There is no point in
  4257. // choosing a higher viable VF as done in the loop below.
  4258. LLVM_DEBUG(dbgs() << "LV: Clamping the MaxVF to the constant trip count: "
  4259. << ConstTripCount << "\n");
  4260. MaxVectorSize = ConstTripCount;
  4261. return MaxVectorSize;
  4262. }
  4263. unsigned MaxVF = MaxVectorSize;
  4264. if (TTI.shouldMaximizeVectorBandwidth(!isScalarEpilogueAllowed()) ||
  4265. (MaximizeBandwidth && isScalarEpilogueAllowed())) {
  4266. // Collect all viable vectorization factors larger than the default MaxVF
  4267. // (i.e. MaxVectorSize).
  4268. SmallVector<unsigned, 8> VFs;
  4269. unsigned NewMaxVectorSize = WidestRegister / SmallestType;
  4270. for (unsigned VS = MaxVectorSize * 2; VS <= NewMaxVectorSize; VS *= 2)
  4271. VFs.push_back(VS);
  4272. // For each VF calculate its register usage.
  4273. auto RUs = calculateRegisterUsage(VFs);
  4274. // Select the largest VF which doesn't require more registers than existing
  4275. // ones.
  4276. unsigned TargetNumRegisters = TTI.getNumberOfRegisters(true);
  4277. for (int i = RUs.size() - 1; i >= 0; --i) {
  4278. if (RUs[i].MaxLocalUsers <= TargetNumRegisters) {
  4279. MaxVF = VFs[i];
  4280. break;
  4281. }
  4282. }
  4283. if (unsigned MinVF = TTI.getMinimumVF(SmallestType)) {
  4284. if (MaxVF < MinVF) {
  4285. LLVM_DEBUG(dbgs() << "LV: Overriding calculated MaxVF(" << MaxVF
  4286. << ") with target's minimum: " << MinVF << '\n');
  4287. MaxVF = MinVF;
  4288. }
  4289. }
  4290. }
  4291. return MaxVF;
  4292. }
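// A minimal sketch (hypothetical helper, not from the original source) of the
// width computation above: the widest register is first limited by the
// maximum safe dependence distance, then divided by the widest scalar type,
// and finally clamped to a small power-of-two constant trip count. All
// parameters are illustrative bit and element counts.
static unsigned sketchFeasibleMaxVF(unsigned WidestRegisterBits,
                                    unsigned MaxSafeRegisterWidthBits,
                                    unsigned WidestTypeBits,
                                    unsigned ConstTripCount) {
  unsigned SafeBits = WidestRegisterBits < MaxSafeRegisterWidthBits
                          ? WidestRegisterBits
                          : MaxSafeRegisterWidthBits;
  unsigned MaxVectorSize = SafeBits / WidestTypeBits;  // elements per vector
  if (MaxVectorSize == 0)
    return 1;                                          // no vector registers
  bool TCIsPow2 = ConstTripCount && !(ConstTripCount & (ConstTripCount - 1));
  if (TCIsPow2 && ConstTripCount < MaxVectorSize)
    return ConstTripCount;                             // clamp to trip count
  return MaxVectorSize;
}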
  4293. VectorizationFactor
  4294. LoopVectorizationCostModel::selectVectorizationFactor(unsigned MaxVF) {
  4295. float Cost = expectedCost(1).first;
  4296. const float ScalarCost = Cost;
  4297. unsigned Width = 1;
  4298. LLVM_DEBUG(dbgs() << "LV: Scalar loop costs: " << (int)ScalarCost << ".\n");
  4299. bool ForceVectorization = Hints->getForce() == LoopVectorizeHints::FK_Enabled;
  4300. if (ForceVectorization && MaxVF > 1) {
  4301. // Ignore scalar width, because the user explicitly wants vectorization.
  4302. // Initialize cost to max so that VF = 2 is, at least, chosen during cost
  4303. // evaluation.
  4304. Cost = std::numeric_limits<float>::max();
  4305. }
  4306. for (unsigned i = 2; i <= MaxVF; i *= 2) {
4307. // Notice that the vector loop needs to be executed fewer times, so
4308. // we need to divide the cost of the vector loop by the width of
  4309. // the vector elements.
  4310. VectorizationCostTy C = expectedCost(i);
  4311. float VectorCost = C.first / (float)i;
  4312. LLVM_DEBUG(dbgs() << "LV: Vector loop of width " << i
  4313. << " costs: " << (int)VectorCost << ".\n");
  4314. if (!C.second && !ForceVectorization) {
  4315. LLVM_DEBUG(
  4316. dbgs() << "LV: Not considering vector loop of width " << i
  4317. << " because it will not generate any vector instructions.\n");
  4318. continue;
  4319. }
  4320. if (VectorCost < Cost) {
  4321. Cost = VectorCost;
  4322. Width = i;
  4323. }
  4324. }
  4325. if (!EnableCondStoresVectorization && NumPredStores) {
  4326. reportVectorizationFailure("There are conditional stores.",
  4327. "store that is conditionally executed prevents vectorization",
  4328. "ConditionalStore", ORE, TheLoop);
  4329. Width = 1;
  4330. Cost = ScalarCost;
  4331. }
  4332. LLVM_DEBUG(if (ForceVectorization && Width > 1 && Cost >= ScalarCost) dbgs()
  4333. << "LV: Vectorization seems to be not beneficial, "
  4334. << "but was forced by a user.\n");
  4335. LLVM_DEBUG(dbgs() << "LV: Selecting VF: " << Width << ".\n");
  4336. VectorizationFactor Factor = {Width, (unsigned)(Width * Cost)};
  4337. return Factor;
  4338. }
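// A small sketch (illustrative only) of the selection loop above: candidate
// widths are powers of two up to MaxVF, and the winner is the width with the
// lowest per-lane cost (vector cost divided by the width), starting from the
// scalar cost. CostAtWidth is a hypothetical table assumed to be sized past
// MaxVF, with the power-of-two indices filled in by a cost model.
#include <vector>
static unsigned pickWidthByPerLaneCost(const std::vector<float> &CostAtWidth,
                                       unsigned MaxVF) {
  float Best = CostAtWidth[1]; // scalar loop cost
  unsigned Width = 1;
  for (unsigned W = 2; W <= MaxVF; W *= 2) {
    float PerLane = CostAtWidth[W] / static_cast<float>(W);
    if (PerLane < Best) {
      Best = PerLane;
      Width = W;
    }
  }
  return Width;
}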
  4339. std::pair<unsigned, unsigned>
  4340. LoopVectorizationCostModel::getSmallestAndWidestTypes() {
  4341. unsigned MinWidth = -1U;
  4342. unsigned MaxWidth = 8;
  4343. const DataLayout &DL = TheFunction->getParent()->getDataLayout();
  4344. // For each block.
  4345. for (BasicBlock *BB : TheLoop->blocks()) {
  4346. // For each instruction in the loop.
  4347. for (Instruction &I : BB->instructionsWithoutDebug()) {
  4348. Type *T = I.getType();
  4349. // Skip ignored values.
  4350. if (ValuesToIgnore.find(&I) != ValuesToIgnore.end())
  4351. continue;
  4352. // Only examine Loads, Stores and PHINodes.
  4353. if (!isa<LoadInst>(I) && !isa<StoreInst>(I) && !isa<PHINode>(I))
  4354. continue;
  4355. // Examine PHI nodes that are reduction variables. Update the type to
  4356. // account for the recurrence type.
  4357. if (auto *PN = dyn_cast<PHINode>(&I)) {
  4358. if (!Legal->isReductionVariable(PN))
  4359. continue;
  4360. RecurrenceDescriptor RdxDesc = (*Legal->getReductionVars())[PN];
  4361. T = RdxDesc.getRecurrenceType();
  4362. }
  4363. // Examine the stored values.
  4364. if (auto *ST = dyn_cast<StoreInst>(&I))
  4365. T = ST->getValueOperand()->getType();
  4366. // Ignore loaded pointer types and stored pointer types that are not
  4367. // vectorizable.
  4368. //
  4369. // FIXME: The check here attempts to predict whether a load or store will
  4370. // be vectorized. We only know this for certain after a VF has
  4371. // been selected. Here, we assume that if an access can be
  4372. // vectorized, it will be. We should also look at extending this
  4373. // optimization to non-pointer types.
  4374. //
  4375. if (T->isPointerTy() && !isConsecutiveLoadOrStore(&I) &&
  4376. !isAccessInterleaved(&I) && !isLegalGatherOrScatter(&I))
  4377. continue;
  4378. MinWidth = std::min(MinWidth,
  4379. (unsigned)DL.getTypeSizeInBits(T->getScalarType()));
  4380. MaxWidth = std::max(MaxWidth,
  4381. (unsigned)DL.getTypeSizeInBits(T->getScalarType()));
  4382. }
  4383. }
  4384. return {MinWidth, MaxWidth};
  4385. }
  4386. unsigned LoopVectorizationCostModel::selectInterleaveCount(unsigned VF,
  4387. unsigned LoopCost) {
  4388. // -- The interleave heuristics --
  4389. // We interleave the loop in order to expose ILP and reduce the loop overhead.
  4390. // There are many micro-architectural considerations that we can't predict
  4391. // at this level. For example, frontend pressure (on decode or fetch) due to
  4392. // code size, or the number and capabilities of the execution ports.
  4393. //
  4394. // We use the following heuristics to select the interleave count:
  4395. // 1. If the code has reductions, then we interleave to break the cross
  4396. // iteration dependency.
  4397. // 2. If the loop is really small, then we interleave to reduce the loop
  4398. // overhead.
  4399. // 3. We don't interleave if we think that we will spill registers to memory
  4400. // due to the increased register pressure.
  4401. if (!isScalarEpilogueAllowed())
  4402. return 1;
4403. // A maximum safe dependence distance was already used to limit widening, so
  4404. if (Legal->getMaxSafeDepDistBytes() != -1U)
  4405. return 1;
  4406. // Do not interleave loops with a relatively small trip count.
  4407. unsigned TC = PSE.getSE()->getSmallConstantTripCount(TheLoop);
  4408. if (TC > 1 && TC < TinyTripCountInterleaveThreshold)
  4409. return 1;
  4410. unsigned TargetNumRegisters = TTI.getNumberOfRegisters(VF > 1);
  4411. LLVM_DEBUG(dbgs() << "LV: The target has " << TargetNumRegisters
  4412. << " registers\n");
  4413. if (VF == 1) {
  4414. if (ForceTargetNumScalarRegs.getNumOccurrences() > 0)
  4415. TargetNumRegisters = ForceTargetNumScalarRegs;
  4416. } else {
  4417. if (ForceTargetNumVectorRegs.getNumOccurrences() > 0)
  4418. TargetNumRegisters = ForceTargetNumVectorRegs;
  4419. }
  4420. RegisterUsage R = calculateRegisterUsage({VF})[0];
4421. // We divide by this value below, so clamp it to at least one and assume that
4422. // we have at least one instruction that uses at least one register.
  4423. R.MaxLocalUsers = std::max(R.MaxLocalUsers, 1U);
  4424. // We calculate the interleave count using the following formula.
  4425. // Subtract the number of loop invariants from the number of available
  4426. // registers. These registers are used by all of the interleaved instances.
  4427. // Next, divide the remaining registers by the number of registers that is
  4428. // required by the loop, in order to estimate how many parallel instances
  4429. // fit without causing spills. All of this is rounded down if necessary to be
  4430. // a power of two. We want power of two interleave count to simplify any
  4431. // addressing operations or alignment considerations.
  4432. // We also want power of two interleave counts to ensure that the induction
  4433. // variable of the vector loop wraps to zero, when tail is folded by masking;
  4434. // this currently happens when OptForSize, in which case IC is set to 1 above.
  4435. unsigned IC = PowerOf2Floor((TargetNumRegisters - R.LoopInvariantRegs) /
  4436. R.MaxLocalUsers);
  4437. // Don't count the induction variable as interleaved.
  4438. if (EnableIndVarRegisterHeur)
  4439. IC = PowerOf2Floor((TargetNumRegisters - R.LoopInvariantRegs - 1) /
  4440. std::max(1U, (R.MaxLocalUsers - 1)));
  4441. // Clamp the interleave ranges to reasonable counts.
  4442. unsigned MaxInterleaveCount = TTI.getMaxInterleaveFactor(VF);
  4443. // Check if the user has overridden the max.
  4444. if (VF == 1) {
  4445. if (ForceTargetMaxScalarInterleaveFactor.getNumOccurrences() > 0)
  4446. MaxInterleaveCount = ForceTargetMaxScalarInterleaveFactor;
  4447. } else {
  4448. if (ForceTargetMaxVectorInterleaveFactor.getNumOccurrences() > 0)
  4449. MaxInterleaveCount = ForceTargetMaxVectorInterleaveFactor;
  4450. }
4451. // If the trip count is constant, limit the interleave count so it does not
4452. // exceed the trip count divided by VF.
  4453. if (TC > 0) {
  4454. assert(TC >= VF && "VF exceeds trip count?");
  4455. if ((TC / VF) < MaxInterleaveCount)
  4456. MaxInterleaveCount = (TC / VF);
  4457. }
  4458. // If we did not calculate the cost for VF (because the user selected the VF)
  4459. // then we calculate the cost of VF here.
  4460. if (LoopCost == 0)
  4461. LoopCost = expectedCost(VF).first;
  4462. assert(LoopCost && "Non-zero loop cost expected");
4463. // Clamp the calculated IC to be between 1 and the max interleave count
4464. // that the target and trip count allow.
  4465. if (IC > MaxInterleaveCount)
  4466. IC = MaxInterleaveCount;
  4467. else if (IC < 1)
  4468. IC = 1;
  4469. // Interleave if we vectorized this loop and there is a reduction that could
  4470. // benefit from interleaving.
  4471. if (VF > 1 && !Legal->getReductionVars()->empty()) {
  4472. LLVM_DEBUG(dbgs() << "LV: Interleaving because of reductions.\n");
  4473. return IC;
  4474. }
  4475. // Note that if we've already vectorized the loop we will have done the
  4476. // runtime check and so interleaving won't require further checks.
  4477. bool InterleavingRequiresRuntimePointerCheck =
  4478. (VF == 1 && Legal->getRuntimePointerChecking()->Need);
  4479. // We want to interleave small loops in order to reduce the loop overhead and
  4480. // potentially expose ILP opportunities.
  4481. LLVM_DEBUG(dbgs() << "LV: Loop cost is " << LoopCost << '\n');
  4482. if (!InterleavingRequiresRuntimePointerCheck && LoopCost < SmallLoopCost) {
  4483. // We assume that the cost overhead is 1 and we use the cost model
  4484. // to estimate the cost of the loop and interleave until the cost of the
  4485. // loop overhead is about 5% of the cost of the loop.
  4486. unsigned SmallIC =
  4487. std::min(IC, (unsigned)PowerOf2Floor(SmallLoopCost / LoopCost));
  4488. // Interleave until store/load ports (estimated by max interleave count) are
  4489. // saturated.
  4490. unsigned NumStores = Legal->getNumStores();
  4491. unsigned NumLoads = Legal->getNumLoads();
  4492. unsigned StoresIC = IC / (NumStores ? NumStores : 1);
  4493. unsigned LoadsIC = IC / (NumLoads ? NumLoads : 1);
  4494. // If we have a scalar reduction (vector reductions are already dealt with
  4495. // by this point), we can increase the critical path length if the loop
4496. // we're interleaving is inside another loop. Limit this, by default, to 2, so the
  4497. // critical path only gets increased by one reduction operation.
  4498. if (!Legal->getReductionVars()->empty() && TheLoop->getLoopDepth() > 1) {
  4499. unsigned F = static_cast<unsigned>(MaxNestedScalarReductionIC);
  4500. SmallIC = std::min(SmallIC, F);
  4501. StoresIC = std::min(StoresIC, F);
  4502. LoadsIC = std::min(LoadsIC, F);
  4503. }
  4504. if (EnableLoadStoreRuntimeInterleave &&
  4505. std::max(StoresIC, LoadsIC) > SmallIC) {
  4506. LLVM_DEBUG(
  4507. dbgs() << "LV: Interleaving to saturate store or load ports.\n");
  4508. return std::max(StoresIC, LoadsIC);
  4509. }
  4510. LLVM_DEBUG(dbgs() << "LV: Interleaving to reduce branch cost.\n");
  4511. return SmallIC;
  4512. }
  4513. // Interleave if this is a large loop (small loops are already dealt with by
  4514. // this point) that could benefit from interleaving.
  4515. bool HasReductions = !Legal->getReductionVars()->empty();
  4516. if (TTI.enableAggressiveInterleaving(HasReductions)) {
  4517. LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n");
  4518. return IC;
  4519. }
  4520. LLVM_DEBUG(dbgs() << "LV: Not Interleaving.\n");
  4521. return 1;
  4522. }
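// A standalone sketch (hypothetical names, not from the original source) of
// the register-pressure formula above: the registers left after reserving the
// loop invariants are divided by the per-instance demand, rounded down to a
// power of two, and clamped into [1, MaxInterleaveCount]. It assumes
// LoopInvariantRegs <= TargetNumRegisters and MaxLocalUsers >= 1, as the
// surrounding code does; powerOf2Floor stands in for llvm::PowerOf2Floor.
static unsigned powerOf2Floor(unsigned X) {
  while (X & (X - 1))
    X &= X - 1; // clear low bits until only the top set bit remains
  return X;
}
static unsigned sketchInterleaveCount(unsigned TargetNumRegisters,
                                      unsigned LoopInvariantRegs,
                                      unsigned MaxLocalUsers,
                                      unsigned MaxInterleaveCount) {
  unsigned IC =
      powerOf2Floor((TargetNumRegisters - LoopInvariantRegs) / MaxLocalUsers);
  if (IC > MaxInterleaveCount)
    IC = MaxInterleaveCount;
  if (IC < 1)
    IC = 1;
  return IC;
}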
  4523. SmallVector<LoopVectorizationCostModel::RegisterUsage, 8>
  4524. LoopVectorizationCostModel::calculateRegisterUsage(ArrayRef<unsigned> VFs) {
  4525. // This function calculates the register usage by measuring the highest number
  4526. // of values that are alive at a single location. Obviously, this is a very
4527. // rough estimation. We scan the loop in topological order and
  4528. // assign a number to each instruction. We use RPO to ensure that defs are
  4529. // met before their users. We assume that each instruction that has in-loop
  4530. // users starts an interval. We record every time that an in-loop value is
  4531. // used, so we have a list of the first and last occurrences of each
  4532. // instruction. Next, we transpose this data structure into a multi map that
  4533. // holds the list of intervals that *end* at a specific location. This multi
  4534. // map allows us to perform a linear search. We scan the instructions linearly
  4535. // and record each time that a new interval starts, by placing it in a set.
  4536. // If we find this value in the multi-map then we remove it from the set.
  4537. // The max register usage is the maximum size of the set.
  4538. // We also search for instructions that are defined outside the loop, but are
  4539. // used inside the loop. We need this number separately from the max-interval
  4540. // usage number because when we unroll, loop-invariant values do not take
4541. // more registers.
  4542. LoopBlocksDFS DFS(TheLoop);
  4543. DFS.perform(LI);
  4544. RegisterUsage RU;
  4545. // Each 'key' in the map opens a new interval. The values
  4546. // of the map are the index of the 'last seen' usage of the
  4547. // instruction that is the key.
  4548. using IntervalMap = DenseMap<Instruction *, unsigned>;
  4549. // Maps instruction to its index.
  4550. SmallVector<Instruction *, 64> IdxToInstr;
  4551. // Marks the end of each interval.
  4552. IntervalMap EndPoint;
4553. // Saves the set of instructions that are used in the loop.
  4554. SmallPtrSet<Instruction *, 8> Ends;
  4555. // Saves the list of values that are used in the loop but are
  4556. // defined outside the loop, such as arguments and constants.
  4557. SmallPtrSet<Value *, 8> LoopInvariants;
  4558. for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) {
  4559. for (Instruction &I : BB->instructionsWithoutDebug()) {
  4560. IdxToInstr.push_back(&I);
  4561. // Save the end location of each USE.
  4562. for (Value *U : I.operands()) {
  4563. auto *Instr = dyn_cast<Instruction>(U);
  4564. // Ignore non-instruction values such as arguments, constants, etc.
  4565. if (!Instr)
  4566. continue;
  4567. // If this instruction is outside the loop then record it and continue.
  4568. if (!TheLoop->contains(Instr)) {
  4569. LoopInvariants.insert(Instr);
  4570. continue;
  4571. }
  4572. // Overwrite previous end points.
  4573. EndPoint[Instr] = IdxToInstr.size();
  4574. Ends.insert(Instr);
  4575. }
  4576. }
  4577. }
  4578. // Saves the list of intervals that end with the index in 'key'.
  4579. using InstrList = SmallVector<Instruction *, 2>;
  4580. DenseMap<unsigned, InstrList> TransposeEnds;
  4581. // Transpose the EndPoints to a list of values that end at each index.
  4582. for (auto &Interval : EndPoint)
  4583. TransposeEnds[Interval.second].push_back(Interval.first);
  4584. SmallPtrSet<Instruction *, 8> OpenIntervals;
  4585. // Get the size of the widest register.
  4586. unsigned MaxSafeDepDist = -1U;
  4587. if (Legal->getMaxSafeDepDistBytes() != -1U)
  4588. MaxSafeDepDist = Legal->getMaxSafeDepDistBytes() * 8;
  4589. unsigned WidestRegister =
  4590. std::min(TTI.getRegisterBitWidth(true), MaxSafeDepDist);
  4591. const DataLayout &DL = TheFunction->getParent()->getDataLayout();
  4592. SmallVector<RegisterUsage, 8> RUs(VFs.size());
  4593. SmallVector<unsigned, 8> MaxUsages(VFs.size(), 0);
  4594. LLVM_DEBUG(dbgs() << "LV(REG): Calculating max register usage:\n");
  4595. // A lambda that gets the register usage for the given type and VF.
  4596. auto GetRegUsage = [&DL, WidestRegister](Type *Ty, unsigned VF) {
  4597. if (Ty->isTokenTy())
  4598. return 0U;
  4599. unsigned TypeSize = DL.getTypeSizeInBits(Ty->getScalarType());
  4600. return std::max<unsigned>(1, VF * TypeSize / WidestRegister);
  4601. };
  4602. for (unsigned int i = 0, s = IdxToInstr.size(); i < s; ++i) {
  4603. Instruction *I = IdxToInstr[i];
  4604. // Remove all of the instructions that end at this location.
  4605. InstrList &List = TransposeEnds[i];
  4606. for (Instruction *ToRemove : List)
  4607. OpenIntervals.erase(ToRemove);
  4608. // Ignore instructions that are never used within the loop.
  4609. if (Ends.find(I) == Ends.end())
  4610. continue;
  4611. // Skip ignored values.
  4612. if (ValuesToIgnore.find(I) != ValuesToIgnore.end())
  4613. continue;
  4614. // For each VF find the maximum usage of registers.
  4615. for (unsigned j = 0, e = VFs.size(); j < e; ++j) {
  4616. if (VFs[j] == 1) {
  4617. MaxUsages[j] = std::max(MaxUsages[j], OpenIntervals.size());
  4618. continue;
  4619. }
  4620. collectUniformsAndScalars(VFs[j]);
  4621. // Count the number of live intervals.
  4622. unsigned RegUsage = 0;
  4623. for (auto Inst : OpenIntervals) {
  4624. // Skip ignored values for VF > 1.
  4625. if (VecValuesToIgnore.find(Inst) != VecValuesToIgnore.end() ||
  4626. isScalarAfterVectorization(Inst, VFs[j]))
  4627. continue;
  4628. RegUsage += GetRegUsage(Inst->getType(), VFs[j]);
  4629. }
  4630. MaxUsages[j] = std::max(MaxUsages[j], RegUsage);
  4631. }
  4632. LLVM_DEBUG(dbgs() << "LV(REG): At #" << i << " Interval # "
  4633. << OpenIntervals.size() << '\n');
  4634. // Add the current instruction to the list of open intervals.
  4635. OpenIntervals.insert(I);
  4636. }
  4637. for (unsigned i = 0, e = VFs.size(); i < e; ++i) {
  4638. unsigned Invariant = 0;
  4639. if (VFs[i] == 1)
  4640. Invariant = LoopInvariants.size();
  4641. else {
  4642. for (auto Inst : LoopInvariants)
  4643. Invariant += GetRegUsage(Inst->getType(), VFs[i]);
  4644. }
  4645. LLVM_DEBUG(dbgs() << "LV(REG): VF = " << VFs[i] << '\n');
  4646. LLVM_DEBUG(dbgs() << "LV(REG): Found max usage: " << MaxUsages[i] << '\n');
  4647. LLVM_DEBUG(dbgs() << "LV(REG): Found invariant usage: " << Invariant
  4648. << '\n');
  4649. RU.LoopInvariantRegs = Invariant;
  4650. RU.MaxLocalUsers = MaxUsages[i];
  4651. RUs[i] = RU;
  4652. }
  4653. return RUs;
  4654. }
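// A compact sketch (illustrative only) of the interval bookkeeping above:
// instructions are numbered in program order, each value's live interval runs
// from its definition index to its last-use index, and the register estimate
// is the largest number of intervals open at any single index. The quadratic
// scan is deliberately naive; it only mirrors the idea, not the data
// structures of calculateRegisterUsage.
#include <algorithm>
#include <utility>
#include <vector>
static unsigned maxOpenIntervals(
    const std::vector<std::pair<unsigned, unsigned>> &DefAndLastUse) {
  unsigned Max = 0;
  for (unsigned Idx = 0;; ++Idx) {
    unsigned Open = 0;
    bool AnyRemaining = false;
    for (const auto &Interval : DefAndLastUse) {
      if (Interval.second >= Idx)
        AnyRemaining = true;
      if (Interval.first <= Idx && Idx <= Interval.second)
        ++Open; // this value is live at Idx
    }
    Max = std::max(Max, Open);
    if (!AnyRemaining)
      break;
  }
  return Max;
}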
4655. bool LoopVectorizationCostModel::useEmulatedMaskMemRefHack(Instruction *I) {
  4656. // TODO: Cost model for emulated masked load/store is completely
  4657. // broken. This hack guides the cost model to use an artificially
  4658. // high enough value to practically disable vectorization with such
  4659. // operations, except where previously deployed legality hack allowed
  4660. // using very low cost values. This is to avoid regressions coming simply
  4661. // from moving "masked load/store" check from legality to cost model.
  4662. // Masked Load/Gather emulation was previously never allowed.
4663. // A limited amount of masked store/scatter emulation was allowed.
  4664. assert(isPredicatedInst(I) && "Expecting a scalar emulated instruction");
  4665. return isa<LoadInst>(I) ||
  4666. (isa<StoreInst>(I) &&
  4667. NumPredStores > NumberOfStoresToPredicate);
  4668. }
  4669. void LoopVectorizationCostModel::collectInstsToScalarize(unsigned VF) {
  4670. // If we aren't vectorizing the loop, or if we've already collected the
  4671. // instructions to scalarize, there's nothing to do. Collection may already
  4672. // have occurred if we have a user-selected VF and are now computing the
  4673. // expected cost for interleaving.
  4674. if (VF < 2 || InstsToScalarize.find(VF) != InstsToScalarize.end())
  4675. return;
4676. // Initialize a mapping for VF in InstsToScalarize. If we find that it's
  4677. // not profitable to scalarize any instructions, the presence of VF in the
  4678. // map will indicate that we've analyzed it already.
  4679. ScalarCostsTy &ScalarCostsVF = InstsToScalarize[VF];
  4680. // Find all the instructions that are scalar with predication in the loop and
4681. // determine whether it would be better not to if-convert the blocks they are in.
  4682. // If so, we also record the instructions to scalarize.
  4683. for (BasicBlock *BB : TheLoop->blocks()) {
  4684. if (!blockNeedsPredication(BB))
  4685. continue;
  4686. for (Instruction &I : *BB)
  4687. if (isScalarWithPredication(&I)) {
  4688. ScalarCostsTy ScalarCosts;
  4689. // Do not apply discount logic if hacked cost is needed
  4690. // for emulated masked memrefs.
  4691. if (!useEmulatedMaskMemRefHack(&I) &&
  4692. computePredInstDiscount(&I, ScalarCosts, VF) >= 0)
  4693. ScalarCostsVF.insert(ScalarCosts.begin(), ScalarCosts.end());
  4694. // Remember that BB will remain after vectorization.
  4695. PredicatedBBsAfterVectorization.insert(BB);
  4696. }
  4697. }
  4698. }
  4699. int LoopVectorizationCostModel::computePredInstDiscount(
  4700. Instruction *PredInst, DenseMap<Instruction *, unsigned> &ScalarCosts,
  4701. unsigned VF) {
  4702. assert(!isUniformAfterVectorization(PredInst, VF) &&
  4703. "Instruction marked uniform-after-vectorization will be predicated");
  4704. // Initialize the discount to zero, meaning that the scalar version and the
  4705. // vector version cost the same.
  4706. int Discount = 0;
  4707. // Holds instructions to analyze. The instructions we visit are mapped in
  4708. // ScalarCosts. Those instructions are the ones that would be scalarized if
  4709. // we find that the scalar version costs less.
  4710. SmallVector<Instruction *, 8> Worklist;
  4711. // Returns true if the given instruction can be scalarized.
  4712. auto canBeScalarized = [&](Instruction *I) -> bool {
  4713. // We only attempt to scalarize instructions forming a single-use chain
  4714. // from the original predicated block that would otherwise be vectorized.
  4715. // Although not strictly necessary, we give up on instructions we know will
  4716. // already be scalar to avoid traversing chains that are unlikely to be
  4717. // beneficial.
  4718. if (!I->hasOneUse() || PredInst->getParent() != I->getParent() ||
  4719. isScalarAfterVectorization(I, VF))
  4720. return false;
  4721. // If the instruction is scalar with predication, it will be analyzed
  4722. // separately. We ignore it within the context of PredInst.
  4723. if (isScalarWithPredication(I))
  4724. return false;
  4725. // If any of the instruction's operands are uniform after vectorization,
  4726. // the instruction cannot be scalarized. This prevents, for example, a
  4727. // masked load from being scalarized.
  4728. //
  4729. // We assume we will only emit a value for lane zero of an instruction
  4730. // marked uniform after vectorization, rather than VF identical values.
  4731. // Thus, if we scalarize an instruction that uses a uniform, we would
  4732. // create uses of values corresponding to the lanes we aren't emitting code
  4733. // for. This behavior can be changed by allowing getScalarValue to clone
  4734. // the lane zero values for uniforms rather than asserting.
  4735. for (Use &U : I->operands())
  4736. if (auto *J = dyn_cast<Instruction>(U.get()))
  4737. if (isUniformAfterVectorization(J, VF))
  4738. return false;
  4739. // Otherwise, we can scalarize the instruction.
  4740. return true;
  4741. };
  4742. // Compute the expected cost discount from scalarizing the entire expression
  4743. // feeding the predicated instruction. We currently only consider expressions
  4744. // that are single-use instruction chains.
  4745. Worklist.push_back(PredInst);
  4746. while (!Worklist.empty()) {
  4747. Instruction *I = Worklist.pop_back_val();
  4748. // If we've already analyzed the instruction, there's nothing to do.
  4749. if (ScalarCosts.find(I) != ScalarCosts.end())
  4750. continue;
  4751. // Compute the cost of the vector instruction. Note that this cost already
  4752. // includes the scalarization overhead of the predicated instruction.
  4753. unsigned VectorCost = getInstructionCost(I, VF).first;
  4754. // Compute the cost of the scalarized instruction. This cost is the cost of
  4755. // the instruction as if it wasn't if-converted and instead remained in the
  4756. // predicated block. We will scale this cost by block probability after
  4757. // computing the scalarization overhead.
  4758. unsigned ScalarCost = VF * getInstructionCost(I, 1).first;
  4759. // Compute the scalarization overhead of needed insertelement instructions
  4760. // and phi nodes.
  4761. if (isScalarWithPredication(I) && !I->getType()->isVoidTy()) {
  4762. ScalarCost += TTI.getScalarizationOverhead(ToVectorTy(I->getType(), VF),
  4763. true, false);
  4764. ScalarCost += VF * TTI.getCFInstrCost(Instruction::PHI);
  4765. }
  4766. // Compute the scalarization overhead of needed extractelement
  4767. // instructions. For each of the instruction's operands, if the operand can
  4768. // be scalarized, add it to the worklist; otherwise, account for the
  4769. // overhead.
  4770. for (Use &U : I->operands())
  4771. if (auto *J = dyn_cast<Instruction>(U.get())) {
  4772. assert(VectorType::isValidElementType(J->getType()) &&
  4773. "Instruction has non-scalar type");
  4774. if (canBeScalarized(J))
  4775. Worklist.push_back(J);
  4776. else if (needsExtract(J, VF))
  4777. ScalarCost += TTI.getScalarizationOverhead(
  4778. ToVectorTy(J->getType(),VF), false, true);
  4779. }
  4780. // Scale the total scalar cost by block probability.
  4781. ScalarCost /= getReciprocalPredBlockProb();
  4782. // Compute the discount. A non-negative discount means the vector version
  4783. // of the instruction costs more, and scalarizing would be beneficial.
  4784. Discount += VectorCost - ScalarCost;
  4785. ScalarCosts[I] = ScalarCost;
  4786. }
  4787. return Discount;
  4788. }
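// A small worked sketch (hypothetical helper with illustrative costs) of the
// discount above: the scalar cost of all lanes is scaled down by the
// probability that the predicated block executes (the reciprocal is 2 by
// default), and a positive result means scalarization is expected to beat the
// if-converted vector form.
static int sketchPredDiscount(unsigned VectorCost, unsigned ScalarCostAllLanes,
                              unsigned ReciprocalPredBlockProb /* e.g. 2 */) {
  unsigned ScaledScalarCost = ScalarCostAllLanes / ReciprocalPredBlockProb;
  return static_cast<int>(VectorCost) - static_cast<int>(ScaledScalarCost);
}
// For example, VectorCost = 12 and ScalarCostAllLanes = 16 give a discount of
// 12 - 16/2 = 4, so the chain would be recorded in ScalarCosts.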
  4789. LoopVectorizationCostModel::VectorizationCostTy
  4790. LoopVectorizationCostModel::expectedCost(unsigned VF) {
  4791. VectorizationCostTy Cost;
  4792. // For each block.
  4793. for (BasicBlock *BB : TheLoop->blocks()) {
  4794. VectorizationCostTy BlockCost;
  4795. // For each instruction in the old loop.
  4796. for (Instruction &I : BB->instructionsWithoutDebug()) {
  4797. // Skip ignored values.
  4798. if (ValuesToIgnore.find(&I) != ValuesToIgnore.end() ||
  4799. (VF > 1 && VecValuesToIgnore.find(&I) != VecValuesToIgnore.end()))
  4800. continue;
  4801. VectorizationCostTy C = getInstructionCost(&I, VF);
  4802. // Check if we should override the cost.
  4803. if (ForceTargetInstructionCost.getNumOccurrences() > 0)
  4804. C.first = ForceTargetInstructionCost;
  4805. BlockCost.first += C.first;
  4806. BlockCost.second |= C.second;
  4807. LLVM_DEBUG(dbgs() << "LV: Found an estimated cost of " << C.first
  4808. << " for VF " << VF << " For instruction: " << I
  4809. << '\n');
  4810. }
  4811. // If we are vectorizing a predicated block, it will have been
  4812. // if-converted. This means that the block's instructions (aside from
  4813. // stores and instructions that may divide by zero) will now be
  4814. // unconditionally executed. For the scalar case, we may not always execute
  4815. // the predicated block. Thus, scale the block's cost by the probability of
  4816. // executing it.
  4817. if (VF == 1 && blockNeedsPredication(BB))
  4818. BlockCost.first /= getReciprocalPredBlockProb();
  4819. Cost.first += BlockCost.first;
  4820. Cost.second |= BlockCost.second;
  4821. }
  4822. return Cost;
  4823. }
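// A minimal sketch (illustrative only) of the per-block aggregation above:
// block costs are summed, and at VF == 1 a predicated block's cost is divided
// by the reciprocal block probability because the scalar loop does not always
// execute that block. SketchBlock and its fields are hypothetical.
#include <vector>
struct SketchBlock {
  unsigned Cost;
  bool NeedsPredication;
};
static unsigned sketchExpectedCost(const std::vector<SketchBlock> &Blocks,
                                   unsigned VF,
                                   unsigned ReciprocalPredBlockProb /* 2 */) {
  unsigned Total = 0;
  for (const SketchBlock &B : Blocks) {
    unsigned BlockCost = B.Cost;
    if (VF == 1 && B.NeedsPredication)
      BlockCost /= ReciprocalPredBlockProb; // scale by execution probability
    Total += BlockCost;
  }
  return Total;
}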
  4824. /// Gets Address Access SCEV after verifying that the access pattern
  4825. /// is loop invariant except the induction variable dependence.
  4826. ///
  4827. /// This SCEV can be sent to the Target in order to estimate the address
  4828. /// calculation cost.
  4829. static const SCEV *getAddressAccessSCEV(
  4830. Value *Ptr,
  4831. LoopVectorizationLegality *Legal,
  4832. PredicatedScalarEvolution &PSE,
  4833. const Loop *TheLoop) {
  4834. auto *Gep = dyn_cast<GetElementPtrInst>(Ptr);
  4835. if (!Gep)
  4836. return nullptr;
  4837. // We are looking for a gep with all loop invariant indices except for one
  4838. // which should be an induction variable.
  4839. auto SE = PSE.getSE();
  4840. unsigned NumOperands = Gep->getNumOperands();
  4841. for (unsigned i = 1; i < NumOperands; ++i) {
  4842. Value *Opd = Gep->getOperand(i);
  4843. if (!SE->isLoopInvariant(SE->getSCEV(Opd), TheLoop) &&
  4844. !Legal->isInductionVariable(Opd))
  4845. return nullptr;
  4846. }
4847. // Now we know we have a GEP ptr, %inv, %ind, %inv. Return the Ptr SCEV.
  4848. return PSE.getSCEV(Ptr);
  4849. }
  4850. static bool isStrideMul(Instruction *I, LoopVectorizationLegality *Legal) {
  4851. return Legal->hasStride(I->getOperand(0)) ||
  4852. Legal->hasStride(I->getOperand(1));
  4853. }
  4854. unsigned LoopVectorizationCostModel::getMemInstScalarizationCost(Instruction *I,
  4855. unsigned VF) {
  4856. assert(VF > 1 && "Scalarization cost of instruction implies vectorization.");
  4857. Type *ValTy = getMemInstValueType(I);
  4858. auto SE = PSE.getSE();
  4859. unsigned Alignment = getLoadStoreAlignment(I);
  4860. unsigned AS = getLoadStoreAddressSpace(I);
  4861. Value *Ptr = getLoadStorePointerOperand(I);
  4862. Type *PtrTy = ToVectorTy(Ptr->getType(), VF);
  4863. // Figure out whether the access is strided and get the stride value
4864. // if it's known at compile time.
  4865. const SCEV *PtrSCEV = getAddressAccessSCEV(Ptr, Legal, PSE, TheLoop);
  4866. // Get the cost of the scalar memory instruction and address computation.
  4867. unsigned Cost = VF * TTI.getAddressComputationCost(PtrTy, SE, PtrSCEV);
  4868. // Don't pass *I here, since it is scalar but will actually be part of a
  4869. // vectorized loop where the user of it is a vectorized instruction.
  4870. Cost += VF *
  4871. TTI.getMemoryOpCost(I->getOpcode(), ValTy->getScalarType(), Alignment,
  4872. AS);
  4873. // Get the overhead of the extractelement and insertelement instructions
  4874. // we might create due to scalarization.
  4875. Cost += getScalarizationOverhead(I, VF);
4876. // If we have a predicated load or store, it may not be executed for each
4877. // vector lane. Scale the cost by the probability of executing the predicated
  4878. // block.
  4879. if (isPredicatedInst(I)) {
  4880. Cost /= getReciprocalPredBlockProb();
  4881. if (useEmulatedMaskMemRefHack(I))
  4882. // Artificially setting to a high enough value to practically disable
  4883. // vectorization with such operations.
  4884. Cost = 3000000;
  4885. }
  4886. return Cost;
  4887. }
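// A condensed sketch (hypothetical helper) of the scalarization cost above,
// with every input an illustrative per-instance cost: VF scalar address
// computations plus VF scalar memory ops plus the insert/extract overhead,
// optionally scaled by the predicated-block probability and pushed to a huge
// value under the emulated-mask hack, as in the code above.
static unsigned sketchScalarizedMemCost(unsigned VF, unsigned AddrCost,
                                        unsigned ScalarMemOpCost,
                                        unsigned ScalarizationOverhead,
                                        bool Predicated, bool EmulatedMaskHack,
                                        unsigned ReciprocalPredBlockProb) {
  unsigned Cost = VF * (AddrCost + ScalarMemOpCost) + ScalarizationOverhead;
  if (Predicated) {
    Cost /= ReciprocalPredBlockProb;
    if (EmulatedMaskHack)
      Cost = 3000000; // artificially high to disable vectorization
  }
  return Cost;
}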
  4888. unsigned LoopVectorizationCostModel::getConsecutiveMemOpCost(Instruction *I,
  4889. unsigned VF) {
  4890. Type *ValTy = getMemInstValueType(I);
  4891. Type *VectorTy = ToVectorTy(ValTy, VF);
  4892. unsigned Alignment = getLoadStoreAlignment(I);
  4893. Value *Ptr = getLoadStorePointerOperand(I);
  4894. unsigned AS = getLoadStoreAddressSpace(I);
  4895. int ConsecutiveStride = Legal->isConsecutivePtr(Ptr);
  4896. assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) &&
  4897. "Stride should be 1 or -1 for consecutive memory access");
  4898. unsigned Cost = 0;
  4899. if (Legal->isMaskRequired(I))
  4900. Cost += TTI.getMaskedMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS);
  4901. else
  4902. Cost += TTI.getMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS, I);
  4903. bool Reverse = ConsecutiveStride < 0;
  4904. if (Reverse)
  4905. Cost += TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, 0);
  4906. return Cost;
  4907. }

unsigned LoopVectorizationCostModel::getUniformMemOpCost(Instruction *I,
                                                         unsigned VF) {
  Type *ValTy = getMemInstValueType(I);
  Type *VectorTy = ToVectorTy(ValTy, VF);
  unsigned Alignment = getLoadStoreAlignment(I);
  unsigned AS = getLoadStoreAddressSpace(I);
  if (isa<LoadInst>(I)) {
    return TTI.getAddressComputationCost(ValTy) +
           TTI.getMemoryOpCost(Instruction::Load, ValTy, Alignment, AS) +
           TTI.getShuffleCost(TargetTransformInfo::SK_Broadcast, VectorTy);
  }
  StoreInst *SI = cast<StoreInst>(I);

  bool isLoopInvariantStoreValue = Legal->isUniform(SI->getValueOperand());
  return TTI.getAddressComputationCost(ValTy) +
         TTI.getMemoryOpCost(Instruction::Store, ValTy, Alignment, AS) +
         (isLoopInvariantStoreValue ? 0 : TTI.getVectorInstrCost(
                                              Instruction::ExtractElement,
                                              VectorTy, VF - 1));
}
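
// Editorial sketch (not from this file): the shape of the uniform-access cost
// above, with illustrative component costs standing in for real TTI queries.
static unsigned exampleUniformMemOpCost(bool IsLoad, bool StoredValueInvariant,
                                        unsigned AddrCost, unsigned ScalarOpCost,
                                        unsigned BroadcastCost,
                                        unsigned ExtractCost) {
  if (IsLoad) // One scalar load, then broadcast the loaded value to all lanes.
    return AddrCost + ScalarOpCost + BroadcastCost;
  // One scalar store; if the stored value varies per lane, the last lane must
  // first be extracted from the vector.
  return AddrCost + ScalarOpCost + (StoredValueInvariant ? 0 : ExtractCost);
}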

unsigned LoopVectorizationCostModel::getGatherScatterCost(Instruction *I,
                                                          unsigned VF) {
  Type *ValTy = getMemInstValueType(I);
  Type *VectorTy = ToVectorTy(ValTy, VF);
  unsigned Alignment = getLoadStoreAlignment(I);
  Value *Ptr = getLoadStorePointerOperand(I);

  return TTI.getAddressComputationCost(VectorTy) +
         TTI.getGatherScatterOpCost(I->getOpcode(), VectorTy, Ptr,
                                    Legal->isMaskRequired(I), Alignment);
}

unsigned LoopVectorizationCostModel::getInterleaveGroupCost(Instruction *I,
                                                            unsigned VF) {
  Type *ValTy = getMemInstValueType(I);
  Type *VectorTy = ToVectorTy(ValTy, VF);
  unsigned AS = getLoadStoreAddressSpace(I);

  auto Group = getInterleavedAccessGroup(I);
  assert(Group && "Fail to get an interleaved access group.");

  unsigned InterleaveFactor = Group->getFactor();
  Type *WideVecTy = VectorType::get(ValTy, VF * InterleaveFactor);

  // Holds the indices of existing members in an interleaved load group.
  // An interleaved store group doesn't need this as it doesn't allow gaps.
  SmallVector<unsigned, 4> Indices;
  if (isa<LoadInst>(I)) {
    for (unsigned i = 0; i < InterleaveFactor; i++)
      if (Group->getMember(i))
        Indices.push_back(i);
  }

  // Calculate the cost of the whole interleaved group.
  bool UseMaskForGaps =
      Group->requiresScalarEpilogue() && !isScalarEpilogueAllowed();
  unsigned Cost = TTI.getInterleavedMemoryOpCost(
      I->getOpcode(), WideVecTy, Group->getFactor(), Indices,
      Group->getAlignment(), AS, Legal->isMaskRequired(I), UseMaskForGaps);

  if (Group->isReverse()) {
    // TODO: Add support for reversed masked interleaved access.
    assert(!Legal->isMaskRequired(I) &&
           "Reverse masked interleaved access not supported.");
    Cost += Group->getNumMembers() *
            TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, 0);
  }
  return Cost;
}
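
// Editorial sketch (not from this file): how the member indices above are
// collected for a load group that may have gaps. Factor and the membership
// bitmap are hypothetical inputs standing in for the InterleaveGroup queries.
static unsigned exampleCollectLoadGroupIndices(const bool *HasMember,
                                               unsigned Factor,
                                               unsigned *Indices) {
  // Only positions that actually have a member are costed; store groups never
  // have gaps, so they skip this step entirely.
  unsigned NumIndices = 0;
  for (unsigned i = 0; i < Factor; ++i)
    if (HasMember[i])
      Indices[NumIndices++] = i;
  return NumIndices; // e.g. {true, false, true} with Factor=3 -> indices {0, 2}.
}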

unsigned LoopVectorizationCostModel::getMemoryInstructionCost(Instruction *I,
                                                              unsigned VF) {
  // Calculate scalar cost only. Vectorization cost should be ready at this
  // moment.
  if (VF == 1) {
    Type *ValTy = getMemInstValueType(I);
    unsigned Alignment = getLoadStoreAlignment(I);
    unsigned AS = getLoadStoreAddressSpace(I);

    return TTI.getAddressComputationCost(ValTy) +
           TTI.getMemoryOpCost(I->getOpcode(), ValTy, Alignment, AS, I);
  }
  return getWideningCost(I, VF);
}

LoopVectorizationCostModel::VectorizationCostTy
LoopVectorizationCostModel::getInstructionCost(Instruction *I, unsigned VF) {
  // If we know that this instruction will remain uniform, check the cost of
  // the scalar version.
  if (isUniformAfterVectorization(I, VF))
    VF = 1;

  if (VF > 1 && isProfitableToScalarize(I, VF))
    return VectorizationCostTy(InstsToScalarize[VF][I], false);

  // Forced scalars do not have any scalarization overhead.
  auto ForcedScalar = ForcedScalars.find(VF);
  if (VF > 1 && ForcedScalar != ForcedScalars.end()) {
    auto InstSet = ForcedScalar->second;
    if (InstSet.find(I) != InstSet.end())
      return VectorizationCostTy((getInstructionCost(I, 1).first * VF), false);
  }

  Type *VectorTy;
  unsigned C = getInstructionCost(I, VF, VectorTy);

  bool TypeNotScalarized =
      VF > 1 && VectorTy->isVectorTy() && TTI.getNumberOfParts(VectorTy) < VF;
  return VectorizationCostTy(C, TypeNotScalarized);
}
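
// Editorial sketch (not from this file): the "type not scalarized" test used
// above, with the number of register parts modelled as a simple ceiling
// division. The 128-bit register width is an assumption for the example only.
static bool exampleTypeNotScalarized(unsigned VF, unsigned ElemBits,
                                     unsigned RegBits = 128) {
  unsigned TypeBits = VF * ElemBits;
  unsigned NumParts = (TypeBits + RegBits - 1) / RegBits; // ceil division
  // A vector type counts as genuinely vectorized only if it needs fewer
  // register parts than VF, i.e. several lanes share one register.
  return VF > 1 && NumParts < VF; // e.g. VF=4, i32, 128-bit regs: 1 < 4.
}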

unsigned LoopVectorizationCostModel::getScalarizationOverhead(Instruction *I,
                                                              unsigned VF) {
  if (VF == 1)
    return 0;

  unsigned Cost = 0;
  Type *RetTy = ToVectorTy(I->getType(), VF);
  if (!RetTy->isVoidTy() &&
      (!isa<LoadInst>(I) || !TTI.supportsEfficientVectorElementLoadStore()))
    Cost += TTI.getScalarizationOverhead(RetTy, true, false);

  // Some targets keep addresses scalar.
  if (isa<LoadInst>(I) && !TTI.prefersVectorizedAddressing())
    return Cost;

  // Some targets support efficient element stores.
  if (isa<StoreInst>(I) && TTI.supportsEfficientVectorElementLoadStore())
    return Cost;

  // Collect operands to consider.
  CallInst *CI = dyn_cast<CallInst>(I);
  Instruction::op_range Ops = CI ? CI->arg_operands() : I->operands();

  // Skip operands that do not require extraction/scalarization and do not
  // incur any overhead.
  return Cost + TTI.getOperandsScalarizationOverhead(
                    filterExtractingOperands(Ops, VF), VF);
}

void LoopVectorizationCostModel::setCostBasedWideningDecision(unsigned VF) {
  if (VF == 1)
    return;
  NumPredStores = 0;
  for (BasicBlock *BB : TheLoop->blocks()) {
    // For each instruction in the old loop.
    for (Instruction &I : *BB) {
      Value *Ptr = getLoadStorePointerOperand(&I);
      if (!Ptr)
        continue;

      // TODO: We should generate better code and update the cost model for
      // predicated uniform stores. Today they are treated as any other
      // predicated store (see added test cases in
      // invariant-store-vectorization.ll).
      if (isa<StoreInst>(&I) && isScalarWithPredication(&I))
        NumPredStores++;

      if (Legal->isUniform(Ptr) &&
          // Conditional loads and stores should be scalarized and predicated.
          // isScalarWithPredication cannot be used here since masked
          // gather/scatters are not considered scalar with predication.
          !Legal->blockNeedsPredication(I.getParent())) {
        // TODO: Avoid replicating loads and stores instead of
        // relying on instcombine to remove them.
        // Load: Scalar load + broadcast
        // Store: Scalar store + isLoopInvariantStoreValue ? 0 : extract
        unsigned Cost = getUniformMemOpCost(&I, VF);
        setWideningDecision(&I, VF, CM_Scalarize, Cost);
        continue;
      }

      // We assume that widening is the best solution when possible.
      if (memoryInstructionCanBeWidened(&I, VF)) {
        unsigned Cost = getConsecutiveMemOpCost(&I, VF);
        int ConsecutiveStride =
            Legal->isConsecutivePtr(getLoadStorePointerOperand(&I));
        assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) &&
               "Expected consecutive stride.");
        InstWidening Decision =
            ConsecutiveStride == 1 ? CM_Widen : CM_Widen_Reverse;
        setWideningDecision(&I, VF, Decision, Cost);
        continue;
      }

      // Choose between Interleaving, Gather/Scatter or Scalarization.
      unsigned InterleaveCost = std::numeric_limits<unsigned>::max();
      unsigned NumAccesses = 1;
      if (isAccessInterleaved(&I)) {
        auto Group = getInterleavedAccessGroup(&I);
        assert(Group && "Fail to get an interleaved access group.");

        // Make one decision for the whole group.
        if (getWideningDecision(&I, VF) != CM_Unknown)
          continue;

        NumAccesses = Group->getNumMembers();
        if (interleavedAccessCanBeWidened(&I, VF))
          InterleaveCost = getInterleaveGroupCost(&I, VF);
      }

      unsigned GatherScatterCost =
          isLegalGatherOrScatter(&I)
              ? getGatherScatterCost(&I, VF) * NumAccesses
              : std::numeric_limits<unsigned>::max();

      unsigned ScalarizationCost =
          getMemInstScalarizationCost(&I, VF) * NumAccesses;

      // Choose the better solution for the current VF, record this decision,
      // and use it during vectorization.
      unsigned Cost;
      InstWidening Decision;
      if (InterleaveCost <= GatherScatterCost &&
          InterleaveCost < ScalarizationCost) {
        Decision = CM_Interleave;
        Cost = InterleaveCost;
      } else if (GatherScatterCost < ScalarizationCost) {
        Decision = CM_GatherScatter;
        Cost = GatherScatterCost;
      } else {
        Decision = CM_Scalarize;
        Cost = ScalarizationCost;
      }
      // If the instruction belongs to an interleave group, the whole group
      // receives the same decision. The whole group receives the cost, but
      // the cost will actually be assigned to one instruction.
      if (auto Group = getInterleavedAccessGroup(&I))
        setWideningDecision(Group, VF, Decision, Cost);
      else
        setWideningDecision(&I, VF, Decision, Cost);
    }
  }

  // Make sure that any load of address and any other address computation
  // remains scalar unless there is gather/scatter support. This avoids
  // inevitable extracts into address registers, and also has the benefit of
  // activating LSR more, since that pass can't optimize vectorized
  // addresses.
  if (TTI.prefersVectorizedAddressing())
    return;

  // Start with all scalar pointer uses.
  SmallPtrSet<Instruction *, 8> AddrDefs;
  for (BasicBlock *BB : TheLoop->blocks())
    for (Instruction &I : *BB) {
      Instruction *PtrDef =
          dyn_cast_or_null<Instruction>(getLoadStorePointerOperand(&I));
      if (PtrDef && TheLoop->contains(PtrDef) &&
          getWideningDecision(&I, VF) != CM_GatherScatter)
        AddrDefs.insert(PtrDef);
    }

  // Add all instructions used to generate the addresses.
  SmallVector<Instruction *, 4> Worklist;
  for (auto *I : AddrDefs)
    Worklist.push_back(I);
  while (!Worklist.empty()) {
    Instruction *I = Worklist.pop_back_val();
    for (auto &Op : I->operands())
      if (auto *InstOp = dyn_cast<Instruction>(Op))
        if ((InstOp->getParent() == I->getParent()) && !isa<PHINode>(InstOp) &&
            AddrDefs.insert(InstOp).second)
          Worklist.push_back(InstOp);
  }

  for (auto *I : AddrDefs) {
    if (isa<LoadInst>(I)) {
      // Setting the desired widening decision should ideally be handled by
      // cost functions, but since this involves the task of finding out if
      // the loaded register is involved in an address computation, it is
      // instead changed here when we know this is the case.
      InstWidening Decision = getWideningDecision(I, VF);
      if (Decision == CM_Widen || Decision == CM_Widen_Reverse)
        // Scalarize a widened load of address.
        setWideningDecision(I, VF, CM_Scalarize,
                            (VF * getMemoryInstructionCost(I, 1)));
      else if (auto Group = getInterleavedAccessGroup(I)) {
        // Scalarize an interleave group of address loads.
        for (unsigned I = 0; I < Group->getFactor(); ++I) {
          if (Instruction *Member = Group->getMember(I))
            setWideningDecision(Member, VF, CM_Scalarize,
                                (VF * getMemoryInstructionCost(Member, 1)));
        }
      }
    } else
      // Make sure I gets scalarized and gets a cost estimate without
      // scalarization overhead.
      ForcedScalars[VF].insert(I);
  }
}
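
// Editorial sketch (not from this file): the tie-breaking used above when
// choosing between interleaving, gather/scatter and scalarization. The enum
// and cost values are illustrative, not the pass's own types.
enum ExampleWideningKind { EWK_Interleave, EWK_GatherScatter, EWK_Scalarize };
static ExampleWideningKind examplePickWidening(unsigned InterleaveCost,
                                               unsigned GatherScatterCost,
                                               unsigned ScalarizationCost) {
  // Interleaving wins ties against gather/scatter but must beat scalarization
  // strictly; otherwise gather/scatter must beat scalarization strictly;
  // scalarization is the fallback.
  if (InterleaveCost <= GatherScatterCost && InterleaveCost < ScalarizationCost)
    return EWK_Interleave;
  if (GatherScatterCost < ScalarizationCost)
    return EWK_GatherScatter;
  return EWK_Scalarize;
}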

unsigned LoopVectorizationCostModel::getInstructionCost(Instruction *I,
                                                        unsigned VF,
                                                        Type *&VectorTy) {
  Type *RetTy = I->getType();
  if (canTruncateToMinimalBitwidth(I, VF))
    RetTy = IntegerType::get(RetTy->getContext(), MinBWs[I]);
  VectorTy = isScalarAfterVectorization(I, VF) ? RetTy : ToVectorTy(RetTy, VF);
  auto SE = PSE.getSE();

  // TODO: We need to estimate the cost of intrinsic calls.
  switch (I->getOpcode()) {
  case Instruction::GetElementPtr:
    // We mark this instruction as zero-cost because the cost of GEPs in
    // vectorized code depends on whether the corresponding memory instruction
    // is scalarized or not. Therefore, we handle GEPs with the memory
    // instruction cost.
    return 0;
  case Instruction::Br: {
    // In cases of scalarized and predicated instructions, there will be VF
    // predicated blocks in the vectorized loop. Each branch around these
    // blocks requires also an extract of its vector compare i1 element.
    bool ScalarPredicatedBB = false;
    BranchInst *BI = cast<BranchInst>(I);
    if (VF > 1 && BI->isConditional() &&
        (PredicatedBBsAfterVectorization.find(BI->getSuccessor(0)) !=
             PredicatedBBsAfterVectorization.end() ||
         PredicatedBBsAfterVectorization.find(BI->getSuccessor(1)) !=
             PredicatedBBsAfterVectorization.end()))
      ScalarPredicatedBB = true;

    if (ScalarPredicatedBB) {
      // Return cost for branches around scalarized and predicated blocks.
      Type *Vec_i1Ty =
          VectorType::get(IntegerType::getInt1Ty(RetTy->getContext()), VF);
      return (TTI.getScalarizationOverhead(Vec_i1Ty, false, true) +
              (TTI.getCFInstrCost(Instruction::Br) * VF));
    } else if (I->getParent() == TheLoop->getLoopLatch() || VF == 1)
      // The back-edge branch will remain, as will all scalar branches.
      return TTI.getCFInstrCost(Instruction::Br);
    else
      // This branch will be eliminated by if-conversion.
      return 0;
    // Note: We currently assume zero cost for an unconditional branch inside
    // a predicated block since it will become a fall-through, although we
    // may decide in the future to call TTI for all branches.
  }
  case Instruction::PHI: {
    auto *Phi = cast<PHINode>(I);

    // First-order recurrences are replaced by vector shuffles inside the loop.
    // NOTE: Don't use ToVectorTy as SK_ExtractSubvector expects a vector type.
    if (VF > 1 && Legal->isFirstOrderRecurrence(Phi))
      return TTI.getShuffleCost(TargetTransformInfo::SK_ExtractSubvector,
                                VectorTy, VF - 1, VectorType::get(RetTy, 1));

    // Phi nodes in non-header blocks (not inductions, reductions, etc.) are
    // converted into select instructions. We require N - 1 selects per phi
    // node, where N is the number of incoming values.
    if (VF > 1 && Phi->getParent() != TheLoop->getHeader())
      return (Phi->getNumIncomingValues() - 1) *
             TTI.getCmpSelInstrCost(
                 Instruction::Select, ToVectorTy(Phi->getType(), VF),
                 ToVectorTy(Type::getInt1Ty(Phi->getContext()), VF));

    return TTI.getCFInstrCost(Instruction::PHI);
  }
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::URem:
  case Instruction::SRem:
    // If we have a predicated instruction, it may not be executed for each
    // vector lane. Get the scalarization cost and scale this amount by the
    // probability of executing the predicated block. If the instruction is not
    // predicated, we fall through to the next case.
    if (VF > 1 && isScalarWithPredication(I)) {
      unsigned Cost = 0;

      // These instructions have a non-void type, so account for the phi nodes
      // that we will create. This cost is likely to be zero. The phi node
      // cost, if any, should be scaled by the block probability because it
      // models a copy at the end of each predicated block.
      Cost += VF * TTI.getCFInstrCost(Instruction::PHI);

      // The cost of the non-predicated instruction.
      Cost += VF * TTI.getArithmeticInstrCost(I->getOpcode(), RetTy);

      // The cost of insertelement and extractelement instructions needed for
      // scalarization.
      Cost += getScalarizationOverhead(I, VF);

      // Scale the cost by the probability of executing the predicated blocks.
      // This assumes the predicated block for each vector lane is equally
      // likely.
      return Cost / getReciprocalPredBlockProb();
    }
    LLVM_FALLTHROUGH;
  case Instruction::Add:
  case Instruction::FAdd:
  case Instruction::Sub:
  case Instruction::FSub:
  case Instruction::Mul:
  case Instruction::FMul:
  case Instruction::FDiv:
  case Instruction::FRem:
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor: {
    // Since we will replace the stride by 1 the multiplication should go away.
    if (I->getOpcode() == Instruction::Mul && isStrideMul(I, Legal))
      return 0;
    // Certain instructions can be cheaper to vectorize if they have a constant
    // second vector operand. One example of this are shifts on x86.
    Value *Op2 = I->getOperand(1);
    TargetTransformInfo::OperandValueProperties Op2VP;
    TargetTransformInfo::OperandValueKind Op2VK =
        TTI.getOperandInfo(Op2, Op2VP);
    if (Op2VK == TargetTransformInfo::OK_AnyValue && Legal->isUniform(Op2))
      Op2VK = TargetTransformInfo::OK_UniformValue;

    SmallVector<const Value *, 4> Operands(I->operand_values());
    unsigned N = isScalarAfterVectorization(I, VF) ? VF : 1;
    return N * TTI.getArithmeticInstrCost(
                   I->getOpcode(), VectorTy, TargetTransformInfo::OK_AnyValue,
                   Op2VK, TargetTransformInfo::OP_None, Op2VP, Operands);
  }
  case Instruction::FNeg: {
    unsigned N = isScalarAfterVectorization(I, VF) ? VF : 1;
    return N * TTI.getArithmeticInstrCost(
                   I->getOpcode(), VectorTy, TargetTransformInfo::OK_AnyValue,
                   TargetTransformInfo::OK_AnyValue,
                   TargetTransformInfo::OP_None, TargetTransformInfo::OP_None,
                   I->getOperand(0));
  }
  case Instruction::Select: {
    SelectInst *SI = cast<SelectInst>(I);
    const SCEV *CondSCEV = SE->getSCEV(SI->getCondition());
    bool ScalarCond = (SE->isLoopInvariant(CondSCEV, TheLoop));
    Type *CondTy = SI->getCondition()->getType();
    if (!ScalarCond)
      CondTy = VectorType::get(CondTy, VF);

    return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, CondTy, I);
  }
  case Instruction::ICmp:
  case Instruction::FCmp: {
    Type *ValTy = I->getOperand(0)->getType();
    Instruction *Op0AsInstruction = dyn_cast<Instruction>(I->getOperand(0));
    if (canTruncateToMinimalBitwidth(Op0AsInstruction, VF))
      ValTy = IntegerType::get(ValTy->getContext(), MinBWs[Op0AsInstruction]);
    VectorTy = ToVectorTy(ValTy, VF);
    return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, nullptr, I);
  }
  case Instruction::Store:
  case Instruction::Load: {
    unsigned Width = VF;
    if (Width > 1) {
      InstWidening Decision = getWideningDecision(I, Width);
      assert(Decision != CM_Unknown &&
             "CM decision should be taken at this point");
      if (Decision == CM_Scalarize)
        Width = 1;
    }
    VectorTy = ToVectorTy(getMemInstValueType(I), Width);
    return getMemoryInstructionCost(I, VF);
  }
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::FPExt:
  case Instruction::PtrToInt:
  case Instruction::IntToPtr:
  case Instruction::SIToFP:
  case Instruction::UIToFP:
  case Instruction::Trunc:
  case Instruction::FPTrunc:
  case Instruction::BitCast: {
    // We optimize the truncation of induction variables having constant
    // integer steps. The cost of these truncations is the same as the scalar
    // operation.
    if (isOptimizableIVTruncate(I, VF)) {
      auto *Trunc = cast<TruncInst>(I);
      return TTI.getCastInstrCost(Instruction::Trunc, Trunc->getDestTy(),
                                  Trunc->getSrcTy(), Trunc);
    }

    Type *SrcScalarTy = I->getOperand(0)->getType();
    Type *SrcVecTy =
        VectorTy->isVectorTy() ? ToVectorTy(SrcScalarTy, VF) : SrcScalarTy;
    if (canTruncateToMinimalBitwidth(I, VF)) {
      // This cast is going to be shrunk. This may remove the cast or it might
      // turn it into a slightly different cast. For example, if MinBW == 16,
      // "zext i8 %1 to i32" becomes "zext i8 %1 to i16".
      //
      // Calculate the modified src and dest types.
      Type *MinVecTy = VectorTy;
      if (I->getOpcode() == Instruction::Trunc) {
        SrcVecTy = smallestIntegerVectorType(SrcVecTy, MinVecTy);
        VectorTy =
            largestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy);
      } else if (I->getOpcode() == Instruction::ZExt ||
                 I->getOpcode() == Instruction::SExt) {
        SrcVecTy = largestIntegerVectorType(SrcVecTy, MinVecTy);
        VectorTy =
            smallestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy);
      }
    }

    unsigned N = isScalarAfterVectorization(I, VF) ? VF : 1;
    return N * TTI.getCastInstrCost(I->getOpcode(), VectorTy, SrcVecTy, I);
  }
  case Instruction::Call: {
    bool NeedToScalarize;
    CallInst *CI = cast<CallInst>(I);
    unsigned CallCost = getVectorCallCost(CI, VF, NeedToScalarize);
    if (getVectorIntrinsicIDForCall(CI, TLI))
      return std::min(CallCost, getVectorIntrinsicCost(CI, VF));
    return CallCost;
  }
  default:
    // The cost of executing VF copies of the scalar instruction. This opcode
    // is unknown. Assume that it is the same as 'mul'.
    return VF * TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy) +
           getScalarizationOverhead(I, VF);
  } // end of switch.
}
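
// Editorial sketch (not from this file): the cost model applied above to phi
// nodes outside the loop header, which if-conversion turns into a chain of
// selects. The per-select cost is an assumed value, not a TTI query.
static unsigned exampleNonHeaderPhiCost(unsigned NumIncomingValues,
                                        unsigned VectorSelectCost) {
  // N incoming values are merged by N - 1 vector selects.
  return (NumIncomingValues - 1) * VectorSelectCost; // e.g. 3 inputs -> 2 selects.
}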

char LoopVectorize::ID = 0;

static const char lv_name[] = "Loop Vectorization";

INITIALIZE_PASS_BEGIN(LoopVectorize, LV_NAME, lv_name, false, false)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(BasicAAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(BlockFrequencyInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopAccessLegacyAnalysis)
INITIALIZE_PASS_DEPENDENCY(DemandedBitsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass)
INITIALIZE_PASS_DEPENDENCY(ProfileSummaryInfoWrapperPass)
INITIALIZE_PASS_END(LoopVectorize, LV_NAME, lv_name, false, false)

namespace llvm {

Pass *createLoopVectorizePass() { return new LoopVectorize(); }

Pass *createLoopVectorizePass(bool InterleaveOnlyWhenForced,
                              bool VectorizeOnlyWhenForced) {
  return new LoopVectorize(InterleaveOnlyWhenForced, VectorizeOnlyWhenForced);
}

} // end namespace llvm

bool LoopVectorizationCostModel::isConsecutiveLoadOrStore(Instruction *Inst) {
  // Check if the pointer operand of a load or store instruction is
  // consecutive.
  if (auto *Ptr = getLoadStorePointerOperand(Inst))
    return Legal->isConsecutivePtr(Ptr);
  return false;
}

void LoopVectorizationCostModel::collectValuesToIgnore() {
  // Ignore ephemeral values.
  CodeMetrics::collectEphemeralValues(TheLoop, AC, ValuesToIgnore);

  // Ignore type-promoting instructions we identified during reduction
  // detection.
  for (auto &Reduction : *Legal->getReductionVars()) {
    RecurrenceDescriptor &RedDes = Reduction.second;
    SmallPtrSetImpl<Instruction *> &Casts = RedDes.getCastInsts();
    VecValuesToIgnore.insert(Casts.begin(), Casts.end());
  }
  // Ignore type-casting instructions we identified during induction
  // detection.
  for (auto &Induction : *Legal->getInductionVars()) {
    InductionDescriptor &IndDes = Induction.second;
    const SmallVectorImpl<Instruction *> &Casts = IndDes.getCastInsts();
    VecValuesToIgnore.insert(Casts.begin(), Casts.end());
  }
}

// TODO: we could return a pair of values that specify the max VF and
// min VF, to be used in `buildVPlans(MinVF, MaxVF)` instead of
// `buildVPlans(VF, VF)`. We cannot do it because VPlan at the moment
// doesn't have a cost model that can choose which plan to execute if
// more than one is generated.
static unsigned determineVPlanVF(const unsigned WidestVectorRegBits,
                                 LoopVectorizationCostModel &CM) {
  unsigned WidestType;
  std::tie(std::ignore, WidestType) = CM.getSmallestAndWidestTypes();
  return WidestVectorRegBits / WidestType;
}
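
// Editorial sketch (not from this file): the arithmetic performed by
// determineVPlanVF above. The 256-bit register width and 32-bit widest type
// are example inputs only.
static unsigned exampleDetermineVPlanVF(unsigned WidestVectorRegBits,
                                        unsigned WidestTypeBits) {
  // As many of the widest elements as fit in one vector register.
  return WidestVectorRegBits / WidestTypeBits; // e.g. 256 / 32 = 8.
}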

VectorizationFactor
LoopVectorizationPlanner::planInVPlanNativePath(unsigned UserVF) {
  unsigned VF = UserVF;
  // Outer loop handling: They may require CFG and instruction level
  // transformations before even evaluating whether vectorization is profitable.
  // Since we cannot modify the incoming IR, we need to build VPlan upfront in
  // the vectorization pipeline.
  if (!OrigLoop->empty()) {
    // If the user doesn't provide a vectorization factor, determine a
    // reasonable one.
    if (!UserVF) {
      VF = determineVPlanVF(TTI->getRegisterBitWidth(true /* Vector*/), CM);
      LLVM_DEBUG(dbgs() << "LV: VPlan computed VF " << VF << ".\n");

      // Make sure we have a VF > 1 for stress testing.
      if (VPlanBuildStressTest && VF < 2) {
        LLVM_DEBUG(dbgs() << "LV: VPlan stress testing: "
                          << "overriding computed VF.\n");
        VF = 4;
      }
    }
    assert(EnableVPlanNativePath && "VPlan-native path is not enabled.");
    assert(isPowerOf2_32(VF) && "VF needs to be a power of two");
    LLVM_DEBUG(dbgs() << "LV: Using " << (UserVF ? "user " : "") << "VF " << VF
                      << " to build VPlans.\n");
    buildVPlans(VF, VF);

    // For VPlan build stress testing, we bail out after VPlan construction.
    if (VPlanBuildStressTest)
      return VectorizationFactor::Disabled();

    return {VF, 0};
  }

  LLVM_DEBUG(
      dbgs() << "LV: Not vectorizing. Inner loops aren't supported in the "
                "VPlan-native path.\n");
  return VectorizationFactor::Disabled();
}

Optional<VectorizationFactor> LoopVectorizationPlanner::plan(unsigned UserVF) {
  assert(OrigLoop->empty() && "Inner loop expected.");
  Optional<unsigned> MaybeMaxVF = CM.computeMaxVF();
  if (!MaybeMaxVF) // Cases that should not be vectorized or interleaved.
    return None;

  // Invalidate interleave groups if all blocks of loop will be predicated.
  if (CM.blockNeedsPredication(OrigLoop->getHeader()) &&
      !useMaskedInterleavedAccesses(*TTI)) {
    LLVM_DEBUG(
        dbgs()
        << "LV: Invalidate all interleaved groups due to fold-tail by masking "
           "which requires masked-interleaved support.\n");
    CM.InterleaveInfo.reset();
  }

  if (UserVF) {
    LLVM_DEBUG(dbgs() << "LV: Using user VF " << UserVF << ".\n");
    assert(isPowerOf2_32(UserVF) && "VF needs to be a power of two");
    // Collect the instructions (and their associated costs) that will be more
    // profitable to scalarize.
    CM.selectUserVectorizationFactor(UserVF);
    buildVPlansWithVPRecipes(UserVF, UserVF);
    LLVM_DEBUG(printPlans(dbgs()));
    return {{UserVF, 0}};
  }

  unsigned MaxVF = MaybeMaxVF.getValue();
  assert(MaxVF != 0 && "MaxVF is zero.");

  for (unsigned VF = 1; VF <= MaxVF; VF *= 2) {
    // Collect Uniform and Scalar instructions after vectorization with VF.
    CM.collectUniformsAndScalars(VF);

    // Collect the instructions (and their associated costs) that will be more
    // profitable to scalarize.
    if (VF > 1)
      CM.collectInstsToScalarize(VF);
  }

  buildVPlansWithVPRecipes(1, MaxVF);
  LLVM_DEBUG(printPlans(dbgs()));
  if (MaxVF == 1)
    return VectorizationFactor::Disabled();

  // Select the optimal vectorization factor.
  return CM.selectVectorizationFactor(MaxVF);
}
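
// Editorial sketch (not from this file): the set of candidate factors walked
// by the loop above when no user VF is given: all powers of two from 1 up to
// MaxVF. The output array is a stand-in for illustration only.
static unsigned exampleCollectCandidateVFs(unsigned MaxVF, unsigned *VFs) {
  unsigned Count = 0;
  for (unsigned VF = 1; VF <= MaxVF; VF *= 2)
    VFs[Count++] = VF; // e.g. MaxVF = 8 -> {1, 2, 4, 8}.
  return Count;
}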

void LoopVectorizationPlanner::setBestPlan(unsigned VF, unsigned UF) {
  LLVM_DEBUG(dbgs() << "Setting best plan to VF=" << VF << ", UF=" << UF
                    << '\n');
  BestVF = VF;
  BestUF = UF;

  erase_if(VPlans, [VF](const VPlanPtr &Plan) {
    return !Plan->hasVF(VF);
  });
  assert(VPlans.size() == 1 && "Best VF has not a single VPlan.");
}

void LoopVectorizationPlanner::executePlan(InnerLoopVectorizer &ILV,
                                           DominatorTree *DT) {
  // Perform the actual loop transformation.

  // 1. Create a new empty loop. Unlink the old loop and connect the new one.
  VPCallbackILV CallbackILV(ILV);
  VPTransformState State{BestVF, BestUF,      LI,
                         DT,     ILV.Builder, ILV.VectorLoopValueMap,
                         &ILV,   CallbackILV};
  State.CFG.PrevBB = ILV.createVectorizedLoopSkeleton();
  State.TripCount = ILV.getOrCreateTripCount(nullptr);

  //===------------------------------------------------===//
  //
  // Notice: any optimization or new instruction that goes
  // into the code below should also be implemented in
  // the cost-model.
  //
  //===------------------------------------------------===//

  // 2. Copy and widen instructions from the old loop into the new loop.
  assert(VPlans.size() == 1 && "Not a single VPlan to execute.");
  VPlans.front()->execute(&State);

  // 3. Fix the vectorized code: take care of header phi's, live-outs,
  //    predication, updating analyses.
  ILV.fixVectorizedLoop();
}

void LoopVectorizationPlanner::collectTriviallyDeadInstructions(
    SmallPtrSetImpl<Instruction *> &DeadInstructions) {
  BasicBlock *Latch = OrigLoop->getLoopLatch();

  // We create new control-flow for the vectorized loop, so the original
  // condition will be dead after vectorization if it's only used by the
  // branch.
  auto *Cmp = dyn_cast<Instruction>(Latch->getTerminator()->getOperand(0));
  if (Cmp && Cmp->hasOneUse())
    DeadInstructions.insert(Cmp);

  // We create new "steps" for induction variable updates to which the original
  // induction variables map. An original update instruction will be dead if
  // all its users except the induction variable are dead.
  for (auto &Induction : *Legal->getInductionVars()) {
    PHINode *Ind = Induction.first;
    auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
    if (llvm::all_of(IndUpdate->users(), [&](User *U) -> bool {
          return U == Ind || DeadInstructions.find(cast<Instruction>(U)) !=
                                 DeadInstructions.end();
        }))
      DeadInstructions.insert(IndUpdate);

    // We record as "Dead" also the type-casting instructions we had identified
    // during induction analysis. We don't need any handling for them in the
    // vectorized loop because we have proven that, under a proper runtime
    // test guarding the vectorized loop, the value of the phi, and the casted
    // value of the phi, are the same. The last instruction in this casting chain
    // will get its scalar/vector/widened def from the scalar/vector/widened def
    // of the respective phi node. Any other casts in the induction def-use chain
    // have no other uses outside the phi update chain, and will be ignored.
    InductionDescriptor &IndDes = Induction.second;
    const SmallVectorImpl<Instruction *> &Casts = IndDes.getCastInsts();
    DeadInstructions.insert(Casts.begin(), Casts.end());
  }
}

Value *InnerLoopUnroller::reverseVector(Value *Vec) { return Vec; }

Value *InnerLoopUnroller::getBroadcastInstrs(Value *V) { return V; }

Value *InnerLoopUnroller::getStepVector(Value *Val, int StartIdx, Value *Step,
                                        Instruction::BinaryOps BinOp) {
  // When unrolling and the VF is 1, we only need to add a simple scalar.
  Type *Ty = Val->getType();
  assert(!Ty->isVectorTy() && "Val must be a scalar");

  if (Ty->isFloatingPointTy()) {
    Constant *C = ConstantFP::get(Ty, (double)StartIdx);

    // Floating point operations had to be 'fast' to enable the unrolling.
    Value *MulOp = addFastMathFlag(Builder.CreateFMul(C, Step));
    return addFastMathFlag(Builder.CreateBinOp(BinOp, Val, MulOp));
  }
  Constant *C = ConstantInt::get(Ty, StartIdx);
  return Builder.CreateAdd(Val, Builder.CreateMul(C, Step), "induction");
}
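
// Editorial sketch (not from this file): the scalar "step" the unroller builds
// above for an integer induction, i.e. Val + StartIdx * Step. The concrete
// numbers are illustrative.
static int exampleScalarStep(int Val, int StartIdx, int Step) {
  // With VF == 1 there is no vector lane index to add, only the unroll part's
  // offset scaled by the induction step.
  return Val + StartIdx * Step; // e.g. Val=10, StartIdx=2, Step=3 -> 16.
}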

static void AddRuntimeUnrollDisableMetaData(Loop *L) {
  SmallVector<Metadata *, 4> MDs;
  // Reserve first location for self reference to the LoopID metadata node.
  MDs.push_back(nullptr);
  bool IsUnrollMetadata = false;
  MDNode *LoopID = L->getLoopID();
  if (LoopID) {
    // First find existing loop unrolling disable metadata.
    for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) {
      auto *MD = dyn_cast<MDNode>(LoopID->getOperand(i));
      if (MD) {
        const auto *S = dyn_cast<MDString>(MD->getOperand(0));
        IsUnrollMetadata =
            S && S->getString().startswith("llvm.loop.unroll.disable");
      }
      MDs.push_back(LoopID->getOperand(i));
    }
  }

  if (!IsUnrollMetadata) {
    // Add runtime unroll disable metadata.
    LLVMContext &Context = L->getHeader()->getContext();
    SmallVector<Metadata *, 1> DisableOperands;
    DisableOperands.push_back(
        MDString::get(Context, "llvm.loop.unroll.runtime.disable"));
    MDNode *DisableNode = MDNode::get(Context, DisableOperands);
    MDs.push_back(DisableNode);
    MDNode *NewLoopID = MDNode::get(Context, MDs);
    // Set operand 0 to refer to the loop id itself.
    NewLoopID->replaceOperandWith(0, NewLoopID);
    L->setLoopID(NewLoopID);
  }
}

bool LoopVectorizationPlanner::getDecisionAndClampRange(
    const std::function<bool(unsigned)> &Predicate, VFRange &Range) {
  assert(Range.End > Range.Start && "Trying to test an empty VF range.");
  bool PredicateAtRangeStart = Predicate(Range.Start);

  for (unsigned TmpVF = Range.Start * 2; TmpVF < Range.End; TmpVF *= 2)
    if (Predicate(TmpVF) != PredicateAtRangeStart) {
      Range.End = TmpVF;
      break;
    }

  return PredicateAtRangeStart;
}
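
// Editorial sketch (not from this file): how getDecisionAndClampRange trims a
// VF range at the first power of two where the predicate flips. The pair of
// bounds here is a stand-in for the real VFRange struct.
static bool exampleClampRange(bool (*Predicate)(unsigned), unsigned Start,
                              unsigned &End) {
  bool AtStart = Predicate(Start);
  for (unsigned VF = Start * 2; VF < End; VF *= 2)
    if (Predicate(VF) != AtStart) {
      End = VF; // All VFs in [Start, End) now share the same decision.
      break;
    }
  return AtStart;
}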

/// Build VPlans for the full range of feasible VF's = {\p MinVF, 2 * \p MinVF,
/// 4 * \p MinVF, ..., \p MaxVF} by repeatedly building a VPlan for a sub-range
/// of VF's starting at a given VF and extending it as much as possible. Each
/// vectorization decision can potentially shorten this sub-range during
/// buildVPlan().
void LoopVectorizationPlanner::buildVPlans(unsigned MinVF, unsigned MaxVF) {
  for (unsigned VF = MinVF; VF < MaxVF + 1;) {
    VFRange SubRange = {VF, MaxVF + 1};
    VPlans.push_back(buildVPlan(SubRange));
    VF = SubRange.End;
  }
}
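
// Editorial sketch (not from this file): the way buildVPlans covers
// [MinVF, MaxVF] with consecutive half-open sub-ranges. Each call is assumed
// to return the (possibly clamped) end of the range it handled, mirroring
// SubRange.End above.
static void examplePartitionVFs(unsigned MinVF, unsigned MaxVF,
                                unsigned (*BuildOne)(unsigned Start,
                                                     unsigned End)) {
  for (unsigned VF = MinVF; VF < MaxVF + 1;)
    VF = BuildOne(VF, MaxVF + 1); // Next sub-range starts where this one ended.
}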

VPValue *VPRecipeBuilder::createEdgeMask(BasicBlock *Src, BasicBlock *Dst,
                                         VPlanPtr &Plan) {
  assert(is_contained(predecessors(Dst), Src) && "Invalid edge");

  // Look for cached value.
  std::pair<BasicBlock *, BasicBlock *> Edge(Src, Dst);
  EdgeMaskCacheTy::iterator ECEntryIt = EdgeMaskCache.find(Edge);
  if (ECEntryIt != EdgeMaskCache.end())
    return ECEntryIt->second;

  VPValue *SrcMask = createBlockInMask(Src, Plan);

  // The terminator has to be a branch inst!
  BranchInst *BI = dyn_cast<BranchInst>(Src->getTerminator());
  assert(BI && "Unexpected terminator found");

  if (!BI->isConditional())
    return EdgeMaskCache[Edge] = SrcMask;

  VPValue *EdgeMask = Plan->getVPValue(BI->getCondition());
  assert(EdgeMask && "No Edge Mask found for condition");

  if (BI->getSuccessor(0) != Dst)
    EdgeMask = Builder.createNot(EdgeMask);

  if (SrcMask) // Otherwise block in-mask is all-one, no need to AND.
    EdgeMask = Builder.createAnd(EdgeMask, SrcMask);

  return EdgeMaskCache[Edge] = EdgeMask;
}

VPValue *VPRecipeBuilder::createBlockInMask(BasicBlock *BB, VPlanPtr &Plan) {
  assert(OrigLoop->contains(BB) && "Block is not a part of a loop");

  // Look for cached value.
  BlockMaskCacheTy::iterator BCEntryIt = BlockMaskCache.find(BB);
  if (BCEntryIt != BlockMaskCache.end())
    return BCEntryIt->second;

  // All-one mask is modelled as no-mask following the convention for masked
  // load/store/gather/scatter. Initialize BlockMask to no-mask.
  VPValue *BlockMask = nullptr;

  if (OrigLoop->getHeader() == BB) {
    if (!CM.blockNeedsPredication(BB))
      return BlockMaskCache[BB] = BlockMask; // Loop incoming mask is all-one.

    // Introduce the early-exit compare IV <= BTC to form header block mask.
    // This is used instead of IV < TC because TC may wrap, unlike BTC.
    VPValue *IV = Plan->getVPValue(Legal->getPrimaryInduction());
    VPValue *BTC = Plan->getOrCreateBackedgeTakenCount();
    BlockMask = Builder.createNaryOp(VPInstruction::ICmpULE, {IV, BTC});
    return BlockMaskCache[BB] = BlockMask;
  }

  // This is the block mask. We OR all incoming edges.
  for (auto *Predecessor : predecessors(BB)) {
    VPValue *EdgeMask = createEdgeMask(Predecessor, BB, Plan);
    if (!EdgeMask) // Mask of predecessor is all-one so mask of block is too.
      return BlockMaskCache[BB] = EdgeMask;

    if (!BlockMask) { // BlockMask has its initialized nullptr value.
      BlockMask = EdgeMask;
      continue;
    }

    BlockMask = Builder.createOr(BlockMask, EdgeMask);
  }

  return BlockMaskCache[BB] = BlockMask;
}
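
// Editorial sketch (not from this file): the lane-wise logic modelled by the
// recipes above for a non-header block. The block mask is the OR of all
// incoming edge masks, and an all-one ("no mask") predecessor short-circuits
// the whole block to all-one as well. Plain bool arrays stand in for VPValues.
static bool exampleBlockInMaskLane(const bool *EdgeMaskLane,
                                   const bool *EdgeMaskIsAllOne,
                                   unsigned NumPredecessors) {
  bool Lane = false;
  for (unsigned i = 0; i < NumPredecessors; ++i) {
    if (EdgeMaskIsAllOne[i])
      return true; // One all-one predecessor mask makes the block mask all-one.
    Lane = Lane || EdgeMaskLane[i];
  }
  return Lane;
}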

VPInterleaveRecipe *VPRecipeBuilder::tryToInterleaveMemory(Instruction *I,
                                                           VFRange &Range,
                                                           VPlanPtr &Plan) {
  const InterleaveGroup<Instruction> *IG = CM.getInterleavedAccessGroup(I);
  if (!IG)
    return nullptr;

  // Now check if IG is relevant for VF's in the given range.
  auto isIGMember = [&](Instruction *I) -> std::function<bool(unsigned)> {
    return [=](unsigned VF) -> bool {
      return (VF >= 2 && // Query is illegal for VF == 1
              CM.getWideningDecision(I, VF) ==
                  LoopVectorizationCostModel::CM_Interleave);
    };
  };
  if (!LoopVectorizationPlanner::getDecisionAndClampRange(isIGMember(I), Range))
    return nullptr;

  // I is a member of an InterleaveGroup for VF's in the (possibly trimmed)
  // range. If it's the primary member of the IG construct a VPInterleaveRecipe.
  // Otherwise, it's an adjunct member of the IG, do not construct any Recipe.
  assert(I == IG->getInsertPos() &&
         "Generating a recipe for an adjunct member of an interleave group");

  VPValue *Mask = nullptr;
  if (Legal->isMaskRequired(I))
    Mask = createBlockInMask(I->getParent(), Plan);

  return new VPInterleaveRecipe(IG, Mask);
}

VPWidenMemoryInstructionRecipe *
VPRecipeBuilder::tryToWidenMemory(Instruction *I, VFRange &Range,
                                  VPlanPtr &Plan) {
  if (!isa<LoadInst>(I) && !isa<StoreInst>(I))
    return nullptr;

  auto willWiden = [&](unsigned VF) -> bool {
    if (VF == 1)
      return false;
    if (CM.isScalarAfterVectorization(I, VF) ||
        CM.isProfitableToScalarize(I, VF))
      return false;
    LoopVectorizationCostModel::InstWidening Decision =
        CM.getWideningDecision(I, VF);
    assert(Decision != LoopVectorizationCostModel::CM_Unknown &&
           "CM decision should be taken at this point.");
    assert(Decision != LoopVectorizationCostModel::CM_Interleave &&
           "Interleave memory opportunity should be caught earlier.");
    return Decision != LoopVectorizationCostModel::CM_Scalarize;
  };

  if (!LoopVectorizationPlanner::getDecisionAndClampRange(willWiden, Range))
    return nullptr;

  VPValue *Mask = nullptr;
  if (Legal->isMaskRequired(I))
    Mask = createBlockInMask(I->getParent(), Plan);

  return new VPWidenMemoryInstructionRecipe(*I, Mask);
}

VPWidenIntOrFpInductionRecipe *
VPRecipeBuilder::tryToOptimizeInduction(Instruction *I, VFRange &Range) {
  if (PHINode *Phi = dyn_cast<PHINode>(I)) {
    // Check if this is an integer or fp induction. If so, build the recipe that
    // produces its scalar and vector values.
    InductionDescriptor II = Legal->getInductionVars()->lookup(Phi);
    if (II.getKind() == InductionDescriptor::IK_IntInduction ||
        II.getKind() == InductionDescriptor::IK_FpInduction)
      return new VPWidenIntOrFpInductionRecipe(Phi);

    return nullptr;
  }

  // Optimize the special case where the source is a constant integer
  // induction variable. Notice that we can only optimize the 'trunc' case
  // because (a) FP conversions lose precision, (b) sext/zext may wrap, and
  // (c) other casts depend on pointer size.

  // Determine whether \p K is a truncation based on an induction variable that
  // can be optimized.
  auto isOptimizableIVTruncate =
      [&](Instruction *K) -> std::function<bool(unsigned)> {
    return
        [=](unsigned VF) -> bool { return CM.isOptimizableIVTruncate(K, VF); };
  };

  if (isa<TruncInst>(I) && LoopVectorizationPlanner::getDecisionAndClampRange(
                               isOptimizableIVTruncate(I), Range))
    return new VPWidenIntOrFpInductionRecipe(cast<PHINode>(I->getOperand(0)),
                                             cast<TruncInst>(I));
  return nullptr;
}

VPBlendRecipe *VPRecipeBuilder::tryToBlend(Instruction *I, VPlanPtr &Plan) {
  PHINode *Phi = dyn_cast<PHINode>(I);
  if (!Phi || Phi->getParent() == OrigLoop->getHeader())
    return nullptr;

  // We know that all PHIs in non-header blocks are converted into selects, so
  // we don't have to worry about the insertion order and we can just use the
  // builder. At this point we generate the predication tree. There may be
  // duplications since this is a simple recursive scan, but future
  // optimizations will clean it up.

  SmallVector<VPValue *, 2> Masks;
  unsigned NumIncoming = Phi->getNumIncomingValues();
  for (unsigned In = 0; In < NumIncoming; In++) {
    VPValue *EdgeMask =
        createEdgeMask(Phi->getIncomingBlock(In), Phi->getParent(), Plan);
    assert((EdgeMask || NumIncoming == 1) &&
           "Multiple predecessors with one having a full mask");
    if (EdgeMask)
      Masks.push_back(EdgeMask);
  }
  return new VPBlendRecipe(Phi, Masks);
}

bool VPRecipeBuilder::tryToWiden(Instruction *I, VPBasicBlock *VPBB,
                                 VFRange &Range) {
  bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange(
      [&](unsigned VF) { return CM.isScalarWithPredication(I, VF); }, Range);

  if (IsPredicated)
    return false;

  auto IsVectorizableOpcode = [](unsigned Opcode) {
    switch (Opcode) {
    case Instruction::Add:
    case Instruction::And:
    case Instruction::AShr:
    case Instruction::BitCast:
    case Instruction::Br:
    case Instruction::Call:
    case Instruction::FAdd:
    case Instruction::FCmp:
    case Instruction::FDiv:
    case Instruction::FMul:
    case Instruction::FNeg:
    case Instruction::FPExt:
    case Instruction::FPToSI:
    case Instruction::FPToUI:
    case Instruction::FPTrunc:
    case Instruction::FRem:
    case Instruction::FSub:
    case Instruction::GetElementPtr:
    case Instruction::ICmp:
    case Instruction::IntToPtr:
    case Instruction::Load:
    case Instruction::LShr:
    case Instruction::Mul:
    case Instruction::Or:
    case Instruction::PHI:
    case Instruction::PtrToInt:
    case Instruction::SDiv:
    case Instruction::Select:
    case Instruction::SExt:
    case Instruction::Shl:
    case Instruction::SIToFP:
    case Instruction::SRem:
    case Instruction::Store:
    case Instruction::Sub:
    case Instruction::Trunc:
    case Instruction::UDiv:
    case Instruction::UIToFP:
    case Instruction::URem:
    case Instruction::Xor:
    case Instruction::ZExt:
      return true;
    }
    return false;
  };

  if (!IsVectorizableOpcode(I->getOpcode()))
    return false;

  if (CallInst *CI = dyn_cast<CallInst>(I)) {
    Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
    if (ID && (ID == Intrinsic::assume || ID == Intrinsic::lifetime_end ||
               ID == Intrinsic::lifetime_start || ID == Intrinsic::sideeffect))
      return false;
  }

  auto willWiden = [&](unsigned VF) -> bool {
    if (!isa<PHINode>(I) && (CM.isScalarAfterVectorization(I, VF) ||
                             CM.isProfitableToScalarize(I, VF)))
      return false;
    if (CallInst *CI = dyn_cast<CallInst>(I)) {
      Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
      // The following case may be scalarized depending on the VF.
      // The flag shows whether we use Intrinsic or a usual Call for vectorized
      // version of the instruction.
      // Is it beneficial to perform intrinsic call compared to lib call?
      bool NeedToScalarize;
      unsigned CallCost = CM.getVectorCallCost(CI, VF, NeedToScalarize);
      bool UseVectorIntrinsic =
          ID && CM.getVectorIntrinsicCost(CI, VF) <= CallCost;
      return UseVectorIntrinsic || !NeedToScalarize;
    }
    if (isa<LoadInst>(I) || isa<StoreInst>(I)) {
      assert(CM.getWideningDecision(I, VF) ==
                 LoopVectorizationCostModel::CM_Scalarize &&
             "Memory widening decisions should have been taken care by now");
      return false;
    }
    return true;
  };

  if (!LoopVectorizationPlanner::getDecisionAndClampRange(willWiden, Range))
    return false;

  // Success: widen this instruction. We optimize the common case where
  // consecutive instructions can be represented by a single recipe.
  if (!VPBB->empty()) {
    VPWidenRecipe *LastWidenRecipe = dyn_cast<VPWidenRecipe>(&VPBB->back());
    if (LastWidenRecipe && LastWidenRecipe->appendInstruction(I))
      return true;
  }

  VPBB->appendRecipe(new VPWidenRecipe(I));
  return true;
}

VPBasicBlock *VPRecipeBuilder::handleReplication(
    Instruction *I, VFRange &Range, VPBasicBlock *VPBB,
    DenseMap<Instruction *, VPReplicateRecipe *> &PredInst2Recipe,
    VPlanPtr &Plan) {
  bool IsUniform = LoopVectorizationPlanner::getDecisionAndClampRange(
      [&](unsigned VF) { return CM.isUniformAfterVectorization(I, VF); },
      Range);

  bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange(
      [&](unsigned VF) { return CM.isScalarWithPredication(I, VF); }, Range);

  auto *Recipe = new VPReplicateRecipe(I, IsUniform, IsPredicated);

  // Find if I uses a predicated instruction. If so, it will use its scalar
  // value. Avoid hoisting the insert-element which packs the scalar value into
  // a vector value, as that happens iff all users use the vector value.
  for (auto &Op : I->operands())
    if (auto *PredInst = dyn_cast<Instruction>(Op))
      if (PredInst2Recipe.find(PredInst) != PredInst2Recipe.end())
        PredInst2Recipe[PredInst]->setAlsoPack(false);

  // Finalize the recipe for Instr, first if it is not predicated.
  if (!IsPredicated) {
    LLVM_DEBUG(dbgs() << "LV: Scalarizing:" << *I << "\n");
    VPBB->appendRecipe(Recipe);
    return VPBB;
  }
  LLVM_DEBUG(dbgs() << "LV: Scalarizing and predicating:" << *I << "\n");
  assert(VPBB->getSuccessors().empty() &&
         "VPBB has successors when handling predicated replication.");
  // Record predicated instructions for above packing optimizations.
  PredInst2Recipe[I] = Recipe;
  VPBlockBase *Region = createReplicateRegion(I, Recipe, Plan);
  VPBlockUtils::insertBlockAfter(Region, VPBB);
  auto *RegSucc = new VPBasicBlock();
  VPBlockUtils::insertBlockAfter(RegSucc, Region);
  return RegSucc;
}

VPRegionBlock *VPRecipeBuilder::createReplicateRegion(Instruction *Instr,
                                                      VPRecipeBase *PredRecipe,
                                                      VPlanPtr &Plan) {
  // Instructions marked for predication are replicated and placed under an
  // if-then construct to prevent side-effects.

  // Generate recipes to compute the block mask for this region.
  VPValue *BlockInMask = createBlockInMask(Instr->getParent(), Plan);

  // Build the triangular if-then region.
  std::string RegionName = (Twine("pred.") + Instr->getOpcodeName()).str();
  assert(Instr->getParent() && "Predicated instruction not in any basic block");
  auto *BOMRecipe = new VPBranchOnMaskRecipe(BlockInMask);
  auto *Entry = new VPBasicBlock(Twine(RegionName) + ".entry", BOMRecipe);
  auto *PHIRecipe =
      Instr->getType()->isVoidTy() ? nullptr : new VPPredInstPHIRecipe(Instr);
  auto *Exit = new VPBasicBlock(Twine(RegionName) + ".continue", PHIRecipe);
  auto *Pred = new VPBasicBlock(Twine(RegionName) + ".if", PredRecipe);
  VPRegionBlock *Region = new VPRegionBlock(Entry, Exit, RegionName, true);

  // Note: first set Entry as region entry and then connect successors starting
  // from it in order, to propagate the "parent" of each VPBasicBlock.
  VPBlockUtils::insertTwoBlocksAfter(Pred, Exit, BlockInMask, Entry);
  VPBlockUtils::connectBlocks(Pred, Exit);

  return Region;
}

bool VPRecipeBuilder::tryToCreateRecipe(Instruction *Instr, VFRange &Range,
                                        VPlanPtr &Plan, VPBasicBlock *VPBB) {
  VPRecipeBase *Recipe = nullptr;

  // Check if Instr should belong to an interleave memory recipe, or already
  // does. In the latter case Instr is irrelevant.
  if ((Recipe = tryToInterleaveMemory(Instr, Range, Plan))) {
    VPBB->appendRecipe(Recipe);
    return true;
  }

  // Check if Instr is a memory operation that should be widened.
  if ((Recipe = tryToWidenMemory(Instr, Range, Plan))) {
    VPBB->appendRecipe(Recipe);
    return true;
  }

  // Check if Instr should form some PHI recipe.
  if ((Recipe = tryToOptimizeInduction(Instr, Range))) {
    VPBB->appendRecipe(Recipe);
    return true;
  }
  if ((Recipe = tryToBlend(Instr, Plan))) {
    VPBB->appendRecipe(Recipe);
    return true;
  }
  if (PHINode *Phi = dyn_cast<PHINode>(Instr)) {
    VPBB->appendRecipe(new VPWidenPHIRecipe(Phi));
    return true;
  }

  // Check if Instr is to be widened by a general VPWidenRecipe, after
  // having first checked for specific widening recipes that deal with
  // Interleave Groups, Inductions and Phi nodes.
  if (tryToWiden(Instr, VPBB, Range))
    return true;

  return false;
}

void LoopVectorizationPlanner::buildVPlansWithVPRecipes(unsigned MinVF,
                                                        unsigned MaxVF) {
  assert(OrigLoop->empty() && "Inner loop expected.");

  // Collect conditions feeding internal conditional branches; they need to be
  // represented in VPlan for it to model masking.
  SmallPtrSet<Value *, 1> NeedDef;

  auto *Latch = OrigLoop->getLoopLatch();
  for (BasicBlock *BB : OrigLoop->blocks()) {
    if (BB == Latch)
      continue;
    BranchInst *Branch = dyn_cast<BranchInst>(BB->getTerminator());
    if (Branch && Branch->isConditional())
      NeedDef.insert(Branch->getCondition());
  }

  // If the tail is to be folded by masking, the primary induction variable
  // needs to be represented in VPlan for it to model early-exit masking.
  // Also, both the Phi and the live-out instruction of each reduction are
  // required in order to introduce a select between them in VPlan.
  if (CM.foldTailByMasking()) {
    NeedDef.insert(Legal->getPrimaryInduction());
    for (auto &Reduction : *Legal->getReductionVars()) {
      NeedDef.insert(Reduction.first);
      NeedDef.insert(Reduction.second.getLoopExitInstr());
    }
  }

  // Collect instructions from the original loop that will become trivially dead
  // in the vectorized loop. We don't need to vectorize these instructions. For
  // example, original induction update instructions can become dead because we
  // separately emit induction "steps" when generating code for the new loop.
  // Similarly, we create a new latch condition when setting up the structure
  // of the new loop, so the old one can become dead.
  SmallPtrSet<Instruction *, 4> DeadInstructions;
  collectTriviallyDeadInstructions(DeadInstructions);

  for (unsigned VF = MinVF; VF < MaxVF + 1;) {
    VFRange SubRange = {VF, MaxVF + 1};
    VPlans.push_back(
        buildVPlanWithVPRecipes(SubRange, NeedDef, DeadInstructions));
    VF = SubRange.End;
  }
}
VPlanPtr LoopVectorizationPlanner::buildVPlanWithVPRecipes(
    VFRange &Range, SmallPtrSetImpl<Value *> &NeedDef,
    SmallPtrSetImpl<Instruction *> &DeadInstructions) {
  // Hold a mapping from predicated instructions to their recipes, in order to
  // fix their AlsoPack behavior if a user is determined to replicate and use a
  // scalar instead of vector value.
  DenseMap<Instruction *, VPReplicateRecipe *> PredInst2Recipe;

  DenseMap<Instruction *, Instruction *> &SinkAfter = Legal->getSinkAfter();
  DenseMap<Instruction *, Instruction *> SinkAfterInverse;

  // Create a dummy pre-entry VPBasicBlock to start building the VPlan.
  VPBasicBlock *VPBB = new VPBasicBlock("Pre-Entry");
  auto Plan = std::make_unique<VPlan>(VPBB);

  VPRecipeBuilder RecipeBuilder(OrigLoop, TLI, Legal, CM, Builder);
  // Represent values that will have defs inside VPlan.
  for (Value *V : NeedDef)
    Plan->addVPValue(V);

  // Scan the body of the loop in a topological order to visit each basic block
  // after having visited its predecessor basic blocks.
  LoopBlocksDFS DFS(OrigLoop);
  DFS.perform(LI);

  for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) {
    // Relevant instructions from basic block BB will be grouped into VPRecipe
    // ingredients and fill a new VPBasicBlock.
    unsigned VPBBsForBB = 0;
    auto *FirstVPBBForBB = new VPBasicBlock(BB->getName());
    VPBlockUtils::insertBlockAfter(FirstVPBBForBB, VPBB);
    VPBB = FirstVPBBForBB;
    Builder.setInsertPoint(VPBB);

    std::vector<Instruction *> Ingredients;

    // Organize the ingredients to vectorize from current basic block in the
    // right order.
    for (Instruction &I : BB->instructionsWithoutDebug()) {
      Instruction *Instr = &I;

      // First filter out irrelevant instructions, to ensure no recipes are
      // built for them.
      if (isa<BranchInst>(Instr) ||
          DeadInstructions.find(Instr) != DeadInstructions.end())
        continue;

      // I is a member of an InterleaveGroup for Range.Start. If it's an adjunct
      // member of the IG, do not construct any Recipe for it.
      const InterleaveGroup<Instruction> *IG =
          CM.getInterleavedAccessGroup(Instr);
      if (IG && Instr != IG->getInsertPos() &&
          Range.Start >= 2 && // Query is illegal for VF == 1
          CM.getWideningDecision(Instr, Range.Start) ==
              LoopVectorizationCostModel::CM_Interleave) {
        auto SinkCandidate = SinkAfterInverse.find(Instr);
        if (SinkCandidate != SinkAfterInverse.end())
          Ingredients.push_back(SinkCandidate->second);
        continue;
      }

      // Move instructions to handle first-order recurrences, step 1: avoid
      // handling this instruction until after we've handled the instruction it
      // should follow.
      auto SAIt = SinkAfter.find(Instr);
      if (SAIt != SinkAfter.end()) {
        LLVM_DEBUG(dbgs() << "Sinking " << *SAIt->first << " after "
                          << *SAIt->second
                          << " to vectorize a 1st order recurrence.\n");
        SinkAfterInverse[SAIt->second] = Instr;
        continue;
      }

      Ingredients.push_back(Instr);

      // Move instructions to handle first-order recurrences, step 2: push the
      // instruction to be sunk at its insertion point.
      auto SAInvIt = SinkAfterInverse.find(Instr);
      if (SAInvIt != SinkAfterInverse.end())
        Ingredients.push_back(SAInvIt->second);
    }

    // Introduce each ingredient into VPlan.
    for (Instruction *Instr : Ingredients) {
      if (RecipeBuilder.tryToCreateRecipe(Instr, Range, Plan, VPBB))
        continue;

      // Otherwise, if all widening options failed, the instruction is to be
      // replicated. This may create a successor for VPBB.
      VPBasicBlock *NextVPBB = RecipeBuilder.handleReplication(
          Instr, Range, VPBB, PredInst2Recipe, Plan);
      if (NextVPBB != VPBB) {
        VPBB = NextVPBB;
        VPBB->setName(BB->hasName() ? BB->getName() + "." + Twine(VPBBsForBB++)
                                    : "");
      }
    }
  }

  // Discard empty dummy pre-entry VPBasicBlock. Note that other VPBasicBlocks
  // may also be empty, such as the last one (VPBB), reflecting original
  // basic-blocks with no recipes.
  VPBasicBlock *PreEntry = cast<VPBasicBlock>(Plan->getEntry());
  assert(PreEntry->empty() && "Expecting empty pre-entry block.");
  VPBlockBase *Entry = Plan->setEntry(PreEntry->getSingleSuccessor());
  VPBlockUtils::disconnectBlocks(PreEntry, Entry);
  delete PreEntry;

  // Finally, if tail is folded by masking, introduce selects between the phi
  // and the live-out instruction of each reduction, at the end of the latch.
  if (CM.foldTailByMasking()) {
    Builder.setInsertPoint(VPBB);
    auto *Cond = RecipeBuilder.createBlockInMask(OrigLoop->getHeader(), Plan);
    for (auto &Reduction : *Legal->getReductionVars()) {
      VPValue *Phi = Plan->getVPValue(Reduction.first);
      VPValue *Red = Plan->getVPValue(Reduction.second.getLoopExitInstr());
      Builder.createNaryOp(Instruction::Select, {Cond, Red, Phi});
    }
  }

  std::string PlanName;
  raw_string_ostream RSO(PlanName);
  unsigned VF = Range.Start;
  Plan->addVF(VF);
  RSO << "Initial VPlan for VF={" << VF;
  for (VF *= 2; VF < Range.End; VF *= 2) {
    Plan->addVF(VF);
    RSO << "," << VF;
  }
  RSO << "},UF>=1";
  RSO.flush();
  Plan->setName(PlanName);

  return Plan;
}
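
// VPlan-native path: construct the VPlan for an outer loop by building a
// hierarchical CFG that mirrors the input IR, then either predicate it (when
// VPlan predication is enabled) or lower its VPInstructions to recipes.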
VPlanPtr LoopVectorizationPlanner::buildVPlan(VFRange &Range) {
  // Outer loop handling: outer loops may require CFG and instruction level
  // transformations before even evaluating whether vectorization is
  // profitable. Since we cannot modify the incoming IR, we need to build VPlan
  // upfront in the vectorization pipeline.
  assert(!OrigLoop->empty());
  assert(EnableVPlanNativePath && "VPlan-native path is not enabled.");

  // Create new empty VPlan
  auto Plan = std::make_unique<VPlan>();

  // Build hierarchical CFG
  VPlanHCFGBuilder HCFGBuilder(OrigLoop, LI, *Plan);
  HCFGBuilder.buildHierarchicalCFG();

  for (unsigned VF = Range.Start; VF < Range.End; VF *= 2)
    Plan->addVF(VF);

  if (EnableVPlanPredication) {
    VPlanPredicator VPP(*Plan);
    VPP.predicate();

    // Avoid running transformation to recipes until masked code generation in
    // VPlan-native path is in place.
    return Plan;
  }

  SmallPtrSet<Instruction *, 1> DeadInstructions;
  VPlanHCFGTransforms::VPInstructionsToVPRecipes(
      Plan, Legal->getInductionVars(), DeadInstructions);
  return Plan;
}
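
// VPCallbackILV bridges VPlan execution back to the InnerLoopVectorizer: it
// forwards requests for the widened value of V in unroll part Part to
// ILV.getOrCreateVectorValue().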
Value *LoopVectorizationPlanner::VPCallbackILV::
    getOrCreateVectorValues(Value *V, unsigned Part) {
  return ILV.getOrCreateVectorValue(V, Part);
}
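
// Print this interleave group in the escaped string form used for VPlan
// printing ("\l" line breaks): the insert position, an optional mask operand,
// and each member together with its index in the group.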
void VPInterleaveRecipe::print(raw_ostream &O, const Twine &Indent) const {
  O << " +\n"
    << Indent << "\"INTERLEAVE-GROUP with factor " << IG->getFactor() << " at ";
  IG->getInsertPos()->printAsOperand(O, false);
  if (User) {
    O << ", ";
    User->getOperand(0)->printAsOperand(O);
  }
  O << "\\l\"";
  for (unsigned i = 0; i < IG->getFactor(); ++i)
    if (Instruction *I = IG->getMember(i))
      O << " +\n"
        << Indent << "\" " << VPlanIngredient(I) << " " << i << "\\l\"";
}
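
// Widen each ingredient of the recipe: every scalar instruction in the
// [Begin, End) range is turned into its vector counterpart by the
// InnerLoopVectorizer.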
void VPWidenRecipe::execute(VPTransformState &State) {
  for (auto &Instr : make_range(Begin, End))
    State.ILV->widenInstruction(Instr);
}

void VPWidenIntOrFpInductionRecipe::execute(VPTransformState &State) {
  assert(!State.Instance && "Int or FP induction being replicated.");
  State.ILV->widenIntOrFpInduction(IV, Trunc);
}

void VPWidenPHIRecipe::execute(VPTransformState &State) {
  State.ILV->widenPHIInstruction(Phi, State.UF, State.VF);
}
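
// Lower a blend of incoming values into a chain of selects: the first incoming
// value seeds the chain, and every further incoming value is selected against
// the accumulated result under its edge mask, once per unroll part.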
void VPBlendRecipe::execute(VPTransformState &State) {
  State.ILV->setDebugLocFromInst(State.Builder, Phi);
  // We know that all PHIs in non-header blocks are converted into
  // selects, so we don't have to worry about the insertion order and we
  // can just use the builder.
  // At this point we generate the predication tree. There may be
  // duplications since this is a simple recursive scan, but future
  // optimizations will clean it up.

  unsigned NumIncoming = Phi->getNumIncomingValues();

  assert((User || NumIncoming == 1) &&
         "Multiple predecessors with predecessors having a full mask");
  // Generate a sequence of selects of the form:
  // SELECT(Mask3, In3,
  //        SELECT(Mask2, In2,
  //               ( ...)))
  InnerLoopVectorizer::VectorParts Entry(State.UF);
  for (unsigned In = 0; In < NumIncoming; ++In) {
    for (unsigned Part = 0; Part < State.UF; ++Part) {
      // We might have single edge PHIs (blocks) - use an identity
      // 'select' for the first PHI operand.
      Value *In0 =
          State.ILV->getOrCreateVectorValue(Phi->getIncomingValue(In), Part);
      if (In == 0)
        Entry[Part] = In0; // Initialize with the first incoming value.
      else {
        // Select between the current value and the previous incoming edge
        // based on the incoming mask.
        Value *Cond = State.get(User->getOperand(In), Part);
        Entry[Part] =
            State.Builder.CreateSelect(Cond, In0, Entry[Part], "predphi");
      }
    }
  }
  for (unsigned Part = 0; Part < State.UF; ++Part)
    State.ValueMap.setVectorValue(Phi, Part, Entry[Part]);
}
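
// Generate the wide accesses for an interleave group. If the recipe carries a
// mask operand, collect its per-part values and pass them on so the group is
// emitted as masked memory operations.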
void VPInterleaveRecipe::execute(VPTransformState &State) {
  assert(!State.Instance && "Interleave group being replicated.");
  if (!User)
    return State.ILV->vectorizeInterleaveGroup(IG->getInsertPos());

  // Last (and currently only) operand is a mask.
  InnerLoopVectorizer::VectorParts MaskValues(State.UF);
  VPValue *Mask = User->getOperand(User->getNumOperands() - 1);
  for (unsigned Part = 0; Part < State.UF; ++Part)
    MaskValues[Part] = State.get(Mask, Part);
  State.ILV->vectorizeInterleaveGroup(IG->getInsertPos(), &MaskValues);
}
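
// Replicate the ingredient as scalar instances. When a specific instance is
// requested, emit just that lane (packing it into a vector value if AlsoPack
// is set); otherwise emit all UF x VF copies, or only the first lane of each
// part for uniform instructions.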
void VPReplicateRecipe::execute(VPTransformState &State) {
  if (State.Instance) { // Generate a single instance.
    State.ILV->scalarizeInstruction(Ingredient, *State.Instance, IsPredicated);
    // Insert scalar instance packing it into a vector.
    if (AlsoPack && State.VF > 1) {
      // If we're constructing lane 0, initialize to start from undef.
      if (State.Instance->Lane == 0) {
        Value *Undef =
            UndefValue::get(VectorType::get(Ingredient->getType(), State.VF));
        State.ValueMap.setVectorValue(Ingredient, State.Instance->Part, Undef);
      }
      State.ILV->packScalarIntoVectorValue(Ingredient, *State.Instance);
    }
    return;
  }

  // Generate scalar instances for all VF lanes of all UF parts, unless the
  // instruction is uniform, in which case generate only the first lane for
  // each of the UF parts.
  unsigned EndLane = IsUniform ? 1 : State.VF;
  for (unsigned Part = 0; Part < State.UF; ++Part)
    for (unsigned Lane = 0; Lane < EndLane; ++Lane)
      State.ILV->scalarizeInstruction(Ingredient, {Part, Lane}, IsPredicated);
}
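
// Emit the branch guarding a predicated block for the current (Part, Lane)
// instance: an absent mask becomes a constant 'true', vector masks are
// narrowed by extracting the lane's bit, and the block's temporary unreachable
// terminator is replaced with a conditional branch on that bit.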
void VPBranchOnMaskRecipe::execute(VPTransformState &State) {
  assert(State.Instance && "Branch on Mask works only on single instance.");

  unsigned Part = State.Instance->Part;
  unsigned Lane = State.Instance->Lane;

  Value *ConditionBit = nullptr;
  if (!User) // Block in mask is all-one.
    ConditionBit = State.Builder.getTrue();
  else {
    VPValue *BlockInMask = User->getOperand(0);
    ConditionBit = State.get(BlockInMask, Part);
    if (ConditionBit->getType()->isVectorTy())
      ConditionBit = State.Builder.CreateExtractElement(
          ConditionBit, State.Builder.getInt32(Lane));
  }

  // Replace the temporary unreachable terminator with a new conditional branch,
  // whose two destinations will be set later when they are created.
  auto *CurrentTerminator = State.CFG.PrevBB->getTerminator();
  assert(isa<UnreachableInst>(CurrentTerminator) &&
         "Expected to replace unreachable terminator with conditional branch.");
  auto *CondBr = BranchInst::Create(State.CFG.PrevBB, nullptr, ConditionBit);
  CondBr->setSuccessor(0, nullptr);
  ReplaceInstWithInst(CurrentTerminator, CondBr);
}
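
// Create the phi that merges the value produced in the predicated block with
// the value flowing around it, either as a whole vector (when the predicated
// instruction was also packed) or as a single scalar lane.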
void VPPredInstPHIRecipe::execute(VPTransformState &State) {
  assert(State.Instance && "Predicated instruction PHI works per instance.");
  Instruction *ScalarPredInst = cast<Instruction>(
      State.ValueMap.getScalarValue(PredInst, *State.Instance));
  BasicBlock *PredicatedBB = ScalarPredInst->getParent();
  BasicBlock *PredicatingBB = PredicatedBB->getSinglePredecessor();
  assert(PredicatingBB && "Predicated block has no single predecessor.");

  // By current pack/unpack logic we need to generate only a single phi node: if
  // a vector value for the predicated instruction exists at this point it means
  // the instruction has vector users only, and a phi for the vector value is
  // needed. In this case the recipe of the predicated instruction is marked to
  // also do that packing, thereby "hoisting" the insert-element sequence.
  // Otherwise, a phi node for the scalar value is needed.
  unsigned Part = State.Instance->Part;
  if (State.ValueMap.hasVectorValue(PredInst, Part)) {
    Value *VectorValue = State.ValueMap.getVectorValue(PredInst, Part);
    InsertElementInst *IEI = cast<InsertElementInst>(VectorValue);
    PHINode *VPhi = State.Builder.CreatePHI(IEI->getType(), 2);
    VPhi->addIncoming(IEI->getOperand(0), PredicatingBB); // Unmodified vector.
    VPhi->addIncoming(IEI, PredicatedBB); // New vector with inserted element.
    State.ValueMap.resetVectorValue(PredInst, Part, VPhi); // Update cache.
  } else {
    Type *PredInstType = PredInst->getType();
    PHINode *Phi = State.Builder.CreatePHI(PredInstType, 2);
    Phi->addIncoming(UndefValue::get(ScalarPredInst->getType()), PredicatingBB);
    Phi->addIncoming(ScalarPredInst, PredicatedBB);
    State.ValueMap.resetScalarValue(PredInst, *State.Instance, Phi);
  }
}
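
// Vectorize a load or store. As with interleave groups, an optional mask
// operand is expanded into per-part values and forwarded so the access is
// emitted as a masked memory instruction.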
void VPWidenMemoryInstructionRecipe::execute(VPTransformState &State) {
  if (!User)
    return State.ILV->vectorizeMemoryInstruction(&Instr);

  // Last (and currently only) operand is a mask.
  InnerLoopVectorizer::VectorParts MaskValues(State.UF);
  VPValue *Mask = User->getOperand(User->getNumOperands() - 1);
  for (unsigned Part = 0; Part < State.UF; ++Part)
    MaskValues[Part] = State.get(Mask, Part);
  State.ILV->vectorizeMemoryInstruction(&Instr, &MaskValues);
}
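
// Decide how the scalar epilogue should be lowered: allow it by default,
// disallow it when the function is optimized for size (unless vectorization is
// explicitly forced), or use predication instead when the predicate hint or
// PreferPredicateOverEpilog requests it.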
static ScalarEpilogueLowering
getScalarEpilogueLowering(Function *F, Loop *L, LoopVectorizeHints &Hints,
                          ProfileSummaryInfo *PSI, BlockFrequencyInfo *BFI) {
  ScalarEpilogueLowering SEL = CM_ScalarEpilogueAllowed;
  if (Hints.getForce() != LoopVectorizeHints::FK_Enabled &&
      (F->hasOptSize() ||
       llvm::shouldOptimizeForSize(L->getHeader(), PSI, BFI)))
    SEL = CM_ScalarEpilogueNotAllowedOptSize;
  else if (PreferPredicateOverEpilog || Hints.getPredicate())
    SEL = CM_ScalarEpilogueNotNeededUsePredicate;

  return SEL;
}

// Process the loop in the VPlan-native vectorization path. This path builds
// VPlan upfront in the vectorization pipeline, which allows applying
// VPlan-to-VPlan transformations from the very beginning without modifying the
// input LLVM IR.
static bool processLoopInVPlanNativePath(
    Loop *L, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT,
    LoopVectorizationLegality *LVL, TargetTransformInfo *TTI,
    TargetLibraryInfo *TLI, DemandedBits *DB, AssumptionCache *AC,
    OptimizationRemarkEmitter *ORE, BlockFrequencyInfo *BFI,
    ProfileSummaryInfo *PSI, LoopVectorizeHints &Hints) {
  assert(EnableVPlanNativePath && "VPlan-native path is disabled.");
  Function *F = L->getHeader()->getParent();
  InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL->getLAI());
  ScalarEpilogueLowering SEL = getScalarEpilogueLowering(F, L, Hints, PSI, BFI);

  LoopVectorizationCostModel CM(SEL, L, PSE, LI, LVL, *TTI, TLI, DB, AC, ORE, F,
                                &Hints, IAI);
  // Use the planner for outer loop vectorization.
  // TODO: CM is not used at this point inside the planner. Turn CM into an
  // optional argument if we don't need it in the future.
  LoopVectorizationPlanner LVP(L, LI, TLI, TTI, LVL, CM);

  // Get user vectorization factor.
  const unsigned UserVF = Hints.getWidth();

  // Plan how to best vectorize, return the best VF and its cost.
  const VectorizationFactor VF = LVP.planInVPlanNativePath(UserVF);

  // If we are stress testing VPlan builds, do not attempt to generate vector
  // code. Masked vector code generation support will follow soon.
  // Also, do not attempt to vectorize if no vector code will be produced.
  if (VPlanBuildStressTest || EnableVPlanPredication ||
      VectorizationFactor::Disabled() == VF)
    return false;

  LVP.setBestPlan(VF.Width, 1);

  InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, 1, LVL,
                         &CM);
  LLVM_DEBUG(dbgs() << "Vectorizing outer loop in \""
                    << L->getHeader()->getParent()->getName() << "\"\n");
  LVP.executePlan(LB, DT);

  // Mark the loop as already vectorized to avoid vectorizing again.
  Hints.setAlreadyVectorized();

  LLVM_DEBUG(verifyFunction(*L->getHeader()->getParent()));
  return true;
}
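
// Analyze and, if worthwhile, transform a single loop: after the legality,
// trip-count, attribute and cost-model checks below, the loop is either
// vectorized, only interleaved, or left untouched, and the corresponding
// optimization remarks are emitted.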
bool LoopVectorizePass::processLoop(Loop *L) {
  assert((EnableVPlanNativePath || L->empty()) &&
         "VPlan-native path is not enabled. Only process inner loops.");

#ifndef NDEBUG
  const std::string DebugLocStr = getDebugLocString(L);
#endif /* NDEBUG */

  LLVM_DEBUG(dbgs() << "\nLV: Checking a loop in \""
                    << L->getHeader()->getParent()->getName() << "\" from "
                    << DebugLocStr << "\n");

  LoopVectorizeHints Hints(L, InterleaveOnlyWhenForced, *ORE);

  LLVM_DEBUG(
      dbgs() << "LV: Loop hints:"
             << " force="
             << (Hints.getForce() == LoopVectorizeHints::FK_Disabled
                     ? "disabled"
                     : (Hints.getForce() == LoopVectorizeHints::FK_Enabled
                            ? "enabled"
                            : "?"))
             << " width=" << Hints.getWidth()
             << " unroll=" << Hints.getInterleave() << "\n");

  // Function containing loop
  Function *F = L->getHeader()->getParent();

  // Looking at the diagnostic output is the only way to determine if a loop
  // was vectorized (other than looking at the IR or machine code), so it
  // is important to generate an optimization remark for each loop. Most of
  // these messages are generated as OptimizationRemarkAnalysis. Remarks
  // generated as OptimizationRemark and OptimizationRemarkMissed are
  // less verbose, reporting vectorized loops and unvectorized loops that may
  // benefit from vectorization, respectively.

  if (!Hints.allowVectorization(F, L, VectorizeOnlyWhenForced)) {
    LLVM_DEBUG(dbgs() << "LV: Loop hints prevent vectorization.\n");
    return false;
  }

  PredicatedScalarEvolution PSE(*SE, *L);

  // Check if it is legal to vectorize the loop.
  LoopVectorizationRequirements Requirements(*ORE);
  LoopVectorizationLegality LVL(L, PSE, DT, TTI, TLI, AA, F, GetLAA, LI, ORE,
                                &Requirements, &Hints, DB, AC);
  if (!LVL.canVectorize(EnableVPlanNativePath)) {
    LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Cannot prove legality.\n");
    Hints.emitRemarkWithHints();
    return false;
  }

  // Check the function attributes and profiles to find out if this function
  // should be optimized for size.
  ScalarEpilogueLowering SEL = getScalarEpilogueLowering(F, L, Hints, PSI, BFI);

  // Entrance to the VPlan-native vectorization path. Outer loops are processed
  // here. They may require CFG and instruction level transformations before
  // even evaluating whether vectorization is profitable. Since we cannot modify
  // the incoming IR, we need to build VPlan upfront in the vectorization
  // pipeline.
  if (!L->empty())
    return processLoopInVPlanNativePath(L, PSE, LI, DT, &LVL, TTI, TLI, DB, AC,
                                        ORE, BFI, PSI, Hints);

  assert(L->empty() && "Inner loop expected.");

  // Check the loop for a trip count threshold: vectorize loops with a tiny trip
  // count by optimizing for size, to minimize overheads.
  // Prefer constant trip counts over profile data, over upper bound estimate.
  unsigned ExpectedTC = 0;
  bool HasExpectedTC = false;
  if (const SCEVConstant *ConstExits =
          dyn_cast<SCEVConstant>(SE->getBackedgeTakenCount(L))) {
    const APInt &ExitsCount = ConstExits->getAPInt();
    // We are interested in small values for ExpectedTC. Skip over those that
    // can't fit an unsigned.
    if (ExitsCount.ult(std::numeric_limits<unsigned>::max())) {
      ExpectedTC = static_cast<unsigned>(ExitsCount.getZExtValue()) + 1;
      HasExpectedTC = true;
    }
  }
  // ExpectedTC may be large because it's bound by a variable. Check
  // profiling information to validate we should vectorize.
  if (!HasExpectedTC && LoopVectorizeWithBlockFrequency) {
    auto EstimatedTC = getLoopEstimatedTripCount(L);
    if (EstimatedTC) {
      ExpectedTC = *EstimatedTC;
      HasExpectedTC = true;
    }
  }
  if (!HasExpectedTC) {
    ExpectedTC = SE->getSmallConstantMaxTripCount(L);
    HasExpectedTC = (ExpectedTC > 0);
  }

  if (HasExpectedTC && ExpectedTC < TinyTripCountVectorThreshold) {
    LLVM_DEBUG(dbgs() << "LV: Found a loop with a very small trip count. "
                      << "This loop is worth vectorizing only if no scalar "
                      << "iteration overheads are incurred.");
    if (Hints.getForce() == LoopVectorizeHints::FK_Enabled)
      LLVM_DEBUG(dbgs() << " But vectorizing was explicitly forced.\n");
    else {
      LLVM_DEBUG(dbgs() << "\n");
      SEL = CM_ScalarEpilogueNotAllowedLowTripLoop;
    }
  }

  // Check the function attributes to see if implicit floats are allowed.
  // FIXME: This check doesn't seem possibly correct -- what if the loop is
  // an integer loop and the vector instructions selected are purely integer
  // vector instructions?
  if (F->hasFnAttribute(Attribute::NoImplicitFloat)) {
    reportVectorizationFailure(
        "Can't vectorize when the NoImplicitFloat attribute is used",
        "loop not vectorized due to NoImplicitFloat attribute",
        "NoImplicitFloat", ORE, L);
    Hints.emitRemarkWithHints();
    return false;
  }

  // Check if the target supports potentially unsafe FP vectorization.
  // FIXME: Add a check for the type of safety issue (denormal, signaling)
  // for the target we're vectorizing for, to make sure none of the
  // additional fp-math flags can help.
  if (Hints.isPotentiallyUnsafe() &&
      TTI->isFPVectorizationPotentiallyUnsafe()) {
    reportVectorizationFailure(
        "Potentially unsafe FP op prevents vectorization",
        "loop not vectorized due to unsafe FP support.",
        "UnsafeFP", ORE, L);
    Hints.emitRemarkWithHints();
    return false;
  }

  bool UseInterleaved = TTI->enableInterleavedAccessVectorization();
  InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL.getLAI());

  // If an override option has been passed in for interleaved accesses, use it.
  if (EnableInterleavedMemAccesses.getNumOccurrences() > 0)
    UseInterleaved = EnableInterleavedMemAccesses;

  // Analyze interleaved memory accesses.
  if (UseInterleaved) {
    IAI.analyzeInterleaving(useMaskedInterleavedAccesses(*TTI));
  }

  // Use the cost model.
  LoopVectorizationCostModel CM(SEL, L, PSE, LI, &LVL, *TTI, TLI, DB, AC, ORE,
                                F, &Hints, IAI);
  CM.collectValuesToIgnore();

  // Use the planner for vectorization.
  LoopVectorizationPlanner LVP(L, LI, TLI, TTI, &LVL, CM);

  // Get user vectorization factor.
  unsigned UserVF = Hints.getWidth();

  // Plan how to best vectorize, return the best VF and its cost.
  Optional<VectorizationFactor> MaybeVF = LVP.plan(UserVF);

  VectorizationFactor VF = VectorizationFactor::Disabled();
  unsigned IC = 1;
  unsigned UserIC = Hints.getInterleave();

  if (MaybeVF) {
    VF = *MaybeVF;
    // Select the interleave count.
    IC = CM.selectInterleaveCount(VF.Width, VF.Cost);
  }

  // Identify the diagnostic messages that should be produced.
  std::pair<StringRef, std::string> VecDiagMsg, IntDiagMsg;
  bool VectorizeLoop = true, InterleaveLoop = true;
  if (Requirements.doesNotMeet(F, L, Hints)) {
    LLVM_DEBUG(dbgs() << "LV: Not vectorizing: loop did not meet vectorization "
                         "requirements.\n");
    Hints.emitRemarkWithHints();
    return false;
  }

  if (VF.Width == 1) {
    LLVM_DEBUG(dbgs() << "LV: Vectorization is possible but not beneficial.\n");
    VecDiagMsg = std::make_pair(
        "VectorizationNotBeneficial",
        "the cost-model indicates that vectorization is not beneficial");
    VectorizeLoop = false;
  }

  if (!MaybeVF && UserIC > 1) {
    // Tell the user interleaving was avoided up-front, despite being explicitly
    // requested.
    LLVM_DEBUG(dbgs() << "LV: Ignoring UserIC, because vectorization and "
                         "interleaving should be avoided up front\n");
    IntDiagMsg = std::make_pair(
        "InterleavingAvoided",
        "Ignoring UserIC, because interleaving was avoided up front");
    InterleaveLoop = false;
  } else if (IC == 1 && UserIC <= 1) {
    // Tell the user interleaving is not beneficial.
    LLVM_DEBUG(dbgs() << "LV: Interleaving is not beneficial.\n");
    IntDiagMsg = std::make_pair(
        "InterleavingNotBeneficial",
        "the cost-model indicates that interleaving is not beneficial");
    InterleaveLoop = false;
    if (UserIC == 1) {
      IntDiagMsg.first = "InterleavingNotBeneficialAndDisabled";
      IntDiagMsg.second +=
          " and is explicitly disabled or interleave count is set to 1";
    }
  } else if (IC > 1 && UserIC == 1) {
    // Tell the user interleaving is beneficial, but it is explicitly disabled.
    LLVM_DEBUG(
        dbgs() << "LV: Interleaving is beneficial but is explicitly disabled.");
    IntDiagMsg = std::make_pair(
        "InterleavingBeneficialButDisabled",
        "the cost-model indicates that interleaving is beneficial "
        "but is explicitly disabled or interleave count is set to 1");
    InterleaveLoop = false;
  }

  // Override IC if user provided an interleave count.
  IC = UserIC > 0 ? UserIC : IC;

  // Emit diagnostic messages, if any.
  const char *VAPassName = Hints.vectorizeAnalysisPassName();
  if (!VectorizeLoop && !InterleaveLoop) {
    // Do not vectorize or interleave the loop.
    ORE->emit([&]() {
      return OptimizationRemarkMissed(VAPassName, VecDiagMsg.first,
                                      L->getStartLoc(), L->getHeader())
             << VecDiagMsg.second;
    });
    ORE->emit([&]() {
      return OptimizationRemarkMissed(LV_NAME, IntDiagMsg.first,
                                      L->getStartLoc(), L->getHeader())
             << IntDiagMsg.second;
    });
    return false;
  } else if (!VectorizeLoop && InterleaveLoop) {
    LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
    ORE->emit([&]() {
      return OptimizationRemarkAnalysis(VAPassName, VecDiagMsg.first,
                                        L->getStartLoc(), L->getHeader())
             << VecDiagMsg.second;
    });
  } else if (VectorizeLoop && !InterleaveLoop) {
    LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width
                      << ") in " << DebugLocStr << '\n');
    ORE->emit([&]() {
      return OptimizationRemarkAnalysis(LV_NAME, IntDiagMsg.first,
                                        L->getStartLoc(), L->getHeader())
             << IntDiagMsg.second;
    });
  } else if (VectorizeLoop && InterleaveLoop) {
    LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width
                      << ") in " << DebugLocStr << '\n');
    LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
  }

  LVP.setBestPlan(VF.Width, IC);

  using namespace ore;
  bool DisableRuntimeUnroll = false;
  MDNode *OrigLoopID = L->getLoopID();

  if (!VectorizeLoop) {
    assert(IC > 1 && "interleave count should not be 1 or 0");
    // If we decided that it is not legal to vectorize the loop, then
    // interleave it.
    InnerLoopUnroller Unroller(L, PSE, LI, DT, TLI, TTI, AC, ORE, IC, &LVL,
                               &CM);
    LVP.executePlan(Unroller, DT);

    ORE->emit([&]() {
      return OptimizationRemark(LV_NAME, "Interleaved", L->getStartLoc(),
                                L->getHeader())
             << "interleaved loop (interleaved count: "
             << NV("InterleaveCount", IC) << ")";
    });
  } else {
    // If we decided that it is *legal* to vectorize the loop, then do it.
    InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, IC,
                           &LVL, &CM);
    LVP.executePlan(LB, DT);
    ++LoopsVectorized;

    // Add metadata to disable runtime unrolling a scalar loop when there are
    // no runtime checks about strides and memory. A scalar loop that is
    // rarely used is not worth unrolling.
    if (!LB.areSafetyChecksAdded())
      DisableRuntimeUnroll = true;

    // Report the vectorization decision.
    ORE->emit([&]() {
      return OptimizationRemark(LV_NAME, "Vectorized", L->getStartLoc(),
                                L->getHeader())
             << "vectorized loop (vectorization width: "
             << NV("VectorizationFactor", VF.Width)
             << ", interleaved count: " << NV("InterleaveCount", IC) << ")";
    });
  }

  Optional<MDNode *> RemainderLoopID =
      makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll,
                                      LLVMLoopVectorizeFollowupEpilogue});
  if (RemainderLoopID.hasValue()) {
    L->setLoopID(RemainderLoopID.getValue());
  } else {
    if (DisableRuntimeUnroll)
      AddRuntimeUnrollDisableMetaData(L);

    // Mark the loop as already vectorized to avoid vectorizing again.
    Hints.setAlreadyVectorized();
  }

  LLVM_DEBUG(verifyFunction(*L->getHeader()->getParent()));
  return true;
}
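
// Shared driver used by both pass-manager entry points: cache the analysis
// results, bail out early if the target has neither vector registers nor
// interleaving that could help ILP, simplify all loops, then collect the
// supported inner loops into a worklist and process each one.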
bool LoopVectorizePass::runImpl(
    Function &F, ScalarEvolution &SE_, LoopInfo &LI_, TargetTransformInfo &TTI_,
    DominatorTree &DT_, BlockFrequencyInfo &BFI_, TargetLibraryInfo *TLI_,
    DemandedBits &DB_, AliasAnalysis &AA_, AssumptionCache &AC_,
    std::function<const LoopAccessInfo &(Loop &)> &GetLAA_,
    OptimizationRemarkEmitter &ORE_, ProfileSummaryInfo *PSI_) {
  SE = &SE_;
  LI = &LI_;
  TTI = &TTI_;
  DT = &DT_;
  BFI = &BFI_;
  TLI = TLI_;
  AA = &AA_;
  AC = &AC_;
  GetLAA = &GetLAA_;
  DB = &DB_;
  ORE = &ORE_;
  PSI = PSI_;

  // Don't attempt if
  // 1. the target claims to have no vector registers, and
  // 2. interleaving won't help ILP.
  //
  // The second condition is necessary because, even if the target has no
  // vector registers, loop vectorization may still enable scalar
  // interleaving.
  if (!TTI->getNumberOfRegisters(true) && TTI->getMaxInterleaveFactor(1) < 2)
    return false;

  bool Changed = false;

  // The vectorizer requires loops to be in simplified form.
  // Since simplification may add new inner loops, it has to run before the
  // legality and profitability checks. This means running the loop vectorizer
  // will simplify all loops, regardless of whether anything ends up being
  // vectorized.
  for (auto &L : *LI)
    Changed |=
        simplifyLoop(L, DT, LI, SE, AC, nullptr, false /* PreserveLCSSA */);

  // Build up a worklist of inner-loops to vectorize. This is necessary as
  // the act of vectorizing or partially unrolling a loop creates new loops
  // and can invalidate iterators across the loops.
  SmallVector<Loop *, 8> Worklist;

  for (Loop *L : *LI)
    collectSupportedLoops(*L, LI, ORE, Worklist);

  LoopsAnalyzed += Worklist.size();

  // Now walk the identified inner loops.
  while (!Worklist.empty()) {
    Loop *L = Worklist.pop_back_val();

    // For the inner loops we actually process, form LCSSA to simplify the
    // transform.
    Changed |= formLCSSARecursively(*L, *DT, LI, SE);

    Changed |= processLoop(L);
  }

  // Process each loop nest in the function.
  return Changed;
}
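
// New pass manager entry point: gather the function-level analysis results,
// wrap LoopAccessAnalysis behind a callback, run the shared implementation,
// and report which analyses are preserved.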
PreservedAnalyses LoopVectorizePass::run(Function &F,
                                         FunctionAnalysisManager &AM) {
  auto &SE = AM.getResult<ScalarEvolutionAnalysis>(F);
  auto &LI = AM.getResult<LoopAnalysis>(F);
  auto &TTI = AM.getResult<TargetIRAnalysis>(F);
  auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
  auto &BFI = AM.getResult<BlockFrequencyAnalysis>(F);
  auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
  auto &AA = AM.getResult<AAManager>(F);
  auto &AC = AM.getResult<AssumptionAnalysis>(F);
  auto &DB = AM.getResult<DemandedBitsAnalysis>(F);
  auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F);
  MemorySSA *MSSA = EnableMSSALoopDependency
                        ? &AM.getResult<MemorySSAAnalysis>(F).getMSSA()
                        : nullptr;

  auto &LAM = AM.getResult<LoopAnalysisManagerFunctionProxy>(F).getManager();
  std::function<const LoopAccessInfo &(Loop &)> GetLAA =
      [&](Loop &L) -> const LoopAccessInfo & {
    LoopStandardAnalysisResults AR = {AA, AC, DT, LI, SE, TLI, TTI, MSSA};
    return LAM.getResult<LoopAccessAnalysis>(L, AR);
  };
  const ModuleAnalysisManager &MAM =
      AM.getResult<ModuleAnalysisManagerFunctionProxy>(F).getManager();
  ProfileSummaryInfo *PSI =
      MAM.getCachedResult<ProfileSummaryAnalysis>(*F.getParent());
  bool Changed =
      runImpl(F, SE, LI, TTI, DT, BFI, &TLI, DB, AA, AC, GetLAA, ORE, PSI);
  if (!Changed)
    return PreservedAnalyses::all();
  PreservedAnalyses PA;

  // We currently do not preserve loopinfo/dominator analyses with outer loop
  // vectorization. Until this is addressed, mark these analyses as preserved
  // only for non-VPlan-native path.
  // TODO: Preserve Loop and Dominator analyses for VPlan-native path.
  if (!EnableVPlanNativePath) {
    PA.preserve<LoopAnalysis>();
    PA.preserve<DominatorTreeAnalysis>();
  }
  PA.preserve<BasicAA>();
  PA.preserve<GlobalsAA>();
  return PA;
}
  6769. }