//===- ScalarEvolution.cpp - Scalar Evolution Analysis ----------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the scalar evolution analysis
// engine, which is used primarily to analyze expressions involving induction
// variables in loops.
//
// There are several aspects to this library. First is the representation of
// scalar expressions, which are represented as subclasses of the SCEV class.
// These classes are used to represent certain types of subexpressions that we
// can handle. We only create one SCEV of a particular shape, so
// pointer-comparisons for equality are legal.
//
// One important aspect of the SCEV objects is that they are never cyclic, even
// if there is a cycle in the dataflow for an expression (ie, a PHI node). If
// the PHI node is one of the idioms that we can represent (e.g., a polynomial
// recurrence) then we represent it directly as a recurrence node, otherwise we
// represent it as a SCEVUnknown node.
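//
// For example, a loop-counter PHI node that starts at 0 and is incremented by
// 1 on every iteration of its loop is represented directly as the add
// recurrence {0,+,1}<%header>, where %header names the loop's header block
// (this is the notation produced by SCEV::print below).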
//
// In addition to being able to represent expressions of various types, we also
// have folders that are used to build the *canonical* representation for a
// particular expression. These folders are capable of using a variety of
// rewrite rules to simplify the expressions.
//
// Once the folders are defined, we can implement the more interesting
// higher-level code, such as the code that recognizes PHI nodes of various
// types, computes the execution count of a loop, etc.
//
// TODO: We should use these routines and value representations to implement
// dependence analysis!
//
//===----------------------------------------------------------------------===//
//
// There are several good references for the techniques used in this analysis.
//
//  Chains of recurrences -- a method to expedite the evaluation
//  of closed-form functions
//  Olaf Bachmann, Paul S. Wang, Eugene V. Zima
//
//  On computational properties of chains of recurrences
//  Eugene V. Zima
//
//  Symbolic Evaluation of Chains of Recurrences for Loop Optimization
//  Robert A. van Engelen
//
//  Efficient Symbolic Analysis for Optimizing Compilers
//  Robert A. van Engelen
//
//  Using the chains of recurrences algebra for data dependence testing and
//  induction variable substitution
//  MS Thesis, Johnie Birch
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "scalar-evolution"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/GlobalVariable.h"
#include "llvm/GlobalAlias.h"
#include "llvm/Instructions.h"
#include "llvm/LLVMContext.h"
#include "llvm/Operator.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Assembly/Writer.h"
#include "llvm/DataLayout.h"
#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ConstantRange.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/InstIterator.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include <algorithm>
using namespace llvm;

STATISTIC(NumArrayLenItCounts,
          "Number of trip counts computed with array length");
STATISTIC(NumTripCountsComputed,
          "Number of loops with predictable loop counts");
STATISTIC(NumTripCountsNotComputed,
          "Number of loops without predictable loop counts");
STATISTIC(NumBruteForceTripCountsComputed,
          "Number of loops with trip counts computed by force");

static cl::opt<unsigned>
MaxBruteForceIterations("scalar-evolution-max-iterations", cl::ReallyHidden,
                        cl::desc("Maximum number of iterations SCEV will "
                                 "symbolically execute a constant "
                                 "derived loop"),
                        cl::init(100));

// FIXME: Enable this with XDEBUG when the test suite is clean.
static cl::opt<bool>
VerifySCEV("verify-scev",
           cl::desc("Verify ScalarEvolution's backedge taken counts (slow)"));

INITIALIZE_PASS_BEGIN(ScalarEvolution, "scalar-evolution",
                      "Scalar Evolution Analysis", false, true)
INITIALIZE_PASS_DEPENDENCY(LoopInfo)
INITIALIZE_PASS_DEPENDENCY(DominatorTree)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfo)
INITIALIZE_PASS_END(ScalarEvolution, "scalar-evolution",
                    "Scalar Evolution Analysis", false, true)
char ScalarEvolution::ID = 0;

//===----------------------------------------------------------------------===//
//                           SCEV class definitions
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Implementation of the SCEV class.
//

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void SCEV::dump() const {
  print(dbgs());
  dbgs() << '\n';
}
#endif

void SCEV::print(raw_ostream &OS) const {
  switch (getSCEVType()) {
  case scConstant:
    WriteAsOperand(OS, cast<SCEVConstant>(this)->getValue(), false);
    return;
  case scTruncate: {
    const SCEVTruncateExpr *Trunc = cast<SCEVTruncateExpr>(this);
    const SCEV *Op = Trunc->getOperand();
    OS << "(trunc " << *Op->getType() << " " << *Op << " to "
       << *Trunc->getType() << ")";
    return;
  }
  case scZeroExtend: {
    const SCEVZeroExtendExpr *ZExt = cast<SCEVZeroExtendExpr>(this);
    const SCEV *Op = ZExt->getOperand();
    OS << "(zext " << *Op->getType() << " " << *Op << " to "
       << *ZExt->getType() << ")";
    return;
  }
  case scSignExtend: {
    const SCEVSignExtendExpr *SExt = cast<SCEVSignExtendExpr>(this);
    const SCEV *Op = SExt->getOperand();
    OS << "(sext " << *Op->getType() << " " << *Op << " to "
       << *SExt->getType() << ")";
    return;
  }
  case scAddRecExpr: {
    const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(this);
    OS << "{" << *AR->getOperand(0);
    for (unsigned i = 1, e = AR->getNumOperands(); i != e; ++i)
      OS << ",+," << *AR->getOperand(i);
    OS << "}<";
    if (AR->getNoWrapFlags(FlagNUW))
      OS << "nuw><";
    if (AR->getNoWrapFlags(FlagNSW))
      OS << "nsw><";
    if (AR->getNoWrapFlags(FlagNW) &&
        !AR->getNoWrapFlags((NoWrapFlags)(FlagNUW | FlagNSW)))
      OS << "nw><";
    WriteAsOperand(OS, AR->getLoop()->getHeader(), /*PrintType=*/false);
    OS << ">";
    return;
  }
  case scAddExpr:
  case scMulExpr:
  case scUMaxExpr:
  case scSMaxExpr: {
    const SCEVNAryExpr *NAry = cast<SCEVNAryExpr>(this);
    const char *OpStr = 0;
    switch (NAry->getSCEVType()) {
    case scAddExpr: OpStr = " + "; break;
    case scMulExpr: OpStr = " * "; break;
    case scUMaxExpr: OpStr = " umax "; break;
    case scSMaxExpr: OpStr = " smax "; break;
    }
    OS << "(";
    for (SCEVNAryExpr::op_iterator I = NAry->op_begin(), E = NAry->op_end();
         I != E; ++I) {
      OS << **I;
      if (llvm::next(I) != E)
        OS << OpStr;
    }
    OS << ")";
    switch (NAry->getSCEVType()) {
    case scAddExpr:
    case scMulExpr:
      if (NAry->getNoWrapFlags(FlagNUW))
        OS << "<nuw>";
      if (NAry->getNoWrapFlags(FlagNSW))
        OS << "<nsw>";
    }
    return;
  }
  case scUDivExpr: {
    const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(this);
    OS << "(" << *UDiv->getLHS() << " /u " << *UDiv->getRHS() << ")";
    return;
  }
  case scUnknown: {
    const SCEVUnknown *U = cast<SCEVUnknown>(this);
    Type *AllocTy;
    if (U->isSizeOf(AllocTy)) {
      OS << "sizeof(" << *AllocTy << ")";
      return;
    }
    if (U->isAlignOf(AllocTy)) {
      OS << "alignof(" << *AllocTy << ")";
      return;
    }
    Type *CTy;
    Constant *FieldNo;
    if (U->isOffsetOf(CTy, FieldNo)) {
      OS << "offsetof(" << *CTy << ", ";
      WriteAsOperand(OS, FieldNo, false);
      OS << ")";
      return;
    }
    // Otherwise just print it normally.
    WriteAsOperand(OS, U->getValue(), false);
    return;
  }
  case scCouldNotCompute:
    OS << "***COULDNOTCOMPUTE***";
    return;
  default: break;
  }
  llvm_unreachable("Unknown SCEV kind!");
}

Type *SCEV::getType() const {
  switch (getSCEVType()) {
  case scConstant:
    return cast<SCEVConstant>(this)->getType();
  case scTruncate:
  case scZeroExtend:
  case scSignExtend:
    return cast<SCEVCastExpr>(this)->getType();
  case scAddRecExpr:
  case scMulExpr:
  case scUMaxExpr:
  case scSMaxExpr:
    return cast<SCEVNAryExpr>(this)->getType();
  case scAddExpr:
    return cast<SCEVAddExpr>(this)->getType();
  case scUDivExpr:
    return cast<SCEVUDivExpr>(this)->getType();
  case scUnknown:
    return cast<SCEVUnknown>(this)->getType();
  case scCouldNotCompute:
    llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  default:
    llvm_unreachable("Unknown SCEV kind!");
  }
}

bool SCEV::isZero() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isZero();
  return false;
}

bool SCEV::isOne() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isOne();
  return false;
}

bool SCEV::isAllOnesValue() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isAllOnesValue();
  return false;
}

/// isNonConstantNegative - Return true if the specified scev is negated, but
/// not a constant.
bool SCEV::isNonConstantNegative() const {
  const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(this);
  if (!Mul) return false;

  // If there is a constant factor, it will be first.
  const SCEVConstant *SC = dyn_cast<SCEVConstant>(Mul->getOperand(0));
  if (!SC) return false;

  // Return true if the value is negative, this matches things like (-42 * V).
  return SC->getValue()->getValue().isNegative();
}

SCEVCouldNotCompute::SCEVCouldNotCompute() :
  SCEV(FoldingSetNodeIDRef(), scCouldNotCompute) {}

bool SCEVCouldNotCompute::classof(const SCEV *S) {
  return S->getSCEVType() == scCouldNotCompute;
}

const SCEV *ScalarEvolution::getConstant(ConstantInt *V) {
  FoldingSetNodeID ID;
  ID.AddInteger(scConstant);
  ID.AddPointer(V);
  void *IP = 0;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = new (SCEVAllocator) SCEVConstant(ID.Intern(SCEVAllocator), V);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}

const SCEV *ScalarEvolution::getConstant(const APInt &Val) {
  return getConstant(ConstantInt::get(getContext(), Val));
}

const SCEV *
ScalarEvolution::getConstant(Type *Ty, uint64_t V, bool isSigned) {
  IntegerType *ITy = cast<IntegerType>(getEffectiveSCEVType(Ty));
  return getConstant(ConstantInt::get(ITy, V, isSigned));
}

SCEVCastExpr::SCEVCastExpr(const FoldingSetNodeIDRef ID,
                           unsigned SCEVTy, const SCEV *op, Type *ty)
  : SCEV(ID, SCEVTy), Op(op), Ty(ty) {}

SCEVTruncateExpr::SCEVTruncateExpr(const FoldingSetNodeIDRef ID,
                                   const SCEV *op, Type *ty)
  : SCEVCastExpr(ID, scTruncate, op, ty) {
  assert((Op->getType()->isIntegerTy() || Op->getType()->isPointerTy()) &&
         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
         "Cannot truncate non-integer value!");
}

SCEVZeroExtendExpr::SCEVZeroExtendExpr(const FoldingSetNodeIDRef ID,
                                       const SCEV *op, Type *ty)
  : SCEVCastExpr(ID, scZeroExtend, op, ty) {
  assert((Op->getType()->isIntegerTy() || Op->getType()->isPointerTy()) &&
         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
         "Cannot zero extend non-integer value!");
}

SCEVSignExtendExpr::SCEVSignExtendExpr(const FoldingSetNodeIDRef ID,
                                       const SCEV *op, Type *ty)
  : SCEVCastExpr(ID, scSignExtend, op, ty) {
  assert((Op->getType()->isIntegerTy() || Op->getType()->isPointerTy()) &&
         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
         "Cannot sign extend non-integer value!");
}

void SCEVUnknown::deleted() {
  // Clear this SCEVUnknown from various maps.
  SE->forgetMemoizedResults(this);

  // Remove this SCEVUnknown from the uniquing map.
  SE->UniqueSCEVs.RemoveNode(this);

  // Release the value.
  setValPtr(0);
}

void SCEVUnknown::allUsesReplacedWith(Value *New) {
  // Clear this SCEVUnknown from various maps.
  SE->forgetMemoizedResults(this);

  // Remove this SCEVUnknown from the uniquing map.
  SE->UniqueSCEVs.RemoveNode(this);

  // Update this SCEVUnknown to point to the new value. This is needed
  // because there may still be outstanding SCEVs which still point to
  // this SCEVUnknown.
  setValPtr(New);
}
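
// The predicates below recognize the constant-expression idioms that
// ScalarEvolution uses to describe sizeof, alignof and offsetof values: a
// ptrtoint of a getelementptr constant expression on a null pointer.
// Schematically, sizeof(Ty) appears as ptrtoint(gep(Ty* null, 1)) and
// offsetof(STy, Field) as ptrtoint(gep(STy* null, 0, Field)); isAlignOf
// matches the analogous pattern using a helper struct of the form { i1, Ty }.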
bool SCEVUnknown::isSizeOf(Type *&AllocTy) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getOperand(0)->isNullValue() &&
            CE->getNumOperands() == 2)
          if (ConstantInt *CI = dyn_cast<ConstantInt>(CE->getOperand(1)))
            if (CI->isOne()) {
              AllocTy = cast<PointerType>(CE->getOperand(0)->getType())
                          ->getElementType();
              return true;
            }

  return false;
}

bool SCEVUnknown::isAlignOf(Type *&AllocTy) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getOperand(0)->isNullValue()) {
          Type *Ty =
            cast<PointerType>(CE->getOperand(0)->getType())->getElementType();
          if (StructType *STy = dyn_cast<StructType>(Ty))
            if (!STy->isPacked() &&
                CE->getNumOperands() == 3 &&
                CE->getOperand(1)->isNullValue()) {
              if (ConstantInt *CI = dyn_cast<ConstantInt>(CE->getOperand(2)))
                if (CI->isOne() &&
                    STy->getNumElements() == 2 &&
                    STy->getElementType(0)->isIntegerTy(1)) {
                  AllocTy = STy->getElementType(1);
                  return true;
                }
            }
        }

  return false;
}

bool SCEVUnknown::isOffsetOf(Type *&CTy, Constant *&FieldNo) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getNumOperands() == 3 &&
            CE->getOperand(0)->isNullValue() &&
            CE->getOperand(1)->isNullValue()) {
          Type *Ty =
            cast<PointerType>(CE->getOperand(0)->getType())->getElementType();
          // Ignore vector types here so that ScalarEvolutionExpander doesn't
          // emit getelementptrs that index into vectors.
          if (Ty->isStructTy() || Ty->isArrayTy()) {
            CTy = Ty;
            FieldNo = CE->getOperand(2);
            return true;
          }
        }

  return false;
}

//===----------------------------------------------------------------------===//
//                               SCEV Utilities
//===----------------------------------------------------------------------===//

namespace {
  /// SCEVComplexityCompare - Return true if the complexity of the LHS is less
  /// than the complexity of the RHS. This comparator is used to canonicalize
  /// expressions.
  class SCEVComplexityCompare {
    const LoopInfo *const LI;
  public:
    explicit SCEVComplexityCompare(const LoopInfo *li) : LI(li) {}

    // Return true or false if LHS is less than, or at least RHS, respectively.
    bool operator()(const SCEV *LHS, const SCEV *RHS) const {
      return compare(LHS, RHS) < 0;
    }

    // Return negative, zero, or positive, if LHS is less than, equal to, or
    // greater than RHS, respectively. A three-way result allows recursive
    // comparisons to be more efficient.
    int compare(const SCEV *LHS, const SCEV *RHS) const {
      // Fast-path: SCEVs are uniqued so we can do a quick equality check.
      if (LHS == RHS)
        return 0;

      // Primarily, sort the SCEVs by their getSCEVType().
      unsigned LType = LHS->getSCEVType(), RType = RHS->getSCEVType();
      if (LType != RType)
        return (int)LType - (int)RType;

      // Aside from the getSCEVType() ordering, the particular ordering
      // isn't very important except that it's beneficial to be consistent,
      // so that (a + b) and (b + a) don't end up as different expressions.
      switch (LType) {
      case scUnknown: {
        const SCEVUnknown *LU = cast<SCEVUnknown>(LHS);
        const SCEVUnknown *RU = cast<SCEVUnknown>(RHS);

        // Sort SCEVUnknown values with some loose heuristics. TODO: This is
        // not as complete as it could be.
        const Value *LV = LU->getValue(), *RV = RU->getValue();

        // Order pointer values after integer values. This helps SCEVExpander
        // form GEPs.
        bool LIsPointer = LV->getType()->isPointerTy(),
             RIsPointer = RV->getType()->isPointerTy();
        if (LIsPointer != RIsPointer)
          return (int)LIsPointer - (int)RIsPointer;

        // Compare getValueID values.
        unsigned LID = LV->getValueID(),
                 RID = RV->getValueID();
        if (LID != RID)
          return (int)LID - (int)RID;

        // Sort arguments by their position.
        if (const Argument *LA = dyn_cast<Argument>(LV)) {
          const Argument *RA = cast<Argument>(RV);
          unsigned LArgNo = LA->getArgNo(), RArgNo = RA->getArgNo();
          return (int)LArgNo - (int)RArgNo;
        }

        // For instructions, compare their loop depth, and their operand
        // count. This is pretty loose.
        if (const Instruction *LInst = dyn_cast<Instruction>(LV)) {
          const Instruction *RInst = cast<Instruction>(RV);

          // Compare loop depths.
          const BasicBlock *LParent = LInst->getParent(),
                           *RParent = RInst->getParent();
          if (LParent != RParent) {
            unsigned LDepth = LI->getLoopDepth(LParent),
                     RDepth = LI->getLoopDepth(RParent);
            if (LDepth != RDepth)
              return (int)LDepth - (int)RDepth;
          }

          // Compare the number of operands.
          unsigned LNumOps = LInst->getNumOperands(),
                   RNumOps = RInst->getNumOperands();
          return (int)LNumOps - (int)RNumOps;
        }

        return 0;
      }

      case scConstant: {
        const SCEVConstant *LC = cast<SCEVConstant>(LHS);
        const SCEVConstant *RC = cast<SCEVConstant>(RHS);

        // Compare constant values.
        const APInt &LA = LC->getValue()->getValue();
        const APInt &RA = RC->getValue()->getValue();
        unsigned LBitWidth = LA.getBitWidth(), RBitWidth = RA.getBitWidth();
        if (LBitWidth != RBitWidth)
          return (int)LBitWidth - (int)RBitWidth;
        return LA.ult(RA) ? -1 : 1;
      }

      case scAddRecExpr: {
        const SCEVAddRecExpr *LA = cast<SCEVAddRecExpr>(LHS);
        const SCEVAddRecExpr *RA = cast<SCEVAddRecExpr>(RHS);

        // Compare addrec loop depths.
        const Loop *LLoop = LA->getLoop(), *RLoop = RA->getLoop();
        if (LLoop != RLoop) {
          unsigned LDepth = LLoop->getLoopDepth(),
                   RDepth = RLoop->getLoopDepth();
          if (LDepth != RDepth)
            return (int)LDepth - (int)RDepth;
        }

        // Addrec complexity grows with operand count.
        unsigned LNumOps = LA->getNumOperands(), RNumOps = RA->getNumOperands();
        if (LNumOps != RNumOps)
          return (int)LNumOps - (int)RNumOps;

        // Lexicographically compare.
        for (unsigned i = 0; i != LNumOps; ++i) {
          long X = compare(LA->getOperand(i), RA->getOperand(i));
          if (X != 0)
            return X;
        }

        return 0;
      }

      case scAddExpr:
      case scMulExpr:
      case scSMaxExpr:
      case scUMaxExpr: {
        const SCEVNAryExpr *LC = cast<SCEVNAryExpr>(LHS);
        const SCEVNAryExpr *RC = cast<SCEVNAryExpr>(RHS);

        // Lexicographically compare n-ary expressions.
        unsigned LNumOps = LC->getNumOperands(), RNumOps = RC->getNumOperands();
        for (unsigned i = 0; i != LNumOps; ++i) {
          if (i >= RNumOps)
            return 1;
          long X = compare(LC->getOperand(i), RC->getOperand(i));
          if (X != 0)
            return X;
        }
        return (int)LNumOps - (int)RNumOps;
      }

      case scUDivExpr: {
        const SCEVUDivExpr *LC = cast<SCEVUDivExpr>(LHS);
        const SCEVUDivExpr *RC = cast<SCEVUDivExpr>(RHS);

        // Lexicographically compare udiv expressions.
        long X = compare(LC->getLHS(), RC->getLHS());
        if (X != 0)
          return X;
        return compare(LC->getRHS(), RC->getRHS());
      }

      case scTruncate:
      case scZeroExtend:
      case scSignExtend: {
        const SCEVCastExpr *LC = cast<SCEVCastExpr>(LHS);
        const SCEVCastExpr *RC = cast<SCEVCastExpr>(RHS);

        // Compare cast expressions by operand.
        return compare(LC->getOperand(), RC->getOperand());
      }

      default:
        llvm_unreachable("Unknown SCEV kind!");
      }
    }
  };
}

/// GroupByComplexity - Given a list of SCEV objects, order them by their
/// complexity, and group objects of the same complexity together by value.
/// When this routine is finished, we know that any duplicates in the vector
/// are consecutive and that complexity is monotonically increasing.
///
/// Note that we take special precautions to ensure that we get deterministic
/// results from this routine. In other words, we don't want the results of
/// this to depend on where the addresses of various SCEV objects happened to
/// land in memory.
///
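/// For example, an operand list such as (%a, 2, %b, %a), where %a and %b are
/// SCEVUnknowns and 2 is a SCEVConstant, is reordered so that the constant
/// comes first and the two occurrences of %a end up adjacent; one possible
/// result is (2, %a, %a, %b).
///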
static void GroupByComplexity(SmallVectorImpl<const SCEV *> &Ops,
                              LoopInfo *LI) {
  if (Ops.size() < 2) return;  // Noop

  if (Ops.size() == 2) {
    // This is the common case, which also happens to be trivially simple.
    // Special case it.
    const SCEV *&LHS = Ops[0], *&RHS = Ops[1];
    if (SCEVComplexityCompare(LI)(RHS, LHS))
      std::swap(LHS, RHS);
    return;
  }

  // Do the rough sort by complexity.
  std::stable_sort(Ops.begin(), Ops.end(), SCEVComplexityCompare(LI));

  // Now that we are sorted by complexity, group elements of the same
  // complexity. Note that this is, at worst, N^2, but the vector is likely to
  // be extremely short in practice. Note that we take this approach because we
  // do not want to depend on the addresses of the objects we are grouping.
  for (unsigned i = 0, e = Ops.size(); i != e-2; ++i) {
    const SCEV *S = Ops[i];
    unsigned Complexity = S->getSCEVType();

    // If there are any objects of the same complexity and same value as this
    // one, group them.
    for (unsigned j = i+1; j != e && Ops[j]->getSCEVType() == Complexity; ++j) {
      if (Ops[j] == S) { // Found a duplicate.
        // Move it to immediately after i'th element.
        std::swap(Ops[i+1], Ops[j]);
        ++i;   // no need to rescan it.
        if (i == e-2) return;  // Done!
      }
    }
  }
}

//===----------------------------------------------------------------------===//
//                      Simple SCEV method implementations
//===----------------------------------------------------------------------===//

/// BinomialCoefficient - Compute BC(It, K). The result has width W.
/// Assume K > 0.
static const SCEV *BinomialCoefficient(const SCEV *It, unsigned K,
                                       ScalarEvolution &SE,
                                       Type *ResultTy) {
  // Handle the simplest case efficiently.
  if (K == 1)
    return SE.getTruncateOrZeroExtend(It, ResultTy);

  // We are using the following formula for BC(It, K):
  //
  //   BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / K!
  //
  // Suppose, W is the bitwidth of the return value. We must be prepared for
  // overflow. Hence, we must assure that the result of our computation is
  // equal to the accurate one modulo 2^W. Unfortunately, division isn't
  // safe in modular arithmetic.
  //
  // However, this code doesn't use exactly that formula; the formula it uses
  // is something like the following, where T is the number of factors of 2 in
  // K! (i.e. trailing zeros in the binary representation of K!), and ^ is
  // exponentiation:
  //
  //   BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / 2^T / (K! / 2^T)
  //
  // This formula is trivially equivalent to the previous formula. However,
  // this formula can be implemented much more efficiently. The trick is that
  // K! / 2^T is odd, and exact division by an odd number *is* safe in modular
  // arithmetic. To do exact division in modular arithmetic, all we have
  // to do is multiply by the inverse. Therefore, this step can be done at
  // width W.
  //
  // The next issue is how to safely do the division by 2^T. The way this
  // is done is by doing the multiplication step at a width of at least W + T
  // bits. This way, the bottom W+T bits of the product are accurate. Then,
  // when we perform the division by 2^T (which is equivalent to a right shift
  // by T), the bottom W bits are accurate. Extra bits are okay; they'll get
  // truncated out after the division by 2^T.
  //
  // In comparison to just directly using the first formula, this technique
  // is much more efficient; using the first formula requires W * K bits,
  // but this formula requires less than W + K bits. Also, the first formula
  // requires a division step, whereas this formula only requires multiplies
  // and shifts.
  //
  // It doesn't matter whether the subtraction step is done in the calculation
  // width or the input iteration count's width; if the subtraction overflows,
  // the result must be zero anyway. We prefer here to do it in the width of
  // the induction variable because it helps a lot for certain cases; CodeGen
  // isn't smart enough to ignore the overflow, which leads to much less
  // efficient code if the width of the subtraction is wider than the native
  // register width.
  //
  // (It's possible to not widen at all by pulling out factors of 2 before
  // the multiplication; for example, K=2 can be calculated as
  // It/2*(It+(It*INT_MIN/INT_MIN)+-1). However, it requires
  // extra arithmetic, so it's not an obvious win, and it gets
  // much more complicated for K > 3.)
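  //
  // For example, with K = 3 we have K! = 6, T = 1 and K!/2^T = 3, so BC(It, 3)
  // is computed by forming It*(It-1)*(It-2) at W+1 bits, dividing by 2^T = 2,
  // truncating back to W bits, and multiplying by the multiplicative inverse
  // of 3 modulo 2^W, which performs the exact division by 3.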

  // Protection from insane SCEVs; this bound is conservative,
  // but it probably doesn't matter.
  if (K > 1000)
    return SE.getCouldNotCompute();

  unsigned W = SE.getTypeSizeInBits(ResultTy);

  // Calculate K! / 2^T and T; we divide out the factors of two before
  // multiplying for calculating K! / 2^T to avoid overflow.
  // Other overflow doesn't matter because we only care about the bottom
  // W bits of the result.
  APInt OddFactorial(W, 1);
  unsigned T = 1;
  for (unsigned i = 3; i <= K; ++i) {
    APInt Mult(W, i);
    unsigned TwoFactors = Mult.countTrailingZeros();
    T += TwoFactors;
    Mult = Mult.lshr(TwoFactors);
    OddFactorial *= Mult;
  }

  // We need at least W + T bits for the multiplication step
  unsigned CalculationBits = W + T;

  // Calculate 2^T, at width T+W.
  APInt DivFactor = APInt(CalculationBits, 1).shl(T);

  // Calculate the multiplicative inverse of K! / 2^T;
  // this multiplication factor will perform the exact division by
  // K! / 2^T.
  APInt Mod = APInt::getSignedMinValue(W+1);
  APInt MultiplyFactor = OddFactorial.zext(W+1);
  MultiplyFactor = MultiplyFactor.multiplicativeInverse(Mod);
  MultiplyFactor = MultiplyFactor.trunc(W);

  // Calculate the product, at width T+W
  IntegerType *CalculationTy = IntegerType::get(SE.getContext(),
                                                CalculationBits);
  const SCEV *Dividend = SE.getTruncateOrZeroExtend(It, CalculationTy);
  for (unsigned i = 1; i != K; ++i) {
    const SCEV *S = SE.getMinusSCEV(It, SE.getConstant(It->getType(), i));
    Dividend = SE.getMulExpr(Dividend,
                             SE.getTruncateOrZeroExtend(S, CalculationTy));
  }

  // Divide by 2^T
  const SCEV *DivResult = SE.getUDivExpr(Dividend, SE.getConstant(DivFactor));

  // Truncate the result, and divide by K! / 2^T.
  return SE.getMulExpr(SE.getConstant(MultiplyFactor),
                       SE.getTruncateOrZeroExtend(DivResult, ResultTy));
}
  702. /// evaluateAtIteration - Return the value of this chain of recurrences at
  703. /// the specified iteration number. We can evaluate this recurrence by
  704. /// multiplying each element in the chain by the binomial coefficient
  705. /// corresponding to it. In other words, we can evaluate {A,+,B,+,C,+,D} as:
  706. ///
  707. /// A*BC(It, 0) + B*BC(It, 1) + C*BC(It, 2) + D*BC(It, 3)
  708. ///
  709. /// where BC(It, k) stands for binomial coefficient.
  710. ///
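/// For example (illustrative): {5,+,3,+,2} evaluated at It = 4 is
/// 5 + 3*BC(4,1) + 2*BC(4,2) = 5 + 12 + 12 = 29.
///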
  711. const SCEV *SCEVAddRecExpr::evaluateAtIteration(const SCEV *It,
  712. ScalarEvolution &SE) const {
  713. const SCEV *Result = getStart();
  714. for (unsigned i = 1, e = getNumOperands(); i != e; ++i) {
  715. // The computation is correct in the face of overflow provided that the
  716. // multiplication is performed _after_ the evaluation of the binomial
  717. // coefficient.
  718. const SCEV *Coeff = BinomialCoefficient(It, i, SE, getType());
  719. if (isa<SCEVCouldNotCompute>(Coeff))
  720. return Coeff;
  721. Result = SE.getAddExpr(Result, SE.getMulExpr(getOperand(i), Coeff));
  722. }
  723. return Result;
  724. }
  725. //===----------------------------------------------------------------------===//
  726. // SCEV Expression folder implementations
  727. //===----------------------------------------------------------------------===//
  728. const SCEV *ScalarEvolution::getTruncateExpr(const SCEV *Op,
  729. Type *Ty) {
  730. assert(getTypeSizeInBits(Op->getType()) > getTypeSizeInBits(Ty) &&
  731. "This is not a truncating conversion!");
  732. assert(isSCEVable(Ty) &&
  733. "This is not a conversion to a SCEVable type!");
  734. Ty = getEffectiveSCEVType(Ty);
  735. FoldingSetNodeID ID;
  736. ID.AddInteger(scTruncate);
  737. ID.AddPointer(Op);
  738. ID.AddPointer(Ty);
  739. void *IP = 0;
  740. if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  741. // Fold if the operand is constant.
  742. if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
  743. return getConstant(
  744. cast<ConstantInt>(ConstantExpr::getTrunc(SC->getValue(), Ty)));
  745. // trunc(trunc(x)) --> trunc(x)
  746. if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op))
  747. return getTruncateExpr(ST->getOperand(), Ty);
  748. // trunc(sext(x)) --> sext(x) if widening or trunc(x) if narrowing
  749. if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op))
  750. return getTruncateOrSignExtend(SS->getOperand(), Ty);
  751. // trunc(zext(x)) --> zext(x) if widening or trunc(x) if narrowing
  752. if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
  753. return getTruncateOrZeroExtend(SZ->getOperand(), Ty);
  754. // trunc(x1+x2+...+xN) --> trunc(x1)+trunc(x2)+...+trunc(xN) if we can
  755. // eliminate all the truncates.
  756. if (const SCEVAddExpr *SA = dyn_cast<SCEVAddExpr>(Op)) {
  757. SmallVector<const SCEV *, 4> Operands;
  758. bool hasTrunc = false;
  759. for (unsigned i = 0, e = SA->getNumOperands(); i != e && !hasTrunc; ++i) {
  760. const SCEV *S = getTruncateExpr(SA->getOperand(i), Ty);
  761. hasTrunc = isa<SCEVTruncateExpr>(S);
  762. Operands.push_back(S);
  763. }
  764. if (!hasTrunc)
  765. return getAddExpr(Operands);
  766. UniqueSCEVs.FindNodeOrInsertPos(ID, IP); // Mutates IP, returns NULL.
  767. }
  768. // trunc(x1*x2*...*xN) --> trunc(x1)*trunc(x2)*...*trunc(xN) if we can
  769. // eliminate all the truncates.
  770. if (const SCEVMulExpr *SM = dyn_cast<SCEVMulExpr>(Op)) {
  771. SmallVector<const SCEV *, 4> Operands;
  772. bool hasTrunc = false;
  773. for (unsigned i = 0, e = SM->getNumOperands(); i != e && !hasTrunc; ++i) {
  774. const SCEV *S = getTruncateExpr(SM->getOperand(i), Ty);
  775. hasTrunc = isa<SCEVTruncateExpr>(S);
  776. Operands.push_back(S);
  777. }
  778. if (!hasTrunc)
  779. return getMulExpr(Operands);
  780. UniqueSCEVs.FindNodeOrInsertPos(ID, IP); // Mutates IP, returns NULL.
  781. }
  782. // If the input value is a chrec scev, truncate the chrec's operands.
  783. if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(Op)) {
  784. SmallVector<const SCEV *, 4> Operands;
  785. for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i)
  786. Operands.push_back(getTruncateExpr(AddRec->getOperand(i), Ty));
  787. return getAddRecExpr(Operands, AddRec->getLoop(), SCEV::FlagAnyWrap);
  788. }
  789. // The cast wasn't folded; create an explicit cast node. We can reuse
  790. // the existing insert position since if we get here, we won't have
  791. // made any changes which would invalidate it.
  792. SCEV *S = new (SCEVAllocator) SCEVTruncateExpr(ID.Intern(SCEVAllocator),
  793. Op, Ty);
  794. UniqueSCEVs.InsertNode(S, IP);
  795. return S;
  796. }
  797. const SCEV *ScalarEvolution::getZeroExtendExpr(const SCEV *Op,
  798. Type *Ty) {
  799. assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
  800. "This is not an extending conversion!");
  801. assert(isSCEVable(Ty) &&
  802. "This is not a conversion to a SCEVable type!");
  803. Ty = getEffectiveSCEVType(Ty);
  804. // Fold if the operand is constant.
  805. if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
  806. return getConstant(
  807. cast<ConstantInt>(ConstantExpr::getZExt(SC->getValue(), Ty)));
  808. // zext(zext(x)) --> zext(x)
  809. if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
  810. return getZeroExtendExpr(SZ->getOperand(), Ty);
  811. // Before doing any expensive analysis, check to see if we've already
  812. // computed a SCEV for this Op and Ty.
  813. FoldingSetNodeID ID;
  814. ID.AddInteger(scZeroExtend);
  815. ID.AddPointer(Op);
  816. ID.AddPointer(Ty);
  817. void *IP = 0;
  818. if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  819. // zext(trunc(x)) --> zext(x) or x or trunc(x)
  820. if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op)) {
  821. // It's possible the bits taken off by the truncate were all zero bits. If
  822. // so, we should be able to simplify this further.
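// For example (illustrative): if x is an i32 whose unsigned range is known to
// be [0, 200), then zext i64 (trunc x to i8) loses no information and can be
// rewritten as a plain zext of x to i64.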
  823. const SCEV *X = ST->getOperand();
  824. ConstantRange CR = getUnsignedRange(X);
  825. unsigned TruncBits = getTypeSizeInBits(ST->getType());
  826. unsigned NewBits = getTypeSizeInBits(Ty);
  827. if (CR.truncate(TruncBits).zeroExtend(NewBits).contains(
  828. CR.zextOrTrunc(NewBits)))
  829. return getTruncateOrZeroExtend(X, Ty);
  830. }
  831. // If the input value is a chrec scev, and we can prove that the value
  832. // did not overflow the old, smaller, value, we can zero extend all of the
  833. // operands (often constants). This allows analysis of something like
  834. // this: for (unsigned char X = 0; X < 100; ++X) { int Y = X; }
  835. if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op))
  836. if (AR->isAffine()) {
  837. const SCEV *Start = AR->getStart();
  838. const SCEV *Step = AR->getStepRecurrence(*this);
  839. unsigned BitWidth = getTypeSizeInBits(AR->getType());
  840. const Loop *L = AR->getLoop();
  841. // If we have special knowledge that this addrec won't overflow,
  842. // we don't need to do any further analysis.
  843. if (AR->getNoWrapFlags(SCEV::FlagNUW))
  844. return getAddRecExpr(getZeroExtendExpr(Start, Ty),
  845. getZeroExtendExpr(Step, Ty),
  846. L, AR->getNoWrapFlags());
  847. // Check whether the backedge-taken count is SCEVCouldNotCompute.
  848. // Note that this serves two purposes: It filters out loops that are
  849. // simply not analyzable, and it covers the case where this code is
  850. // being called from within backedge-taken count analysis, such that
  851. // attempting to ask for the backedge-taken count would likely result
852. // in infinite recursion. In the latter case, the analysis code will
  853. // cope with a conservative value, and it will take care to purge
  854. // that value once it has finished.
  855. const SCEV *MaxBECount = getMaxBackedgeTakenCount(L);
  856. if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
  857. // Manually compute the final value for AR, checking for
  858. // overflow.
859. // Check whether the backedge-taken count can be losslessly cast to
  860. // the addrec's type. The count is always unsigned.
  861. const SCEV *CastedMaxBECount =
  862. getTruncateOrZeroExtend(MaxBECount, Start->getType());
  863. const SCEV *RecastedMaxBECount =
  864. getTruncateOrZeroExtend(CastedMaxBECount, MaxBECount->getType());
  865. if (MaxBECount == RecastedMaxBECount) {
  866. Type *WideTy = IntegerType::get(getContext(), BitWidth * 2);
  867. // Check whether Start+Step*MaxBECount has no unsigned overflow.
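// For example (illustrative): for {0,+,3}<i8> with MaxBECount = 100, the i8
// sum 3*100 wraps to 44, so its zext (44) differs from the i16 sum (300) and
// NUW cannot be concluded this way.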
  868. const SCEV *ZMul = getMulExpr(CastedMaxBECount, Step);
  869. const SCEV *ZAdd = getZeroExtendExpr(getAddExpr(Start, ZMul), WideTy);
  870. const SCEV *WideStart = getZeroExtendExpr(Start, WideTy);
  871. const SCEV *WideMaxBECount =
  872. getZeroExtendExpr(CastedMaxBECount, WideTy);
  873. const SCEV *OperandExtendedAdd =
  874. getAddExpr(WideStart,
  875. getMulExpr(WideMaxBECount,
  876. getZeroExtendExpr(Step, WideTy)));
  877. if (ZAdd == OperandExtendedAdd) {
  878. // Cache knowledge of AR NUW, which is propagated to this AddRec.
  879. const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNUW);
  880. // Return the expression with the addrec on the outside.
  881. return getAddRecExpr(getZeroExtendExpr(Start, Ty),
  882. getZeroExtendExpr(Step, Ty),
  883. L, AR->getNoWrapFlags());
  884. }
  885. // Similar to above, only this time treat the step value as signed.
  886. // This covers loops that count down.
  887. OperandExtendedAdd =
  888. getAddExpr(WideStart,
  889. getMulExpr(WideMaxBECount,
  890. getSignExtendExpr(Step, WideTy)));
  891. if (ZAdd == OperandExtendedAdd) {
  892. // Cache knowledge of AR NW, which is propagated to this AddRec.
  893. // Negative step causes unsigned wrap, but it still can't self-wrap.
  894. const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNW);
  895. // Return the expression with the addrec on the outside.
  896. return getAddRecExpr(getZeroExtendExpr(Start, Ty),
  897. getSignExtendExpr(Step, Ty),
  898. L, AR->getNoWrapFlags());
  899. }
  900. }
  901. // If the backedge is guarded by a comparison with the pre-inc value
  902. // the addrec is safe. Also, if the entry is guarded by a comparison
  903. // with the start value and the backedge is guarded by a comparison
  904. // with the post-inc value, the addrec is safe.
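// For example (illustrative): an i8 IV {0,+,1} whose backedge is guarded by
// "%iv ult 100" never exceeds 99, so the increment cannot wrap unsigned and
// the zero extension can be pushed into the addrec's operands.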
  905. if (isKnownPositive(Step)) {
  906. const SCEV *N = getConstant(APInt::getMinValue(BitWidth) -
  907. getUnsignedRange(Step).getUnsignedMax());
  908. if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_ULT, AR, N) ||
  909. (isLoopEntryGuardedByCond(L, ICmpInst::ICMP_ULT, Start, N) &&
  910. isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_ULT,
  911. AR->getPostIncExpr(*this), N))) {
  912. // Cache knowledge of AR NUW, which is propagated to this AddRec.
  913. const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNUW);
  914. // Return the expression with the addrec on the outside.
  915. return getAddRecExpr(getZeroExtendExpr(Start, Ty),
  916. getZeroExtendExpr(Step, Ty),
  917. L, AR->getNoWrapFlags());
  918. }
  919. } else if (isKnownNegative(Step)) {
  920. const SCEV *N = getConstant(APInt::getMaxValue(BitWidth) -
  921. getSignedRange(Step).getSignedMin());
  922. if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_UGT, AR, N) ||
  923. (isLoopEntryGuardedByCond(L, ICmpInst::ICMP_UGT, Start, N) &&
  924. isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_UGT,
  925. AR->getPostIncExpr(*this), N))) {
  926. // Cache knowledge of AR NW, which is propagated to this AddRec.
  927. // Negative step causes unsigned wrap, but it still can't self-wrap.
  928. const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNW);
  929. // Return the expression with the addrec on the outside.
  930. return getAddRecExpr(getZeroExtendExpr(Start, Ty),
  931. getSignExtendExpr(Step, Ty),
  932. L, AR->getNoWrapFlags());
  933. }
  934. }
  935. }
  936. }
  937. // The cast wasn't folded; create an explicit cast node.
  938. // Recompute the insert position, as it may have been invalidated.
  939. if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  940. SCEV *S = new (SCEVAllocator) SCEVZeroExtendExpr(ID.Intern(SCEVAllocator),
  941. Op, Ty);
  942. UniqueSCEVs.InsertNode(S, IP);
  943. return S;
  944. }
  945. // Get the limit of a recurrence such that incrementing by Step cannot cause
  946. // signed overflow as long as the value of the recurrence within the loop does
  947. // not exceed this limit before incrementing.
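// For example (illustrative): with an i8 step known to lie in [1, 5], the
// returned limit is INT8_MIN - 5 (modulo 2^8), i.e. 123, with predicate SLT:
// any value below 123 can be incremented by the step without signed overflow.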
  948. static const SCEV *getOverflowLimitForStep(const SCEV *Step,
  949. ICmpInst::Predicate *Pred,
  950. ScalarEvolution *SE) {
  951. unsigned BitWidth = SE->getTypeSizeInBits(Step->getType());
  952. if (SE->isKnownPositive(Step)) {
  953. *Pred = ICmpInst::ICMP_SLT;
  954. return SE->getConstant(APInt::getSignedMinValue(BitWidth) -
  955. SE->getSignedRange(Step).getSignedMax());
  956. }
  957. if (SE->isKnownNegative(Step)) {
  958. *Pred = ICmpInst::ICMP_SGT;
  959. return SE->getConstant(APInt::getSignedMaxValue(BitWidth) -
  960. SE->getSignedRange(Step).getSignedMin());
  961. }
  962. return 0;
  963. }
  964. // The recurrence AR has been shown to have no signed wrap. Typically, if we can
  965. // prove NSW for AR, then we can just as easily prove NSW for its preincrement
  966. // or postincrement sibling. This allows normalizing a sign extended AddRec as
967. // such: {sext(Step + Start),+,Step} => {Step + sext(Start),+,Step}. As a
968. // result, the expression "Step + sext(PreIncAR)" is congruent with
969. // "sext(PostIncAR)".
  970. static const SCEV *getPreStartForSignExtend(const SCEVAddRecExpr *AR,
  971. Type *Ty,
  972. ScalarEvolution *SE) {
  973. const Loop *L = AR->getLoop();
  974. const SCEV *Start = AR->getStart();
  975. const SCEV *Step = AR->getStepRecurrence(*SE);
  976. // Check for a simple looking step prior to loop entry.
  977. const SCEVAddExpr *SA = dyn_cast<SCEVAddExpr>(Start);
  978. if (!SA)
  979. return 0;
  980. // Create an AddExpr for "PreStart" after subtracting Step. Full SCEV
  981. // subtraction is expensive. For this purpose, perform a quick and dirty
  982. // difference, by checking for Step in the operand list.
  983. SmallVector<const SCEV *, 4> DiffOps;
  984. for (SCEVAddExpr::op_iterator I = SA->op_begin(), E = SA->op_end();
  985. I != E; ++I) {
  986. if (*I != Step)
  987. DiffOps.push_back(*I);
  988. }
  989. if (DiffOps.size() == SA->getNumOperands())
  990. return 0;
  991. // This is a postinc AR. Check for overflow on the preinc recurrence using the
992. // same three conditions that getSignExtendExpr checks.
  993. // 1. NSW flags on the step increment.
  994. const SCEV *PreStart = SE->getAddExpr(DiffOps, SA->getNoWrapFlags());
  995. const SCEVAddRecExpr *PreAR = dyn_cast<SCEVAddRecExpr>(
  996. SE->getAddRecExpr(PreStart, Step, L, SCEV::FlagAnyWrap));
  997. if (PreAR && PreAR->getNoWrapFlags(SCEV::FlagNSW))
  998. return PreStart;
  999. // 2. Direct overflow check on the step operation's expression.
  1000. unsigned BitWidth = SE->getTypeSizeInBits(AR->getType());
  1001. Type *WideTy = IntegerType::get(SE->getContext(), BitWidth * 2);
  1002. const SCEV *OperandExtendedStart =
  1003. SE->getAddExpr(SE->getSignExtendExpr(PreStart, WideTy),
  1004. SE->getSignExtendExpr(Step, WideTy));
  1005. if (SE->getSignExtendExpr(Start, WideTy) == OperandExtendedStart) {
  1006. // Cache knowledge of PreAR NSW.
  1007. if (PreAR)
  1008. const_cast<SCEVAddRecExpr *>(PreAR)->setNoWrapFlags(SCEV::FlagNSW);
  1009. // FIXME: this optimization needs a unit test
  1010. DEBUG(dbgs() << "SCEV: untested prestart overflow check\n");
  1011. return PreStart;
  1012. }
  1013. // 3. Loop precondition.
  1014. ICmpInst::Predicate Pred;
  1015. const SCEV *OverflowLimit = getOverflowLimitForStep(Step, &Pred, SE);
  1016. if (OverflowLimit &&
  1017. SE->isLoopEntryGuardedByCond(L, Pred, PreStart, OverflowLimit)) {
  1018. return PreStart;
  1019. }
  1020. return 0;
  1021. }
  1022. // Get the normalized sign-extended expression for this AddRec's Start.
  1023. static const SCEV *getSignExtendAddRecStart(const SCEVAddRecExpr *AR,
  1024. Type *Ty,
  1025. ScalarEvolution *SE) {
  1026. const SCEV *PreStart = getPreStartForSignExtend(AR, Ty, SE);
  1027. if (!PreStart)
  1028. return SE->getSignExtendExpr(AR->getStart(), Ty);
  1029. return SE->getAddExpr(SE->getSignExtendExpr(AR->getStepRecurrence(*SE), Ty),
  1030. SE->getSignExtendExpr(PreStart, Ty));
  1031. }
  1032. const SCEV *ScalarEvolution::getSignExtendExpr(const SCEV *Op,
  1033. Type *Ty) {
  1034. assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
  1035. "This is not an extending conversion!");
  1036. assert(isSCEVable(Ty) &&
  1037. "This is not a conversion to a SCEVable type!");
  1038. Ty = getEffectiveSCEVType(Ty);
  1039. // Fold if the operand is constant.
  1040. if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
  1041. return getConstant(
  1042. cast<ConstantInt>(ConstantExpr::getSExt(SC->getValue(), Ty)));
  1043. // sext(sext(x)) --> sext(x)
  1044. if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op))
  1045. return getSignExtendExpr(SS->getOperand(), Ty);
  1046. // sext(zext(x)) --> zext(x)
  1047. if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
  1048. return getZeroExtendExpr(SZ->getOperand(), Ty);
  1049. // Before doing any expensive analysis, check to see if we've already
  1050. // computed a SCEV for this Op and Ty.
  1051. FoldingSetNodeID ID;
  1052. ID.AddInteger(scSignExtend);
  1053. ID.AddPointer(Op);
  1054. ID.AddPointer(Ty);
  1055. void *IP = 0;
  1056. if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  1057. // If the input value is provably positive, build a zext instead.
  1058. if (isKnownNonNegative(Op))
  1059. return getZeroExtendExpr(Op, Ty);
  1060. // sext(trunc(x)) --> sext(x) or x or trunc(x)
  1061. if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op)) {
  1062. // It's possible the bits taken off by the truncate were all sign bits. If
  1063. // so, we should be able to simplify this further.
  1064. const SCEV *X = ST->getOperand();
  1065. ConstantRange CR = getSignedRange(X);
  1066. unsigned TruncBits = getTypeSizeInBits(ST->getType());
  1067. unsigned NewBits = getTypeSizeInBits(Ty);
  1068. if (CR.truncate(TruncBits).signExtend(NewBits).contains(
  1069. CR.sextOrTrunc(NewBits)))
  1070. return getTruncateOrSignExtend(X, Ty);
  1071. }
  1072. // If the input value is a chrec scev, and we can prove that the value
  1073. // did not overflow the old, smaller, value, we can sign extend all of the
  1074. // operands (often constants). This allows analysis of something like
  1075. // this: for (signed char X = 0; X < 100; ++X) { int Y = X; }
  1076. if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op))
  1077. if (AR->isAffine()) {
  1078. const SCEV *Start = AR->getStart();
  1079. const SCEV *Step = AR->getStepRecurrence(*this);
  1080. unsigned BitWidth = getTypeSizeInBits(AR->getType());
  1081. const Loop *L = AR->getLoop();
  1082. // If we have special knowledge that this addrec won't overflow,
  1083. // we don't need to do any further analysis.
  1084. if (AR->getNoWrapFlags(SCEV::FlagNSW))
  1085. return getAddRecExpr(getSignExtendAddRecStart(AR, Ty, this),
  1086. getSignExtendExpr(Step, Ty),
  1087. L, SCEV::FlagNSW);
  1088. // Check whether the backedge-taken count is SCEVCouldNotCompute.
  1089. // Note that this serves two purposes: It filters out loops that are
  1090. // simply not analyzable, and it covers the case where this code is
  1091. // being called from within backedge-taken count analysis, such that
  1092. // attempting to ask for the backedge-taken count would likely result
1093. // in infinite recursion. In the latter case, the analysis code will
  1094. // cope with a conservative value, and it will take care to purge
  1095. // that value once it has finished.
  1096. const SCEV *MaxBECount = getMaxBackedgeTakenCount(L);
  1097. if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
  1098. // Manually compute the final value for AR, checking for
  1099. // overflow.
1100. // Check whether the backedge-taken count can be losslessly cast to
  1101. // the addrec's type. The count is always unsigned.
  1102. const SCEV *CastedMaxBECount =
  1103. getTruncateOrZeroExtend(MaxBECount, Start->getType());
  1104. const SCEV *RecastedMaxBECount =
  1105. getTruncateOrZeroExtend(CastedMaxBECount, MaxBECount->getType());
  1106. if (MaxBECount == RecastedMaxBECount) {
  1107. Type *WideTy = IntegerType::get(getContext(), BitWidth * 2);
  1108. // Check whether Start+Step*MaxBECount has no signed overflow.
  1109. const SCEV *SMul = getMulExpr(CastedMaxBECount, Step);
  1110. const SCEV *SAdd = getSignExtendExpr(getAddExpr(Start, SMul), WideTy);
  1111. const SCEV *WideStart = getSignExtendExpr(Start, WideTy);
  1112. const SCEV *WideMaxBECount =
  1113. getZeroExtendExpr(CastedMaxBECount, WideTy);
  1114. const SCEV *OperandExtendedAdd =
  1115. getAddExpr(WideStart,
  1116. getMulExpr(WideMaxBECount,
  1117. getSignExtendExpr(Step, WideTy)));
  1118. if (SAdd == OperandExtendedAdd) {
  1119. // Cache knowledge of AR NSW, which is propagated to this AddRec.
  1120. const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNSW);
  1121. // Return the expression with the addrec on the outside.
  1122. return getAddRecExpr(getSignExtendAddRecStart(AR, Ty, this),
  1123. getSignExtendExpr(Step, Ty),
  1124. L, AR->getNoWrapFlags());
  1125. }
  1126. // Similar to above, only this time treat the step value as unsigned.
  1127. // This covers loops that count up with an unsigned step.
  1128. OperandExtendedAdd =
  1129. getAddExpr(WideStart,
  1130. getMulExpr(WideMaxBECount,
  1131. getZeroExtendExpr(Step, WideTy)));
  1132. if (SAdd == OperandExtendedAdd) {
  1133. // Cache knowledge of AR NSW, which is propagated to this AddRec.
  1134. const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNSW);
  1135. // Return the expression with the addrec on the outside.
  1136. return getAddRecExpr(getSignExtendAddRecStart(AR, Ty, this),
  1137. getZeroExtendExpr(Step, Ty),
  1138. L, AR->getNoWrapFlags());
  1139. }
  1140. }
  1141. // If the backedge is guarded by a comparison with the pre-inc value
  1142. // the addrec is safe. Also, if the entry is guarded by a comparison
  1143. // with the start value and the backedge is guarded by a comparison
  1144. // with the post-inc value, the addrec is safe.
  1145. ICmpInst::Predicate Pred;
  1146. const SCEV *OverflowLimit = getOverflowLimitForStep(Step, &Pred, this);
  1147. if (OverflowLimit &&
  1148. (isLoopBackedgeGuardedByCond(L, Pred, AR, OverflowLimit) ||
  1149. (isLoopEntryGuardedByCond(L, Pred, Start, OverflowLimit) &&
  1150. isLoopBackedgeGuardedByCond(L, Pred, AR->getPostIncExpr(*this),
  1151. OverflowLimit)))) {
  1152. // Cache knowledge of AR NSW, then propagate NSW to the wide AddRec.
  1153. const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNSW);
  1154. return getAddRecExpr(getSignExtendAddRecStart(AR, Ty, this),
  1155. getSignExtendExpr(Step, Ty),
  1156. L, AR->getNoWrapFlags());
  1157. }
  1158. }
  1159. }
  1160. // The cast wasn't folded; create an explicit cast node.
  1161. // Recompute the insert position, as it may have been invalidated.
  1162. if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  1163. SCEV *S = new (SCEVAllocator) SCEVSignExtendExpr(ID.Intern(SCEVAllocator),
  1164. Op, Ty);
  1165. UniqueSCEVs.InsertNode(S, IP);
  1166. return S;
  1167. }
  1168. /// getAnyExtendExpr - Return a SCEV for the given operand extended with
  1169. /// unspecified bits out to the given type.
  1170. ///
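/// For example (illustrative): any-extending the constant -1 produces a sign
/// extension, while any-extending an addrec pushes the cast into each of the
/// addrec's operands.
///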
  1171. const SCEV *ScalarEvolution::getAnyExtendExpr(const SCEV *Op,
  1172. Type *Ty) {
  1173. assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
  1174. "This is not an extending conversion!");
  1175. assert(isSCEVable(Ty) &&
  1176. "This is not a conversion to a SCEVable type!");
  1177. Ty = getEffectiveSCEVType(Ty);
  1178. // Sign-extend negative constants.
  1179. if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
  1180. if (SC->getValue()->getValue().isNegative())
  1181. return getSignExtendExpr(Op, Ty);
  1182. // Peel off a truncate cast.
  1183. if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Op)) {
  1184. const SCEV *NewOp = T->getOperand();
  1185. if (getTypeSizeInBits(NewOp->getType()) < getTypeSizeInBits(Ty))
  1186. return getAnyExtendExpr(NewOp, Ty);
  1187. return getTruncateOrNoop(NewOp, Ty);
  1188. }
  1189. // Next try a zext cast. If the cast is folded, use it.
  1190. const SCEV *ZExt = getZeroExtendExpr(Op, Ty);
  1191. if (!isa<SCEVZeroExtendExpr>(ZExt))
  1192. return ZExt;
  1193. // Next try a sext cast. If the cast is folded, use it.
  1194. const SCEV *SExt = getSignExtendExpr(Op, Ty);
  1195. if (!isa<SCEVSignExtendExpr>(SExt))
  1196. return SExt;
  1197. // Force the cast to be folded into the operands of an addrec.
  1198. if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op)) {
  1199. SmallVector<const SCEV *, 4> Ops;
  1200. for (SCEVAddRecExpr::op_iterator I = AR->op_begin(), E = AR->op_end();
  1201. I != E; ++I)
  1202. Ops.push_back(getAnyExtendExpr(*I, Ty));
  1203. return getAddRecExpr(Ops, AR->getLoop(), SCEV::FlagNW);
  1204. }
  1205. // If the expression is obviously signed, use the sext cast value.
  1206. if (isa<SCEVSMaxExpr>(Op))
  1207. return SExt;
  1208. // Absent any other information, use the zext cast value.
  1209. return ZExt;
  1210. }
  1211. /// CollectAddOperandsWithScales - Process the given Ops list, which is
  1212. /// a list of operands to be added under the given scale, update the given
  1213. /// map. This is a helper function for getAddRecExpr. As an example of
  1214. /// what it does, given a sequence of operands that would form an add
  1215. /// expression like this:
  1216. ///
  1217. /// m + n + 13 + (A * (o + p + (B * q + m + 29))) + r + (-1 * r)
  1218. ///
  1219. /// where A and B are constants, update the map with these values:
  1220. ///
  1221. /// (m, 1+A*B), (n, 1), (o, A), (p, A), (q, A*B), (r, 0)
  1222. ///
  1223. /// and add 13 + A*B*29 to AccumulatedConstant.
  1224. /// This will allow getAddRecExpr to produce this:
  1225. ///
  1226. /// 13+A*B*29 + n + (m * (1+A*B)) + ((o + p) * A) + (q * A*B)
  1227. ///
  1228. /// This form often exposes folding opportunities that are hidden in
  1229. /// the original operand list.
  1230. ///
  1231. /// Return true iff it appears that any interesting folding opportunities
  1232. /// may be exposed. This helps getAddRecExpr short-circuit extra work in
  1233. /// the common case where no interesting opportunities are present, and
  1234. /// is also used as a check to avoid infinite recursion.
  1235. ///
  1236. static bool
  1237. CollectAddOperandsWithScales(DenseMap<const SCEV *, APInt> &M,
  1238. SmallVector<const SCEV *, 8> &NewOps,
  1239. APInt &AccumulatedConstant,
  1240. const SCEV *const *Ops, size_t NumOperands,
  1241. const APInt &Scale,
  1242. ScalarEvolution &SE) {
  1243. bool Interesting = false;
  1244. // Iterate over the add operands. They are sorted, with constants first.
  1245. unsigned i = 0;
  1246. while (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) {
  1247. ++i;
  1248. // Pull a buried constant out to the outside.
  1249. if (Scale != 1 || AccumulatedConstant != 0 || C->getValue()->isZero())
  1250. Interesting = true;
  1251. AccumulatedConstant += Scale * C->getValue()->getValue();
  1252. }
  1253. // Next comes everything else. We're especially interested in multiplies
  1254. // here, but they're in the middle, so just visit the rest with one loop.
  1255. for (; i != NumOperands; ++i) {
  1256. const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[i]);
  1257. if (Mul && isa<SCEVConstant>(Mul->getOperand(0))) {
  1258. APInt NewScale =
  1259. Scale * cast<SCEVConstant>(Mul->getOperand(0))->getValue()->getValue();
  1260. if (Mul->getNumOperands() == 2 && isa<SCEVAddExpr>(Mul->getOperand(1))) {
  1261. // A multiplication of a constant with another add; recurse.
  1262. const SCEVAddExpr *Add = cast<SCEVAddExpr>(Mul->getOperand(1));
  1263. Interesting |=
  1264. CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant,
  1265. Add->op_begin(), Add->getNumOperands(),
  1266. NewScale, SE);
  1267. } else {
  1268. // A multiplication of a constant with some other value. Update
  1269. // the map.
  1270. SmallVector<const SCEV *, 4> MulOps(Mul->op_begin()+1, Mul->op_end());
  1271. const SCEV *Key = SE.getMulExpr(MulOps);
  1272. std::pair<DenseMap<const SCEV *, APInt>::iterator, bool> Pair =
  1273. M.insert(std::make_pair(Key, NewScale));
  1274. if (Pair.second) {
  1275. NewOps.push_back(Pair.first->first);
  1276. } else {
  1277. Pair.first->second += NewScale;
  1278. // The map already had an entry for this value, which may indicate
  1279. // a folding opportunity.
  1280. Interesting = true;
  1281. }
  1282. }
  1283. } else {
  1284. // An ordinary operand. Update the map.
  1285. std::pair<DenseMap<const SCEV *, APInt>::iterator, bool> Pair =
  1286. M.insert(std::make_pair(Ops[i], Scale));
  1287. if (Pair.second) {
  1288. NewOps.push_back(Pair.first->first);
  1289. } else {
  1290. Pair.first->second += Scale;
  1291. // The map already had an entry for this value, which may indicate
  1292. // a folding opportunity.
  1293. Interesting = true;
  1294. }
  1295. }
  1296. }
  1297. return Interesting;
  1298. }
  1299. namespace {
  1300. struct APIntCompare {
  1301. bool operator()(const APInt &LHS, const APInt &RHS) const {
  1302. return LHS.ult(RHS);
  1303. }
  1304. };
  1305. }
  1306. /// getAddExpr - Get a canonical add expression, or something simpler if
  1307. /// possible.
  1308. const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
  1309. SCEV::NoWrapFlags Flags) {
  1310. assert(!(Flags & ~(SCEV::FlagNUW | SCEV::FlagNSW)) &&
  1311. "only nuw or nsw allowed");
  1312. assert(!Ops.empty() && "Cannot get empty add!");
  1313. if (Ops.size() == 1) return Ops[0];
  1314. #ifndef NDEBUG
  1315. Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
  1316. for (unsigned i = 1, e = Ops.size(); i != e; ++i)
  1317. assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
  1318. "SCEVAddExpr operand types don't match!");
  1319. #endif
  1320. // If FlagNSW is true and all the operands are non-negative, infer FlagNUW.
  1321. // And vice-versa.
  1322. int SignOrUnsignMask = SCEV::FlagNUW | SCEV::FlagNSW;
  1323. SCEV::NoWrapFlags SignOrUnsignWrap = maskFlags(Flags, SignOrUnsignMask);
  1324. if (SignOrUnsignWrap && (SignOrUnsignWrap != SignOrUnsignMask)) {
  1325. bool All = true;
  1326. for (SmallVectorImpl<const SCEV *>::const_iterator I = Ops.begin(),
  1327. E = Ops.end(); I != E; ++I)
  1328. if (!isKnownNonNegative(*I)) {
  1329. All = false;
  1330. break;
  1331. }
  1332. if (All) Flags = setFlags(Flags, (SCEV::NoWrapFlags)SignOrUnsignMask);
  1333. }
  1334. // Sort by complexity, this groups all similar expression types together.
  1335. GroupByComplexity(Ops, LI);
  1336. // If there are any constants, fold them together.
  1337. unsigned Idx = 0;
  1338. if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
  1339. ++Idx;
  1340. assert(Idx < Ops.size());
  1341. while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
  1342. // We found two constants, fold them together!
  1343. Ops[0] = getConstant(LHSC->getValue()->getValue() +
  1344. RHSC->getValue()->getValue());
  1345. if (Ops.size() == 2) return Ops[0];
  1346. Ops.erase(Ops.begin()+1); // Erase the folded element
  1347. LHSC = cast<SCEVConstant>(Ops[0]);
  1348. }
  1349. // If we are left with a constant zero being added, strip it off.
  1350. if (LHSC->getValue()->isZero()) {
  1351. Ops.erase(Ops.begin());
  1352. --Idx;
  1353. }
  1354. if (Ops.size() == 1) return Ops[0];
  1355. }
  1356. // Okay, check to see if the same value occurs in the operand list more than
1357. // once. If so, merge them together into a multiply expression. Since we
  1358. // sorted the list, these values are required to be adjacent.
  1359. Type *Ty = Ops[0]->getType();
  1360. bool FoundMatch = false;
  1361. for (unsigned i = 0, e = Ops.size(); i != e-1; ++i)
  1362. if (Ops[i] == Ops[i+1]) { // X + Y + Y --> X + Y*2
  1363. // Scan ahead to count how many equal operands there are.
  1364. unsigned Count = 2;
  1365. while (i+Count != e && Ops[i+Count] == Ops[i])
  1366. ++Count;
  1367. // Merge the values into a multiply.
  1368. const SCEV *Scale = getConstant(Ty, Count);
  1369. const SCEV *Mul = getMulExpr(Scale, Ops[i]);
  1370. if (Ops.size() == Count)
  1371. return Mul;
  1372. Ops[i] = Mul;
  1373. Ops.erase(Ops.begin()+i+1, Ops.begin()+i+Count);
  1374. --i; e -= Count - 1;
  1375. FoundMatch = true;
  1376. }
  1377. if (FoundMatch)
  1378. return getAddExpr(Ops, Flags);
  1379. // Check for truncates. If all the operands are truncated from the same
  1380. // type, see if factoring out the truncate would permit the result to be
1381. // folded. e.g., trunc(x) + m*trunc(n) --> trunc(x + trunc(m)*n)
  1382. // if the contents of the resulting outer trunc fold to something simple.
  1383. for (; Idx < Ops.size() && isa<SCEVTruncateExpr>(Ops[Idx]); ++Idx) {
  1384. const SCEVTruncateExpr *Trunc = cast<SCEVTruncateExpr>(Ops[Idx]);
  1385. Type *DstType = Trunc->getType();
  1386. Type *SrcType = Trunc->getOperand()->getType();
  1387. SmallVector<const SCEV *, 8> LargeOps;
  1388. bool Ok = true;
  1389. // Check all the operands to see if they can be represented in the
  1390. // source type of the truncate.
  1391. for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
  1392. if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Ops[i])) {
  1393. if (T->getOperand()->getType() != SrcType) {
  1394. Ok = false;
  1395. break;
  1396. }
  1397. LargeOps.push_back(T->getOperand());
  1398. } else if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) {
  1399. LargeOps.push_back(getAnyExtendExpr(C, SrcType));
  1400. } else if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(Ops[i])) {
  1401. SmallVector<const SCEV *, 8> LargeMulOps;
  1402. for (unsigned j = 0, f = M->getNumOperands(); j != f && Ok; ++j) {
  1403. if (const SCEVTruncateExpr *T =
  1404. dyn_cast<SCEVTruncateExpr>(M->getOperand(j))) {
  1405. if (T->getOperand()->getType() != SrcType) {
  1406. Ok = false;
  1407. break;
  1408. }
  1409. LargeMulOps.push_back(T->getOperand());
  1410. } else if (const SCEVConstant *C =
  1411. dyn_cast<SCEVConstant>(M->getOperand(j))) {
  1412. LargeMulOps.push_back(getAnyExtendExpr(C, SrcType));
  1413. } else {
  1414. Ok = false;
  1415. break;
  1416. }
  1417. }
  1418. if (Ok)
  1419. LargeOps.push_back(getMulExpr(LargeMulOps));
  1420. } else {
  1421. Ok = false;
  1422. break;
  1423. }
  1424. }
  1425. if (Ok) {
  1426. // Evaluate the expression in the larger type.
  1427. const SCEV *Fold = getAddExpr(LargeOps, Flags);
  1428. // If it folds to something simple, use it. Otherwise, don't.
  1429. if (isa<SCEVConstant>(Fold) || isa<SCEVUnknown>(Fold))
  1430. return getTruncateExpr(Fold, DstType);
  1431. }
  1432. }
  1433. // Skip past any other cast SCEVs.
  1434. while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddExpr)
  1435. ++Idx;
  1436. // If there are add operands they would be next.
  1437. if (Idx < Ops.size()) {
  1438. bool DeletedAdd = false;
  1439. while (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[Idx])) {
  1440. // If we have an add, expand the add operands onto the end of the operands
  1441. // list.
  1442. Ops.erase(Ops.begin()+Idx);
  1443. Ops.append(Add->op_begin(), Add->op_end());
  1444. DeletedAdd = true;
  1445. }
  1446. // If we deleted at least one add, we added operands to the end of the list,
  1447. // and they are not necessarily sorted. Recurse to resort and resimplify
  1448. // any operands we just acquired.
  1449. if (DeletedAdd)
  1450. return getAddExpr(Ops);
  1451. }
  1452. // Skip over the add expression until we get to a multiply.
  1453. while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr)
  1454. ++Idx;
  1455. // Check to see if there are any folding opportunities present with
  1456. // operands multiplied by constant values.
  1457. if (Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx])) {
  1458. uint64_t BitWidth = getTypeSizeInBits(Ty);
  1459. DenseMap<const SCEV *, APInt> M;
  1460. SmallVector<const SCEV *, 8> NewOps;
  1461. APInt AccumulatedConstant(BitWidth, 0);
  1462. if (CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant,
  1463. Ops.data(), Ops.size(),
  1464. APInt(BitWidth, 1), *this)) {
1465. // Some interesting folding opportunity is present, so it's worthwhile to
  1466. // re-generate the operands list. Group the operands by constant scale,
  1467. // to avoid multiplying by the same constant scale multiple times.
  1468. std::map<APInt, SmallVector<const SCEV *, 4>, APIntCompare> MulOpLists;
  1469. for (SmallVector<const SCEV *, 8>::const_iterator I = NewOps.begin(),
  1470. E = NewOps.end(); I != E; ++I)
  1471. MulOpLists[M.find(*I)->second].push_back(*I);
  1472. // Re-generate the operands list.
  1473. Ops.clear();
  1474. if (AccumulatedConstant != 0)
  1475. Ops.push_back(getConstant(AccumulatedConstant));
  1476. for (std::map<APInt, SmallVector<const SCEV *, 4>, APIntCompare>::iterator
  1477. I = MulOpLists.begin(), E = MulOpLists.end(); I != E; ++I)
  1478. if (I->first != 0)
  1479. Ops.push_back(getMulExpr(getConstant(I->first),
  1480. getAddExpr(I->second)));
  1481. if (Ops.empty())
  1482. return getConstant(Ty, 0);
  1483. if (Ops.size() == 1)
  1484. return Ops[0];
  1485. return getAddExpr(Ops);
  1486. }
  1487. }
  1488. // If we are adding something to a multiply expression, make sure the
  1489. // something is not already an operand of the multiply. If so, merge it into
  1490. // the multiply.
  1491. for (; Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx]); ++Idx) {
  1492. const SCEVMulExpr *Mul = cast<SCEVMulExpr>(Ops[Idx]);
  1493. for (unsigned MulOp = 0, e = Mul->getNumOperands(); MulOp != e; ++MulOp) {
  1494. const SCEV *MulOpSCEV = Mul->getOperand(MulOp);
  1495. if (isa<SCEVConstant>(MulOpSCEV))
  1496. continue;
  1497. for (unsigned AddOp = 0, e = Ops.size(); AddOp != e; ++AddOp)
  1498. if (MulOpSCEV == Ops[AddOp]) {
  1499. // Fold W + X + (X * Y * Z) --> W + (X * ((Y*Z)+1))
  1500. const SCEV *InnerMul = Mul->getOperand(MulOp == 0);
  1501. if (Mul->getNumOperands() != 2) {
  1502. // If the multiply has more than two operands, we must get the
  1503. // Y*Z term.
  1504. SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(),
  1505. Mul->op_begin()+MulOp);
  1506. MulOps.append(Mul->op_begin()+MulOp+1, Mul->op_end());
  1507. InnerMul = getMulExpr(MulOps);
  1508. }
  1509. const SCEV *One = getConstant(Ty, 1);
  1510. const SCEV *AddOne = getAddExpr(One, InnerMul);
  1511. const SCEV *OuterMul = getMulExpr(AddOne, MulOpSCEV);
  1512. if (Ops.size() == 2) return OuterMul;
  1513. if (AddOp < Idx) {
  1514. Ops.erase(Ops.begin()+AddOp);
  1515. Ops.erase(Ops.begin()+Idx-1);
  1516. } else {
  1517. Ops.erase(Ops.begin()+Idx);
  1518. Ops.erase(Ops.begin()+AddOp-1);
  1519. }
  1520. Ops.push_back(OuterMul);
  1521. return getAddExpr(Ops);
  1522. }
  1523. // Check this multiply against other multiplies being added together.
  1524. for (unsigned OtherMulIdx = Idx+1;
  1525. OtherMulIdx < Ops.size() && isa<SCEVMulExpr>(Ops[OtherMulIdx]);
  1526. ++OtherMulIdx) {
  1527. const SCEVMulExpr *OtherMul = cast<SCEVMulExpr>(Ops[OtherMulIdx]);
  1528. // If MulOp occurs in OtherMul, we can fold the two multiplies
  1529. // together.
  1530. for (unsigned OMulOp = 0, e = OtherMul->getNumOperands();
  1531. OMulOp != e; ++OMulOp)
  1532. if (OtherMul->getOperand(OMulOp) == MulOpSCEV) {
  1533. // Fold X + (A*B*C) + (A*D*E) --> X + (A*(B*C+D*E))
  1534. const SCEV *InnerMul1 = Mul->getOperand(MulOp == 0);
  1535. if (Mul->getNumOperands() != 2) {
  1536. SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(),
  1537. Mul->op_begin()+MulOp);
  1538. MulOps.append(Mul->op_begin()+MulOp+1, Mul->op_end());
  1539. InnerMul1 = getMulExpr(MulOps);
  1540. }
  1541. const SCEV *InnerMul2 = OtherMul->getOperand(OMulOp == 0);
  1542. if (OtherMul->getNumOperands() != 2) {
  1543. SmallVector<const SCEV *, 4> MulOps(OtherMul->op_begin(),
  1544. OtherMul->op_begin()+OMulOp);
  1545. MulOps.append(OtherMul->op_begin()+OMulOp+1, OtherMul->op_end());
  1546. InnerMul2 = getMulExpr(MulOps);
  1547. }
  1548. const SCEV *InnerMulSum = getAddExpr(InnerMul1,InnerMul2);
  1549. const SCEV *OuterMul = getMulExpr(MulOpSCEV, InnerMulSum);
  1550. if (Ops.size() == 2) return OuterMul;
  1551. Ops.erase(Ops.begin()+Idx);
  1552. Ops.erase(Ops.begin()+OtherMulIdx-1);
  1553. Ops.push_back(OuterMul);
  1554. return getAddExpr(Ops);
  1555. }
  1556. }
  1557. }
  1558. }
  1559. // If there are any add recurrences in the operands list, see if any other
  1560. // added values are loop invariant. If so, we can fold them into the
  1561. // recurrence.
  1562. while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr)
  1563. ++Idx;
  1564. // Scan over all recurrences, trying to fold loop invariants into them.
  1565. for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) {
  1566. // Scan all of the other operands to this add and add them to the vector if
  1567. // they are loop invariant w.r.t. the recurrence.
  1568. SmallVector<const SCEV *, 8> LIOps;
  1569. const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]);
  1570. const Loop *AddRecLoop = AddRec->getLoop();
  1571. for (unsigned i = 0, e = Ops.size(); i != e; ++i)
  1572. if (isLoopInvariant(Ops[i], AddRecLoop)) {
  1573. LIOps.push_back(Ops[i]);
  1574. Ops.erase(Ops.begin()+i);
  1575. --i; --e;
  1576. }
  1577. // If we found some loop invariants, fold them into the recurrence.
  1578. if (!LIOps.empty()) {
  1579. // NLI + LI + {Start,+,Step} --> NLI + {LI+Start,+,Step}
  1580. LIOps.push_back(AddRec->getStart());
  1581. SmallVector<const SCEV *, 4> AddRecOps(AddRec->op_begin(),
  1582. AddRec->op_end());
  1583. AddRecOps[0] = getAddExpr(LIOps);
  1584. // Build the new addrec. Propagate the NUW and NSW flags if both the
  1585. // outer add and the inner addrec are guaranteed to have no overflow.
  1586. // Always propagate NW.
  1587. Flags = AddRec->getNoWrapFlags(setFlags(Flags, SCEV::FlagNW));
  1588. const SCEV *NewRec = getAddRecExpr(AddRecOps, AddRecLoop, Flags);
  1589. // If all of the other operands were loop invariant, we are done.
  1590. if (Ops.size() == 1) return NewRec;
  1591. // Otherwise, add the folded AddRec by the non-invariant parts.
  1592. for (unsigned i = 0;; ++i)
  1593. if (Ops[i] == AddRec) {
  1594. Ops[i] = NewRec;
  1595. break;
  1596. }
  1597. return getAddExpr(Ops);
  1598. }
  1599. // Okay, if there weren't any loop invariants to be folded, check to see if
  1600. // there are multiple AddRec's with the same loop induction variable being
  1601. // added together. If so, we can fold them.
  1602. for (unsigned OtherIdx = Idx+1;
  1603. OtherIdx < Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
  1604. ++OtherIdx)
  1605. if (AddRecLoop == cast<SCEVAddRecExpr>(Ops[OtherIdx])->getLoop()) {
  1606. // Other + {A,+,B}<L> + {C,+,D}<L> --> Other + {A+C,+,B+D}<L>
  1607. SmallVector<const SCEV *, 4> AddRecOps(AddRec->op_begin(),
  1608. AddRec->op_end());
  1609. for (; OtherIdx != Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
  1610. ++OtherIdx)
  1611. if (const SCEVAddRecExpr *OtherAddRec =
  1612. dyn_cast<SCEVAddRecExpr>(Ops[OtherIdx]))
  1613. if (OtherAddRec->getLoop() == AddRecLoop) {
  1614. for (unsigned i = 0, e = OtherAddRec->getNumOperands();
  1615. i != e; ++i) {
  1616. if (i >= AddRecOps.size()) {
  1617. AddRecOps.append(OtherAddRec->op_begin()+i,
  1618. OtherAddRec->op_end());
  1619. break;
  1620. }
  1621. AddRecOps[i] = getAddExpr(AddRecOps[i],
  1622. OtherAddRec->getOperand(i));
  1623. }
  1624. Ops.erase(Ops.begin() + OtherIdx); --OtherIdx;
  1625. }
  1626. // Step size has changed, so we cannot guarantee no self-wraparound.
  1627. Ops[Idx] = getAddRecExpr(AddRecOps, AddRecLoop, SCEV::FlagAnyWrap);
  1628. return getAddExpr(Ops);
  1629. }
  1630. // Otherwise couldn't fold anything into this recurrence. Move onto the
  1631. // next one.
  1632. }
  1633. // Okay, it looks like we really DO need an add expr. Check to see if we
  1634. // already have one, otherwise create a new one.
  1635. FoldingSetNodeID ID;
  1636. ID.AddInteger(scAddExpr);
  1637. for (unsigned i = 0, e = Ops.size(); i != e; ++i)
  1638. ID.AddPointer(Ops[i]);
  1639. void *IP = 0;
  1640. SCEVAddExpr *S =
  1641. static_cast<SCEVAddExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
  1642. if (!S) {
  1643. const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
  1644. std::uninitialized_copy(Ops.begin(), Ops.end(), O);
  1645. S = new (SCEVAllocator) SCEVAddExpr(ID.Intern(SCEVAllocator),
  1646. O, Ops.size());
  1647. UniqueSCEVs.InsertNode(S, IP);
  1648. }
  1649. S->setNoWrapFlags(Flags);
  1650. return S;
  1651. }
  1652. static uint64_t umul_ov(uint64_t i, uint64_t j, bool &Overflow) {
  1653. uint64_t k = i*j;
  1654. if (j > 1 && k / j != i) Overflow = true;
  1655. return k;
  1656. }
  1657. /// Compute the result of "n choose k", the binomial coefficient. If an
  1658. /// intermediate computation overflows, Overflow will be set and the return will
  1659. /// be garbage. Overflow is not cleared on absence of overflow.
  1660. static uint64_t Choose(uint64_t n, uint64_t k, bool &Overflow) {
  1661. // We use the multiplicative formula:
  1662. // n(n-1)(n-2)...(n-(k-1)) / k(k-1)(k-2)...1 .
1663. // At iteration i, we multiply by the i-th term of the numerator, n-(i-1),
1664. // and divide by i. This division will always produce an
  1665. // integral result, and helps reduce the chance of overflow in the
  1666. // intermediate computations. However, we can still overflow even when the
  1667. // final result would fit.
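// For example (illustrative): Choose(5, 2) computes r = 5/1 = 5 and then
// r = (5*4)/2 = 10, the expected value of C(5,2).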
  1668. if (n == 0 || n == k) return 1;
  1669. if (k > n) return 0;
  1670. if (k > n/2)
  1671. k = n-k;
  1672. uint64_t r = 1;
  1673. for (uint64_t i = 1; i <= k; ++i) {
  1674. r = umul_ov(r, n-(i-1), Overflow);
  1675. r /= i;
  1676. }
  1677. return r;
  1678. }
  1679. /// getMulExpr - Get a canonical multiply expression, or something simpler if
  1680. /// possible.
  1681. const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
  1682. SCEV::NoWrapFlags Flags) {
  1683. assert(Flags == maskFlags(Flags, SCEV::FlagNUW | SCEV::FlagNSW) &&
  1684. "only nuw or nsw allowed");
  1685. assert(!Ops.empty() && "Cannot get empty mul!");
  1686. if (Ops.size() == 1) return Ops[0];
  1687. #ifndef NDEBUG
  1688. Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
  1689. for (unsigned i = 1, e = Ops.size(); i != e; ++i)
  1690. assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
  1691. "SCEVMulExpr operand types don't match!");
  1692. #endif
  1693. // If FlagNSW is true and all the operands are non-negative, infer FlagNUW.
  1694. // And vice-versa.
  1695. int SignOrUnsignMask = SCEV::FlagNUW | SCEV::FlagNSW;
  1696. SCEV::NoWrapFlags SignOrUnsignWrap = maskFlags(Flags, SignOrUnsignMask);
  1697. if (SignOrUnsignWrap && (SignOrUnsignWrap != SignOrUnsignMask)) {
  1698. bool All = true;
  1699. for (SmallVectorImpl<const SCEV *>::const_iterator I = Ops.begin(),
  1700. E = Ops.end(); I != E; ++I)
  1701. if (!isKnownNonNegative(*I)) {
  1702. All = false;
  1703. break;
  1704. }
  1705. if (All) Flags = setFlags(Flags, (SCEV::NoWrapFlags)SignOrUnsignMask);
  1706. }
  1707. // Sort by complexity, this groups all similar expression types together.
  1708. GroupByComplexity(Ops, LI);
  1709. // If there are any constants, fold them together.
  1710. unsigned Idx = 0;
  1711. if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
  1712. // C1*(C2+V) -> C1*C2 + C1*V
  1713. if (Ops.size() == 2)
  1714. if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1]))
  1715. if (Add->getNumOperands() == 2 &&
  1716. isa<SCEVConstant>(Add->getOperand(0)))
  1717. return getAddExpr(getMulExpr(LHSC, Add->getOperand(0)),
  1718. getMulExpr(LHSC, Add->getOperand(1)));
  1719. ++Idx;
  1720. while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
  1721. // We found two constants, fold them together!
  1722. ConstantInt *Fold = ConstantInt::get(getContext(),
  1723. LHSC->getValue()->getValue() *
  1724. RHSC->getValue()->getValue());
  1725. Ops[0] = getConstant(Fold);
  1726. Ops.erase(Ops.begin()+1); // Erase the folded element
  1727. if (Ops.size() == 1) return Ops[0];
  1728. LHSC = cast<SCEVConstant>(Ops[0]);
  1729. }
  1730. // If we are left with a constant one being multiplied, strip it off.
  1731. if (cast<SCEVConstant>(Ops[0])->getValue()->equalsInt(1)) {
  1732. Ops.erase(Ops.begin());
  1733. --Idx;
  1734. } else if (cast<SCEVConstant>(Ops[0])->getValue()->isZero()) {
  1735. // If we have a multiply of zero, it will always be zero.
  1736. return Ops[0];
  1737. } else if (Ops[0]->isAllOnesValue()) {
  1738. // If we have a mul by -1 of an add, try distributing the -1 among the
  1739. // add operands.
  1740. if (Ops.size() == 2) {
  1741. if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1])) {
  1742. SmallVector<const SCEV *, 4> NewOps;
  1743. bool AnyFolded = false;
  1744. for (SCEVAddRecExpr::op_iterator I = Add->op_begin(),
  1745. E = Add->op_end(); I != E; ++I) {
  1746. const SCEV *Mul = getMulExpr(Ops[0], *I);
  1747. if (!isa<SCEVMulExpr>(Mul)) AnyFolded = true;
  1748. NewOps.push_back(Mul);
  1749. }
  1750. if (AnyFolded)
  1751. return getAddExpr(NewOps);
  1752. }
  1753. else if (const SCEVAddRecExpr *
  1754. AddRec = dyn_cast<SCEVAddRecExpr>(Ops[1])) {
  1755. // Negation preserves a recurrence's no self-wrap property.
  1756. SmallVector<const SCEV *, 4> Operands;
  1757. for (SCEVAddRecExpr::op_iterator I = AddRec->op_begin(),
  1758. E = AddRec->op_end(); I != E; ++I) {
  1759. Operands.push_back(getMulExpr(Ops[0], *I));
  1760. }
  1761. return getAddRecExpr(Operands, AddRec->getLoop(),
  1762. AddRec->getNoWrapFlags(SCEV::FlagNW));
  1763. }
  1764. }
  1765. }
  1766. if (Ops.size() == 1)
  1767. return Ops[0];
  1768. }
  1769. // Skip over the add expression until we get to a multiply.
  1770. while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr)
  1771. ++Idx;
  1772. // If there are mul operands inline them all into this expression.
  1773. if (Idx < Ops.size()) {
  1774. bool DeletedMul = false;
  1775. while (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[Idx])) {
1776. // If we have a mul, expand the mul operands onto the end of the operands
  1777. // list.
  1778. Ops.erase(Ops.begin()+Idx);
  1779. Ops.append(Mul->op_begin(), Mul->op_end());
  1780. DeletedMul = true;
  1781. }
  1782. // If we deleted at least one mul, we added operands to the end of the list,
  1783. // and they are not necessarily sorted. Recurse to resort and resimplify
  1784. // any operands we just acquired.
  1785. if (DeletedMul)
  1786. return getMulExpr(Ops);
  1787. }
  1788. // If there are any add recurrences in the operands list, see if any other
  1789. // added values are loop invariant. If so, we can fold them into the
  1790. // recurrence.
  1791. while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr)
  1792. ++Idx;
  1793. // Scan over all recurrences, trying to fold loop invariants into them.
  1794. for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) {
  1795. // Scan all of the other operands to this mul and add them to the vector if
  1796. // they are loop invariant w.r.t. the recurrence.
  1797. SmallVector<const SCEV *, 8> LIOps;
  1798. const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]);
  1799. const Loop *AddRecLoop = AddRec->getLoop();
  1800. for (unsigned i = 0, e = Ops.size(); i != e; ++i)
  1801. if (isLoopInvariant(Ops[i], AddRecLoop)) {
  1802. LIOps.push_back(Ops[i]);
  1803. Ops.erase(Ops.begin()+i);
  1804. --i; --e;
  1805. }
  1806. // If we found some loop invariants, fold them into the recurrence.
  1807. if (!LIOps.empty()) {
  1808. // NLI * LI * {Start,+,Step} --> NLI * {LI*Start,+,LI*Step}
  1809. SmallVector<const SCEV *, 4> NewOps;
  1810. NewOps.reserve(AddRec->getNumOperands());
  1811. const SCEV *Scale = getMulExpr(LIOps);
  1812. for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i)
  1813. NewOps.push_back(getMulExpr(Scale, AddRec->getOperand(i)));
  1814. // Build the new addrec. Propagate the NUW and NSW flags if both the
  1815. // outer mul and the inner addrec are guaranteed to have no overflow.
  1816. //
1817. // The no-self-wrap property cannot be guaranteed after changing the step
1818. // size, but it will be inferred if either NUW or NSW is true.
  1819. Flags = AddRec->getNoWrapFlags(clearFlags(Flags, SCEV::FlagNW));
  1820. const SCEV *NewRec = getAddRecExpr(NewOps, AddRecLoop, Flags);
  1821. // If all of the other operands were loop invariant, we are done.
  1822. if (Ops.size() == 1) return NewRec;
  1823. // Otherwise, multiply the folded AddRec by the non-invariant parts.
  1824. for (unsigned i = 0;; ++i)
  1825. if (Ops[i] == AddRec) {
  1826. Ops[i] = NewRec;
  1827. break;
  1828. }
  1829. return getMulExpr(Ops);
  1830. }
  1831. // Okay, if there weren't any loop invariants to be folded, check to see if
  1832. // there are multiple AddRec's with the same loop induction variable being
  1833. // multiplied together. If so, we can fold them.
  1834. for (unsigned OtherIdx = Idx+1;
  1835. OtherIdx < Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
  1836. ++OtherIdx) {
  1837. if (AddRecLoop != cast<SCEVAddRecExpr>(Ops[OtherIdx])->getLoop())
  1838. continue;
  1839. // {A1,+,A2,+,...,+,An}<L> * {B1,+,B2,+,...,+,Bn}<L>
  1840. // = {x=1 in [ sum y=x..2x [ sum z=max(y-x, y-n)..min(x,n) [
  1841. // choose(x, 2x)*choose(2x-y, x-z)*A_{y-z}*B_z
  1842. // ]]],+,...up to x=2n}.
  1843. // Note that the arguments to choose() are always integers with values
  1844. // known at compile time, never SCEV objects.
  1845. //
  1846. // The implementation avoids pointless extra computations when the two
  1847. // addrec's are of different length (mathematically, it's equivalent to
  1848. // an infinite stream of zeros on the right).
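// For example (illustrative): {1,+,1}<L> * {1,+,1}<L>, i.e. (n+1)^2, folds to
// {1,+,3,+,2}<L>: 1 + 3*n + 2*C(n,2) = n^2 + 2n + 1.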
  1849. bool OpsModified = false;
  1850. for (; OtherIdx != Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
  1851. ++OtherIdx) {
  1852. const SCEVAddRecExpr *OtherAddRec =
  1853. dyn_cast<SCEVAddRecExpr>(Ops[OtherIdx]);
  1854. if (!OtherAddRec || OtherAddRec->getLoop() != AddRecLoop)
  1855. continue;
  1856. bool Overflow = false;
  1857. Type *Ty = AddRec->getType();
  1858. bool LargerThan64Bits = getTypeSizeInBits(Ty) > 64;
  1859. SmallVector<const SCEV*, 7> AddRecOps;
  1860. for (int x = 0, xe = AddRec->getNumOperands() +
  1861. OtherAddRec->getNumOperands() - 1; x != xe && !Overflow; ++x) {
  1862. const SCEV *Term = getConstant(Ty, 0);
  1863. for (int y = x, ye = 2*x+1; y != ye && !Overflow; ++y) {
  1864. uint64_t Coeff1 = Choose(x, 2*x - y, Overflow);
  1865. for (int z = std::max(y-x, y-(int)AddRec->getNumOperands()+1),
  1866. ze = std::min(x+1, (int)OtherAddRec->getNumOperands());
  1867. z < ze && !Overflow; ++z) {
  1868. uint64_t Coeff2 = Choose(2*x - y, x-z, Overflow);
  1869. uint64_t Coeff;
  1870. if (LargerThan64Bits)
  1871. Coeff = umul_ov(Coeff1, Coeff2, Overflow);
  1872. else
  1873. Coeff = Coeff1*Coeff2;
  1874. const SCEV *CoeffTerm = getConstant(Ty, Coeff);
  1875. const SCEV *Term1 = AddRec->getOperand(y-z);
  1876. const SCEV *Term2 = OtherAddRec->getOperand(z);
  1877. Term = getAddExpr(Term, getMulExpr(CoeffTerm, Term1,Term2));
  1878. }
  1879. }
  1880. AddRecOps.push_back(Term);
  1881. }
  1882. if (!Overflow) {
  1883. const SCEV *NewAddRec = getAddRecExpr(AddRecOps, AddRec->getLoop(),
  1884. SCEV::FlagAnyWrap);
  1885. if (Ops.size() == 2) return NewAddRec;
  1886. Ops[Idx] = NewAddRec;
  1887. Ops.erase(Ops.begin() + OtherIdx); --OtherIdx;
  1888. OpsModified = true;
  1889. AddRec = dyn_cast<SCEVAddRecExpr>(NewAddRec);
  1890. if (!AddRec)
  1891. break;
  1892. }
  1893. }
  1894. if (OpsModified)
  1895. return getMulExpr(Ops);
  1896. }
  1897. // Otherwise couldn't fold anything into this recurrence. Move onto the
  1898. // next one.
  1899. }
1900. // Okay, it looks like we really DO need a mul expr. Check to see if we
  1901. // already have one, otherwise create a new one.
  1902. FoldingSetNodeID ID;
  1903. ID.AddInteger(scMulExpr);
  1904. for (unsigned i = 0, e = Ops.size(); i != e; ++i)
  1905. ID.AddPointer(Ops[i]);
  1906. void *IP = 0;
  1907. SCEVMulExpr *S =
  1908. static_cast<SCEVMulExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
  1909. if (!S) {
  1910. const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
  1911. std::uninitialized_copy(Ops.begin(), Ops.end(), O);
  1912. S = new (SCEVAllocator) SCEVMulExpr(ID.Intern(SCEVAllocator),
  1913. O, Ops.size());
  1914. UniqueSCEVs.InsertNode(S, IP);
  1915. }
  1916. S->setNoWrapFlags(Flags);
  1917. return S;
  1918. }
  1919. /// getUDivExpr - Get a canonical unsigned division expression, or something
  1920. /// simpler if possible.
  1921. const SCEV *ScalarEvolution::getUDivExpr(const SCEV *LHS,
  1922. const SCEV *RHS) {
  1923. assert(getEffectiveSCEVType(LHS->getType()) ==
  1924. getEffectiveSCEVType(RHS->getType()) &&
  1925. "SCEVUDivExpr operand types don't match!");
  1926. if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) {
  1927. if (RHSC->getValue()->equalsInt(1))
1928. return LHS; // X udiv 1 --> X
  1929. // If the denominator is zero, the result of the udiv is undefined. Don't
  1930. // try to analyze it, because the resolution chosen here may differ from
  1931. // the resolution chosen in other parts of the compiler.
  1932. if (!RHSC->getValue()->isZero()) {
1933. // Determine whether the division can be folded into the operands of
1934. // the dividend (LHS).
  1935. // TODO: Generalize this to non-constants by using known-bits information.
  1936. Type *Ty = LHS->getType();
  1937. unsigned LZ = RHSC->getValue()->getValue().countLeadingZeros();
  1938. unsigned MaxShiftAmt = getTypeSizeInBits(Ty) - LZ - 1;
  1939. // For non-power-of-two values, effectively round the value up to the
  1940. // nearest power of two.
  1941. if (!RHSC->getValue()->getValue().isPowerOf2())
  1942. ++MaxShiftAmt;
  1943. IntegerType *ExtTy =
  1944. IntegerType::get(getContext(), getTypeSizeInBits(Ty) + MaxShiftAmt);
  1945. if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(LHS))
  1946. if (const SCEVConstant *Step =
  1947. dyn_cast<SCEVConstant>(AR->getStepRecurrence(*this))) {
  1948. // {X,+,N}/C --> {X/C,+,N/C} if safe and N/C can be folded.
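// For example, when the zero-extended forms match (no wrapping),
// {8,+,4}<L> /u 2 folds to {4,+,2}<L>: every operand is exactly
// divisible by 2, so the division distributes over the recurrence.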
  1949. const APInt &StepInt = Step->getValue()->getValue();
  1950. const APInt &DivInt = RHSC->getValue()->getValue();
  1951. if (!StepInt.urem(DivInt) &&
  1952. getZeroExtendExpr(AR, ExtTy) ==
  1953. getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy),
  1954. getZeroExtendExpr(Step, ExtTy),
  1955. AR->getLoop(), SCEV::FlagAnyWrap)) {
  1956. SmallVector<const SCEV *, 4> Operands;
  1957. for (unsigned i = 0, e = AR->getNumOperands(); i != e; ++i)
  1958. Operands.push_back(getUDivExpr(AR->getOperand(i), RHS));
  1959. return getAddRecExpr(Operands, AR->getLoop(),
  1960. SCEV::FlagNW);
  1961. }
1962. // Get a canonical UDivExpr for a recurrence.
1963. // {X,+,N}/C => {Y,+,N}/C where Y=X-(X%N). Safe when C%N=0.
1964. // We can currently only fold X%N if X is constant.
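// For example, {5,+,2}<L> /u 4 satisfies C % N == 0 (4 % 2 == 0), so the
// start is rounded down to 5 - (5 % 2) = 4 and the expression becomes
// {4,+,2}<L> /u 4 without changing any of the quotients (the zero-extend
// check above guards against wrapping).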
  1965. const SCEVConstant *StartC = dyn_cast<SCEVConstant>(AR->getStart());
  1966. if (StartC && !DivInt.urem(StepInt) &&
  1967. getZeroExtendExpr(AR, ExtTy) ==
  1968. getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy),
  1969. getZeroExtendExpr(Step, ExtTy),
  1970. AR->getLoop(), SCEV::FlagAnyWrap)) {
  1971. const APInt &StartInt = StartC->getValue()->getValue();
  1972. const APInt &StartRem = StartInt.urem(StepInt);
  1973. if (StartRem != 0)
  1974. LHS = getAddRecExpr(getConstant(StartInt - StartRem), Step,
  1975. AR->getLoop(), SCEV::FlagNW);
  1976. }
  1977. }
  1978. // (A*B)/C --> A*(B/C) if safe and B/C can be folded.
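// For example, (A * 6) /u 3 becomes A * 2 once a multiplicand is found
// that 3 divides exactly and the zero-extended product matches.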
  1979. if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(LHS)) {
  1980. SmallVector<const SCEV *, 4> Operands;
  1981. for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i)
  1982. Operands.push_back(getZeroExtendExpr(M->getOperand(i), ExtTy));
  1983. if (getZeroExtendExpr(M, ExtTy) == getMulExpr(Operands))
  1984. // Find an operand that's safely divisible.
  1985. for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) {
  1986. const SCEV *Op = M->getOperand(i);
  1987. const SCEV *Div = getUDivExpr(Op, RHSC);
  1988. if (!isa<SCEVUDivExpr>(Div) && getMulExpr(Div, RHSC) == Op) {
  1989. Operands = SmallVector<const SCEV *, 4>(M->op_begin(),
  1990. M->op_end());
  1991. Operands[i] = Div;
  1992. return getMulExpr(Operands);
  1993. }
  1994. }
  1995. }
  1996. // (A+B)/C --> (A/C + B/C) if safe and A/C and B/C can be folded.
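// For example, (X*4 + 8) /u 2 becomes X*2 + 4, since each addend divides
// exactly; if any addend does not, the whole fold is abandoned.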
  1997. if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(LHS)) {
  1998. SmallVector<const SCEV *, 4> Operands;
  1999. for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i)
  2000. Operands.push_back(getZeroExtendExpr(A->getOperand(i), ExtTy));
  2001. if (getZeroExtendExpr(A, ExtTy) == getAddExpr(Operands)) {
  2002. Operands.clear();
  2003. for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i) {
  2004. const SCEV *Op = getUDivExpr(A->getOperand(i), RHS);
  2005. if (isa<SCEVUDivExpr>(Op) ||
  2006. getMulExpr(Op, RHS) != A->getOperand(i))
  2007. break;
  2008. Operands.push_back(Op);
  2009. }
  2010. if (Operands.size() == A->getNumOperands())
  2011. return getAddExpr(Operands);
  2012. }
  2013. }
  2014. // Fold if both operands are constant.
  2015. if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) {
  2016. Constant *LHSCV = LHSC->getValue();
  2017. Constant *RHSCV = RHSC->getValue();
  2018. return getConstant(cast<ConstantInt>(ConstantExpr::getUDiv(LHSCV,
  2019. RHSCV)));
  2020. }
  2021. }
  2022. }
  2023. FoldingSetNodeID ID;
  2024. ID.AddInteger(scUDivExpr);
  2025. ID.AddPointer(LHS);
  2026. ID.AddPointer(RHS);
  2027. void *IP = 0;
  2028. if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  2029. SCEV *S = new (SCEVAllocator) SCEVUDivExpr(ID.Intern(SCEVAllocator),
  2030. LHS, RHS);
  2031. UniqueSCEVs.InsertNode(S, IP);
  2032. return S;
  2033. }
  2034. /// getAddRecExpr - Get an add recurrence expression for the specified loop.
  2035. /// Simplify the expression as much as possible.
  2036. const SCEV *ScalarEvolution::getAddRecExpr(const SCEV *Start, const SCEV *Step,
  2037. const Loop *L,
  2038. SCEV::NoWrapFlags Flags) {
  2039. SmallVector<const SCEV *, 4> Operands;
  2040. Operands.push_back(Start);
  2041. if (const SCEVAddRecExpr *StepChrec = dyn_cast<SCEVAddRecExpr>(Step))
  2042. if (StepChrec->getLoop() == L) {
  2043. Operands.append(StepChrec->op_begin(), StepChrec->op_end());
  2044. return getAddRecExpr(Operands, L, maskFlags(Flags, SCEV::FlagNW));
  2045. }
  2046. Operands.push_back(Step);
  2047. return getAddRecExpr(Operands, L, Flags);
  2048. }
  2049. /// getAddRecExpr - Get an add recurrence expression for the specified loop.
  2050. /// Simplify the expression as much as possible.
  2051. const SCEV *
  2052. ScalarEvolution::getAddRecExpr(SmallVectorImpl<const SCEV *> &Operands,
  2053. const Loop *L, SCEV::NoWrapFlags Flags) {
  2054. if (Operands.size() == 1) return Operands[0];
  2055. #ifndef NDEBUG
  2056. Type *ETy = getEffectiveSCEVType(Operands[0]->getType());
  2057. for (unsigned i = 1, e = Operands.size(); i != e; ++i)
  2058. assert(getEffectiveSCEVType(Operands[i]->getType()) == ETy &&
  2059. "SCEVAddRecExpr operand types don't match!");
  2060. for (unsigned i = 0, e = Operands.size(); i != e; ++i)
  2061. assert(isLoopInvariant(Operands[i], L) &&
  2062. "SCEVAddRecExpr operand is not loop-invariant!");
  2063. #endif
  2064. if (Operands.back()->isZero()) {
  2065. Operands.pop_back();
  2066. return getAddRecExpr(Operands, L, SCEV::FlagAnyWrap); // {X,+,0} --> X
  2067. }
2068. // It's tempting to want to call getMaxBackedgeTakenCount here and
  2069. // use that information to infer NUW and NSW flags. However, computing a
  2070. // BE count requires calling getAddRecExpr, so we may not yet have a
  2071. // meaningful BE count at this point (and if we don't, we'd be stuck
  2072. // with a SCEVCouldNotCompute as the cached BE count).
  2073. // If FlagNSW is true and all the operands are non-negative, infer FlagNUW.
  2074. // And vice-versa.
  2075. int SignOrUnsignMask = SCEV::FlagNUW | SCEV::FlagNSW;
  2076. SCEV::NoWrapFlags SignOrUnsignWrap = maskFlags(Flags, SignOrUnsignMask);
  2077. if (SignOrUnsignWrap && (SignOrUnsignWrap != SignOrUnsignMask)) {
  2078. bool All = true;
  2079. for (SmallVectorImpl<const SCEV *>::const_iterator I = Operands.begin(),
  2080. E = Operands.end(); I != E; ++I)
  2081. if (!isKnownNonNegative(*I)) {
  2082. All = false;
  2083. break;
  2084. }
  2085. if (All) Flags = setFlags(Flags, (SCEV::NoWrapFlags)SignOrUnsignMask);
  2086. }
2087. // Canonicalize nested AddRecs by nesting them in order of loop depth.
  2088. if (const SCEVAddRecExpr *NestedAR = dyn_cast<SCEVAddRecExpr>(Operands[0])) {
  2089. const Loop *NestedLoop = NestedAR->getLoop();
  2090. if (L->contains(NestedLoop) ?
  2091. (L->getLoopDepth() < NestedLoop->getLoopDepth()) :
  2092. (!NestedLoop->contains(L) &&
  2093. DT->dominates(L->getHeader(), NestedLoop->getHeader()))) {
  2094. SmallVector<const SCEV *, 4> NestedOperands(NestedAR->op_begin(),
  2095. NestedAR->op_end());
  2096. Operands[0] = NestedAR->getStart();
  2097. // AddRecs require their operands be loop-invariant with respect to their
  2098. // loops. Don't perform this transformation if it would break this
  2099. // requirement.
  2100. bool AllInvariant = true;
  2101. for (unsigned i = 0, e = Operands.size(); i != e; ++i)
  2102. if (!isLoopInvariant(Operands[i], L)) {
  2103. AllInvariant = false;
  2104. break;
  2105. }
  2106. if (AllInvariant) {
  2107. // Create a recurrence for the outer loop with the same step size.
  2108. //
  2109. // The outer recurrence keeps its NW flag but only keeps NUW/NSW if the
  2110. // inner recurrence has the same property.
  2111. SCEV::NoWrapFlags OuterFlags =
  2112. maskFlags(Flags, SCEV::FlagNW | NestedAR->getNoWrapFlags());
  2113. NestedOperands[0] = getAddRecExpr(Operands, L, OuterFlags);
  2114. AllInvariant = true;
  2115. for (unsigned i = 0, e = NestedOperands.size(); i != e; ++i)
  2116. if (!isLoopInvariant(NestedOperands[i], NestedLoop)) {
  2117. AllInvariant = false;
  2118. break;
  2119. }
  2120. if (AllInvariant) {
  2121. // Ok, both add recurrences are valid after the transformation.
  2122. //
  2123. // The inner recurrence keeps its NW flag but only keeps NUW/NSW if
  2124. // the outer recurrence has the same property.
  2125. SCEV::NoWrapFlags InnerFlags =
  2126. maskFlags(NestedAR->getNoWrapFlags(), SCEV::FlagNW | Flags);
  2127. return getAddRecExpr(NestedOperands, NestedLoop, InnerFlags);
  2128. }
  2129. }
  2130. // Reset Operands to its original state.
  2131. Operands[0] = NestedAR;
  2132. }
  2133. }
  2134. // Okay, it looks like we really DO need an addrec expr. Check to see if we
  2135. // already have one, otherwise create a new one.
  2136. FoldingSetNodeID ID;
  2137. ID.AddInteger(scAddRecExpr);
  2138. for (unsigned i = 0, e = Operands.size(); i != e; ++i)
  2139. ID.AddPointer(Operands[i]);
  2140. ID.AddPointer(L);
  2141. void *IP = 0;
  2142. SCEVAddRecExpr *S =
  2143. static_cast<SCEVAddRecExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
  2144. if (!S) {
  2145. const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Operands.size());
  2146. std::uninitialized_copy(Operands.begin(), Operands.end(), O);
  2147. S = new (SCEVAllocator) SCEVAddRecExpr(ID.Intern(SCEVAllocator),
  2148. O, Operands.size(), L);
  2149. UniqueSCEVs.InsertNode(S, IP);
  2150. }
  2151. S->setNoWrapFlags(Flags);
  2152. return S;
  2153. }
  2154. const SCEV *ScalarEvolution::getSMaxExpr(const SCEV *LHS,
  2155. const SCEV *RHS) {
  2156. SmallVector<const SCEV *, 2> Ops;
  2157. Ops.push_back(LHS);
  2158. Ops.push_back(RHS);
  2159. return getSMaxExpr(Ops);
  2160. }
  2161. const SCEV *
  2162. ScalarEvolution::getSMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
  2163. assert(!Ops.empty() && "Cannot get empty smax!");
  2164. if (Ops.size() == 1) return Ops[0];
  2165. #ifndef NDEBUG
  2166. Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
  2167. for (unsigned i = 1, e = Ops.size(); i != e; ++i)
  2168. assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
  2169. "SCEVSMaxExpr operand types don't match!");
  2170. #endif
  2171. // Sort by complexity, this groups all similar expression types together.
  2172. GroupByComplexity(Ops, LI);
  2173. // If there are any constants, fold them together.
  2174. unsigned Idx = 0;
  2175. if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
  2176. ++Idx;
  2177. assert(Idx < Ops.size());
  2178. while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
  2179. // We found two constants, fold them together!
  2180. ConstantInt *Fold = ConstantInt::get(getContext(),
  2181. APIntOps::smax(LHSC->getValue()->getValue(),
  2182. RHSC->getValue()->getValue()));
  2183. Ops[0] = getConstant(Fold);
  2184. Ops.erase(Ops.begin()+1); // Erase the folded element
  2185. if (Ops.size() == 1) return Ops[0];
  2186. LHSC = cast<SCEVConstant>(Ops[0]);
  2187. }
  2188. // If we are left with a constant minimum-int, strip it off.
  2189. if (cast<SCEVConstant>(Ops[0])->getValue()->isMinValue(true)) {
  2190. Ops.erase(Ops.begin());
  2191. --Idx;
  2192. } else if (cast<SCEVConstant>(Ops[0])->getValue()->isMaxValue(true)) {
  2193. // If we have an smax with a constant maximum-int, it will always be
  2194. // maximum-int.
  2195. return Ops[0];
  2196. }
  2197. if (Ops.size() == 1) return Ops[0];
  2198. }
  2199. // Find the first SMax
  2200. while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scSMaxExpr)
  2201. ++Idx;
  2202. // Check to see if one of the operands is an SMax. If so, expand its operands
  2203. // onto our operand list, and recurse to simplify.
  2204. if (Idx < Ops.size()) {
  2205. bool DeletedSMax = false;
  2206. while (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(Ops[Idx])) {
  2207. Ops.erase(Ops.begin()+Idx);
  2208. Ops.append(SMax->op_begin(), SMax->op_end());
  2209. DeletedSMax = true;
  2210. }
  2211. if (DeletedSMax)
  2212. return getSMaxExpr(Ops);
  2213. }
  2214. // Okay, check to see if the same value occurs in the operand list twice. If
  2215. // so, delete one. Since we sorted the list, these values are required to
  2216. // be adjacent.
  2217. for (unsigned i = 0, e = Ops.size()-1; i != e; ++i)
  2218. // X smax Y smax Y --> X smax Y
2219. // X smax Y --> X, if X is always greater than or equal to Y
  2220. if (Ops[i] == Ops[i+1] ||
  2221. isKnownPredicate(ICmpInst::ICMP_SGE, Ops[i], Ops[i+1])) {
  2222. Ops.erase(Ops.begin()+i+1, Ops.begin()+i+2);
  2223. --i; --e;
  2224. } else if (isKnownPredicate(ICmpInst::ICMP_SLE, Ops[i], Ops[i+1])) {
  2225. Ops.erase(Ops.begin()+i, Ops.begin()+i+1);
  2226. --i; --e;
  2227. }
  2228. if (Ops.size() == 1) return Ops[0];
  2229. assert(!Ops.empty() && "Reduced smax down to nothing!");
  2230. // Okay, it looks like we really DO need an smax expr. Check to see if we
  2231. // already have one, otherwise create a new one.
  2232. FoldingSetNodeID ID;
  2233. ID.AddInteger(scSMaxExpr);
  2234. for (unsigned i = 0, e = Ops.size(); i != e; ++i)
  2235. ID.AddPointer(Ops[i]);
  2236. void *IP = 0;
  2237. if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  2238. const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
  2239. std::uninitialized_copy(Ops.begin(), Ops.end(), O);
  2240. SCEV *S = new (SCEVAllocator) SCEVSMaxExpr(ID.Intern(SCEVAllocator),
  2241. O, Ops.size());
  2242. UniqueSCEVs.InsertNode(S, IP);
  2243. return S;
  2244. }
  2245. const SCEV *ScalarEvolution::getUMaxExpr(const SCEV *LHS,
  2246. const SCEV *RHS) {
  2247. SmallVector<const SCEV *, 2> Ops;
  2248. Ops.push_back(LHS);
  2249. Ops.push_back(RHS);
  2250. return getUMaxExpr(Ops);
  2251. }
  2252. const SCEV *
  2253. ScalarEvolution::getUMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
  2254. assert(!Ops.empty() && "Cannot get empty umax!");
  2255. if (Ops.size() == 1) return Ops[0];
  2256. #ifndef NDEBUG
  2257. Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
  2258. for (unsigned i = 1, e = Ops.size(); i != e; ++i)
  2259. assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
  2260. "SCEVUMaxExpr operand types don't match!");
  2261. #endif
  2262. // Sort by complexity, this groups all similar expression types together.
  2263. GroupByComplexity(Ops, LI);
  2264. // If there are any constants, fold them together.
  2265. unsigned Idx = 0;
  2266. if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
  2267. ++Idx;
  2268. assert(Idx < Ops.size());
  2269. while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
  2270. // We found two constants, fold them together!
  2271. ConstantInt *Fold = ConstantInt::get(getContext(),
  2272. APIntOps::umax(LHSC->getValue()->getValue(),
  2273. RHSC->getValue()->getValue()));
  2274. Ops[0] = getConstant(Fold);
  2275. Ops.erase(Ops.begin()+1); // Erase the folded element
  2276. if (Ops.size() == 1) return Ops[0];
  2277. LHSC = cast<SCEVConstant>(Ops[0]);
  2278. }
  2279. // If we are left with a constant minimum-int, strip it off.
  2280. if (cast<SCEVConstant>(Ops[0])->getValue()->isMinValue(false)) {
  2281. Ops.erase(Ops.begin());
  2282. --Idx;
  2283. } else if (cast<SCEVConstant>(Ops[0])->getValue()->isMaxValue(false)) {
  2284. // If we have an umax with a constant maximum-int, it will always be
  2285. // maximum-int.
  2286. return Ops[0];
  2287. }
  2288. if (Ops.size() == 1) return Ops[0];
  2289. }
  2290. // Find the first UMax
  2291. while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scUMaxExpr)
  2292. ++Idx;
  2293. // Check to see if one of the operands is a UMax. If so, expand its operands
  2294. // onto our operand list, and recurse to simplify.
  2295. if (Idx < Ops.size()) {
  2296. bool DeletedUMax = false;
  2297. while (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(Ops[Idx])) {
  2298. Ops.erase(Ops.begin()+Idx);
  2299. Ops.append(UMax->op_begin(), UMax->op_end());
  2300. DeletedUMax = true;
  2301. }
  2302. if (DeletedUMax)
  2303. return getUMaxExpr(Ops);
  2304. }
  2305. // Okay, check to see if the same value occurs in the operand list twice. If
  2306. // so, delete one. Since we sorted the list, these values are required to
  2307. // be adjacent.
  2308. for (unsigned i = 0, e = Ops.size()-1; i != e; ++i)
  2309. // X umax Y umax Y --> X umax Y
2310. // X umax Y --> X, if X is always greater than or equal to Y
  2311. if (Ops[i] == Ops[i+1] ||
  2312. isKnownPredicate(ICmpInst::ICMP_UGE, Ops[i], Ops[i+1])) {
  2313. Ops.erase(Ops.begin()+i+1, Ops.begin()+i+2);
  2314. --i; --e;
  2315. } else if (isKnownPredicate(ICmpInst::ICMP_ULE, Ops[i], Ops[i+1])) {
  2316. Ops.erase(Ops.begin()+i, Ops.begin()+i+1);
  2317. --i; --e;
  2318. }
  2319. if (Ops.size() == 1) return Ops[0];
  2320. assert(!Ops.empty() && "Reduced umax down to nothing!");
  2321. // Okay, it looks like we really DO need a umax expr. Check to see if we
  2322. // already have one, otherwise create a new one.
  2323. FoldingSetNodeID ID;
  2324. ID.AddInteger(scUMaxExpr);
  2325. for (unsigned i = 0, e = Ops.size(); i != e; ++i)
  2326. ID.AddPointer(Ops[i]);
  2327. void *IP = 0;
  2328. if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  2329. const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
  2330. std::uninitialized_copy(Ops.begin(), Ops.end(), O);
  2331. SCEV *S = new (SCEVAllocator) SCEVUMaxExpr(ID.Intern(SCEVAllocator),
  2332. O, Ops.size());
  2333. UniqueSCEVs.InsertNode(S, IP);
  2334. return S;
  2335. }
  2336. const SCEV *ScalarEvolution::getSMinExpr(const SCEV *LHS,
  2337. const SCEV *RHS) {
  2338. // ~smax(~x, ~y) == smin(x, y).
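// For example, for i8 values x = 3, y = 5: ~3 = -4, ~5 = -6,
// smax(-4, -6) = -4, and ~(-4) = 3 = smin(3, 5).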
  2339. return getNotSCEV(getSMaxExpr(getNotSCEV(LHS), getNotSCEV(RHS)));
  2340. }
  2341. const SCEV *ScalarEvolution::getUMinExpr(const SCEV *LHS,
  2342. const SCEV *RHS) {
  2343. // ~umax(~x, ~y) == umin(x, y)
  2344. return getNotSCEV(getUMaxExpr(getNotSCEV(LHS), getNotSCEV(RHS)));
  2345. }
  2346. const SCEV *ScalarEvolution::getSizeOfExpr(Type *AllocTy) {
  2347. // If we have DataLayout, we can bypass creating a target-independent
  2348. // constant expression and then folding it back into a ConstantInt.
  2349. // This is just a compile-time optimization.
  2350. if (TD)
  2351. return getConstant(TD->getIntPtrType(getContext()),
  2352. TD->getTypeAllocSize(AllocTy));
  2353. Constant *C = ConstantExpr::getSizeOf(AllocTy);
  2354. if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C))
  2355. if (Constant *Folded = ConstantFoldConstantExpression(CE, TD, TLI))
  2356. C = Folded;
  2357. Type *Ty = getEffectiveSCEVType(PointerType::getUnqual(AllocTy));
  2358. return getTruncateOrZeroExtend(getSCEV(C), Ty);
  2359. }
  2360. const SCEV *ScalarEvolution::getAlignOfExpr(Type *AllocTy) {
  2361. Constant *C = ConstantExpr::getAlignOf(AllocTy);
  2362. if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C))
  2363. if (Constant *Folded = ConstantFoldConstantExpression(CE, TD, TLI))
  2364. C = Folded;
  2365. Type *Ty = getEffectiveSCEVType(PointerType::getUnqual(AllocTy));
  2366. return getTruncateOrZeroExtend(getSCEV(C), Ty);
  2367. }
  2368. const SCEV *ScalarEvolution::getOffsetOfExpr(StructType *STy,
  2369. unsigned FieldNo) {
  2370. // If we have DataLayout, we can bypass creating a target-independent
  2371. // constant expression and then folding it back into a ConstantInt.
  2372. // This is just a compile-time optimization.
  2373. if (TD)
  2374. return getConstant(TD->getIntPtrType(getContext()),
  2375. TD->getStructLayout(STy)->getElementOffset(FieldNo));
  2376. Constant *C = ConstantExpr::getOffsetOf(STy, FieldNo);
  2377. if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C))
  2378. if (Constant *Folded = ConstantFoldConstantExpression(CE, TD, TLI))
  2379. C = Folded;
  2380. Type *Ty = getEffectiveSCEVType(PointerType::getUnqual(STy));
  2381. return getTruncateOrZeroExtend(getSCEV(C), Ty);
  2382. }
  2383. const SCEV *ScalarEvolution::getOffsetOfExpr(Type *CTy,
  2384. Constant *FieldNo) {
  2385. Constant *C = ConstantExpr::getOffsetOf(CTy, FieldNo);
  2386. if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C))
  2387. if (Constant *Folded = ConstantFoldConstantExpression(CE, TD, TLI))
  2388. C = Folded;
  2389. Type *Ty = getEffectiveSCEVType(PointerType::getUnqual(CTy));
  2390. return getTruncateOrZeroExtend(getSCEV(C), Ty);
  2391. }
  2392. const SCEV *ScalarEvolution::getUnknown(Value *V) {
  2393. // Don't attempt to do anything other than create a SCEVUnknown object
  2394. // here. createSCEV only calls getUnknown after checking for all other
  2395. // interesting possibilities, and any other code that calls getUnknown
  2396. // is doing so in order to hide a value from SCEV canonicalization.
  2397. FoldingSetNodeID ID;
  2398. ID.AddInteger(scUnknown);
  2399. ID.AddPointer(V);
  2400. void *IP = 0;
  2401. if (SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) {
  2402. assert(cast<SCEVUnknown>(S)->getValue() == V &&
  2403. "Stale SCEVUnknown in uniquing map!");
  2404. return S;
  2405. }
  2406. SCEV *S = new (SCEVAllocator) SCEVUnknown(ID.Intern(SCEVAllocator), V, this,
  2407. FirstUnknown);
  2408. FirstUnknown = cast<SCEVUnknown>(S);
  2409. UniqueSCEVs.InsertNode(S, IP);
  2410. return S;
  2411. }
  2412. //===----------------------------------------------------------------------===//
  2413. // Basic SCEV Analysis and PHI Idiom Recognition Code
  2414. //
  2415. /// isSCEVable - Test if values of the given type are analyzable within
  2416. /// the SCEV framework. This primarily includes integer types, and it
  2417. /// can optionally include pointer types if the ScalarEvolution class
  2418. /// has access to target-specific information.
  2419. bool ScalarEvolution::isSCEVable(Type *Ty) const {
  2420. // Integers and pointers are always SCEVable.
  2421. return Ty->isIntegerTy() || Ty->isPointerTy();
  2422. }
  2423. /// getTypeSizeInBits - Return the size in bits of the specified type,
  2424. /// for which isSCEVable must return true.
  2425. uint64_t ScalarEvolution::getTypeSizeInBits(Type *Ty) const {
  2426. assert(isSCEVable(Ty) && "Type is not SCEVable!");
  2427. // If we have a DataLayout, use it!
  2428. if (TD)
  2429. return TD->getTypeSizeInBits(Ty);
  2430. // Integer types have fixed sizes.
  2431. if (Ty->isIntegerTy())
  2432. return Ty->getPrimitiveSizeInBits();
2433. // The only other supported type is pointer. Without DataLayout, conservatively
  2434. // assume pointers are 64-bit.
  2435. assert(Ty->isPointerTy() && "isSCEVable permitted a non-SCEVable type!");
  2436. return 64;
  2437. }
  2438. /// getEffectiveSCEVType - Return a type with the same bitwidth as
  2439. /// the given type and which represents how SCEV will treat the given
  2440. /// type, for which isSCEVable must return true. For pointer types,
  2441. /// this is the pointer-sized integer type.
  2442. Type *ScalarEvolution::getEffectiveSCEVType(Type *Ty) const {
  2443. assert(isSCEVable(Ty) && "Type is not SCEVable!");
  2444. if (Ty->isIntegerTy())
  2445. return Ty;
2446. // The only other supported type is pointer.
  2447. assert(Ty->isPointerTy() && "Unexpected non-pointer non-integer type!");
  2448. if (TD) return TD->getIntPtrType(getContext());
  2449. // Without DataLayout, conservatively assume pointers are 64-bit.
  2450. return Type::getInt64Ty(getContext());
  2451. }
  2452. const SCEV *ScalarEvolution::getCouldNotCompute() {
  2453. return &CouldNotCompute;
  2454. }
  2455. /// getSCEV - Return an existing SCEV if it exists, otherwise analyze the
  2456. /// expression and create a new one.
  2457. const SCEV *ScalarEvolution::getSCEV(Value *V) {
  2458. assert(isSCEVable(V->getType()) && "Value is not SCEVable!");
  2459. ValueExprMapType::const_iterator I = ValueExprMap.find_as(V);
  2460. if (I != ValueExprMap.end()) return I->second;
  2461. const SCEV *S = createSCEV(V);
  2462. // The process of creating a SCEV for V may have caused other SCEVs
  2463. // to have been created, so it's necessary to insert the new entry
  2464. // from scratch, rather than trying to remember the insert position
  2465. // above.
  2466. ValueExprMap.insert(std::make_pair(SCEVCallbackVH(V, this), S));
  2467. return S;
  2468. }
  2469. /// getNegativeSCEV - Return a SCEV corresponding to -V = -1*V
  2470. ///
  2471. const SCEV *ScalarEvolution::getNegativeSCEV(const SCEV *V) {
  2472. if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V))
  2473. return getConstant(
  2474. cast<ConstantInt>(ConstantExpr::getNeg(VC->getValue())));
  2475. Type *Ty = V->getType();
  2476. Ty = getEffectiveSCEVType(Ty);
  2477. return getMulExpr(V,
  2478. getConstant(cast<ConstantInt>(Constant::getAllOnesValue(Ty))));
  2479. }
  2480. /// getNotSCEV - Return a SCEV corresponding to ~V = -1-V
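/// For example, for an i8 value: ~5 = -1 - 5 = -6, matching bitwise negation.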
  2481. const SCEV *ScalarEvolution::getNotSCEV(const SCEV *V) {
  2482. if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V))
  2483. return getConstant(
  2484. cast<ConstantInt>(ConstantExpr::getNot(VC->getValue())));
  2485. Type *Ty = V->getType();
  2486. Ty = getEffectiveSCEVType(Ty);
  2487. const SCEV *AllOnes =
  2488. getConstant(cast<ConstantInt>(Constant::getAllOnesValue(Ty)));
  2489. return getMinusSCEV(AllOnes, V);
  2490. }
  2491. /// getMinusSCEV - Return LHS-RHS. Minus is represented in SCEV as A+B*-1.
  2492. const SCEV *ScalarEvolution::getMinusSCEV(const SCEV *LHS, const SCEV *RHS,
  2493. SCEV::NoWrapFlags Flags) {
  2494. assert(!maskFlags(Flags, SCEV::FlagNUW) && "subtraction does not have NUW");
  2495. // Fast path: X - X --> 0.
  2496. if (LHS == RHS)
  2497. return getConstant(LHS->getType(), 0);
  2498. // X - Y --> X + -Y
  2499. return getAddExpr(LHS, getNegativeSCEV(RHS), Flags);
  2500. }
  2501. /// getTruncateOrZeroExtend - Return a SCEV corresponding to a conversion of the
  2502. /// input value to the specified type. If the type must be extended, it is zero
  2503. /// extended.
  2504. const SCEV *
  2505. ScalarEvolution::getTruncateOrZeroExtend(const SCEV *V, Type *Ty) {
  2506. Type *SrcTy = V->getType();
  2507. assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
  2508. (Ty->isIntegerTy() || Ty->isPointerTy()) &&
  2509. "Cannot truncate or zero extend with non-integer arguments!");
  2510. if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
  2511. return V; // No conversion
  2512. if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty))
  2513. return getTruncateExpr(V, Ty);
  2514. return getZeroExtendExpr(V, Ty);
  2515. }
  2516. /// getTruncateOrSignExtend - Return a SCEV corresponding to a conversion of the
  2517. /// input value to the specified type. If the type must be extended, it is sign
  2518. /// extended.
  2519. const SCEV *
  2520. ScalarEvolution::getTruncateOrSignExtend(const SCEV *V,
  2521. Type *Ty) {
  2522. Type *SrcTy = V->getType();
  2523. assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
  2524. (Ty->isIntegerTy() || Ty->isPointerTy()) &&
  2525. "Cannot truncate or zero extend with non-integer arguments!");
  2526. if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
  2527. return V; // No conversion
  2528. if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty))
  2529. return getTruncateExpr(V, Ty);
  2530. return getSignExtendExpr(V, Ty);
  2531. }
  2532. /// getNoopOrZeroExtend - Return a SCEV corresponding to a conversion of the
  2533. /// input value to the specified type. If the type must be extended, it is zero
  2534. /// extended. The conversion must not be narrowing.
  2535. const SCEV *
  2536. ScalarEvolution::getNoopOrZeroExtend(const SCEV *V, Type *Ty) {
  2537. Type *SrcTy = V->getType();
  2538. assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
  2539. (Ty->isIntegerTy() || Ty->isPointerTy()) &&
  2540. "Cannot noop or zero extend with non-integer arguments!");
  2541. assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
  2542. "getNoopOrZeroExtend cannot truncate!");
  2543. if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
  2544. return V; // No conversion
  2545. return getZeroExtendExpr(V, Ty);
  2546. }
  2547. /// getNoopOrSignExtend - Return a SCEV corresponding to a conversion of the
  2548. /// input value to the specified type. If the type must be extended, it is sign
  2549. /// extended. The conversion must not be narrowing.
  2550. const SCEV *
  2551. ScalarEvolution::getNoopOrSignExtend(const SCEV *V, Type *Ty) {
  2552. Type *SrcTy = V->getType();
  2553. assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
  2554. (Ty->isIntegerTy() || Ty->isPointerTy()) &&
  2555. "Cannot noop or sign extend with non-integer arguments!");
  2556. assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
  2557. "getNoopOrSignExtend cannot truncate!");
  2558. if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
  2559. return V; // No conversion
  2560. return getSignExtendExpr(V, Ty);
  2561. }
  2562. /// getNoopOrAnyExtend - Return a SCEV corresponding to a conversion of
  2563. /// the input value to the specified type. If the type must be extended,
  2564. /// it is extended with unspecified bits. The conversion must not be
  2565. /// narrowing.
  2566. const SCEV *
  2567. ScalarEvolution::getNoopOrAnyExtend(const SCEV *V, Type *Ty) {
  2568. Type *SrcTy = V->getType();
  2569. assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
  2570. (Ty->isIntegerTy() || Ty->isPointerTy()) &&
  2571. "Cannot noop or any extend with non-integer arguments!");
  2572. assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
  2573. "getNoopOrAnyExtend cannot truncate!");
  2574. if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
  2575. return V; // No conversion
  2576. return getAnyExtendExpr(V, Ty);
  2577. }
  2578. /// getTruncateOrNoop - Return a SCEV corresponding to a conversion of the
  2579. /// input value to the specified type. The conversion must not be widening.
  2580. const SCEV *
  2581. ScalarEvolution::getTruncateOrNoop(const SCEV *V, Type *Ty) {
  2582. Type *SrcTy = V->getType();
  2583. assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
  2584. (Ty->isIntegerTy() || Ty->isPointerTy()) &&
  2585. "Cannot truncate or noop with non-integer arguments!");
  2586. assert(getTypeSizeInBits(SrcTy) >= getTypeSizeInBits(Ty) &&
  2587. "getTruncateOrNoop cannot extend!");
  2588. if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
  2589. return V; // No conversion
  2590. return getTruncateExpr(V, Ty);
  2591. }
  2592. /// getUMaxFromMismatchedTypes - Promote the operands to the wider of
  2593. /// the types using zero-extension, and then perform a umax operation
  2594. /// with them.
  2595. const SCEV *ScalarEvolution::getUMaxFromMismatchedTypes(const SCEV *LHS,
  2596. const SCEV *RHS) {
  2597. const SCEV *PromotedLHS = LHS;
  2598. const SCEV *PromotedRHS = RHS;
  2599. if (getTypeSizeInBits(LHS->getType()) > getTypeSizeInBits(RHS->getType()))
  2600. PromotedRHS = getZeroExtendExpr(RHS, LHS->getType());
  2601. else
  2602. PromotedLHS = getNoopOrZeroExtend(LHS, RHS->getType());
  2603. return getUMaxExpr(PromotedLHS, PromotedRHS);
  2604. }
  2605. /// getUMinFromMismatchedTypes - Promote the operands to the wider of
  2606. /// the types using zero-extension, and then perform a umin operation
  2607. /// with them.
  2608. const SCEV *ScalarEvolution::getUMinFromMismatchedTypes(const SCEV *LHS,
  2609. const SCEV *RHS) {
  2610. const SCEV *PromotedLHS = LHS;
  2611. const SCEV *PromotedRHS = RHS;
  2612. if (getTypeSizeInBits(LHS->getType()) > getTypeSizeInBits(RHS->getType()))
  2613. PromotedRHS = getZeroExtendExpr(RHS, LHS->getType());
  2614. else
  2615. PromotedLHS = getNoopOrZeroExtend(LHS, RHS->getType());
  2616. return getUMinExpr(PromotedLHS, PromotedRHS);
  2617. }
  2618. /// getPointerBase - Transitively follow the chain of pointer-type operands
  2619. /// until reaching a SCEV that does not have a single pointer operand. This
  2620. /// returns a SCEVUnknown pointer for well-formed pointer-type expressions,
  2621. /// but corner cases do exist.
  2622. const SCEV *ScalarEvolution::getPointerBase(const SCEV *V) {
  2623. // A pointer operand may evaluate to a nonpointer expression, such as null.
  2624. if (!V->getType()->isPointerTy())
  2625. return V;
  2626. if (const SCEVCastExpr *Cast = dyn_cast<SCEVCastExpr>(V)) {
  2627. return getPointerBase(Cast->getOperand());
  2628. }
  2629. else if (const SCEVNAryExpr *NAry = dyn_cast<SCEVNAryExpr>(V)) {
  2630. const SCEV *PtrOp = 0;
  2631. for (SCEVNAryExpr::op_iterator I = NAry->op_begin(), E = NAry->op_end();
  2632. I != E; ++I) {
  2633. if ((*I)->getType()->isPointerTy()) {
  2634. // Cannot find the base of an expression with multiple pointer operands.
  2635. if (PtrOp)
  2636. return V;
  2637. PtrOp = *I;
  2638. }
  2639. }
  2640. if (!PtrOp)
  2641. return V;
  2642. return getPointerBase(PtrOp);
  2643. }
  2644. return V;
  2645. }
  2646. /// PushDefUseChildren - Push users of the given Instruction
  2647. /// onto the given Worklist.
  2648. static void
  2649. PushDefUseChildren(Instruction *I,
  2650. SmallVectorImpl<Instruction *> &Worklist) {
  2651. // Push the def-use children onto the Worklist stack.
  2652. for (Value::use_iterator UI = I->use_begin(), UE = I->use_end();
  2653. UI != UE; ++UI)
  2654. Worklist.push_back(cast<Instruction>(*UI));
  2655. }
2656. /// ForgetSymbolicName - This looks up computed SCEV values for all
2657. /// instructions that depend on the given instruction and removes them from
2658. /// the ValueExprMap if they reference SymName. This is used during PHI
2659. /// resolution.
  2660. void
  2661. ScalarEvolution::ForgetSymbolicName(Instruction *PN, const SCEV *SymName) {
  2662. SmallVector<Instruction *, 16> Worklist;
  2663. PushDefUseChildren(PN, Worklist);
  2664. SmallPtrSet<Instruction *, 8> Visited;
  2665. Visited.insert(PN);
  2666. while (!Worklist.empty()) {
  2667. Instruction *I = Worklist.pop_back_val();
  2668. if (!Visited.insert(I)) continue;
  2669. ValueExprMapType::iterator It =
  2670. ValueExprMap.find_as(static_cast<Value *>(I));
  2671. if (It != ValueExprMap.end()) {
  2672. const SCEV *Old = It->second;
  2673. // Short-circuit the def-use traversal if the symbolic name
  2674. // ceases to appear in expressions.
  2675. if (Old != SymName && !hasOperand(Old, SymName))
  2676. continue;
  2677. // SCEVUnknown for a PHI either means that it has an unrecognized
2678. // structure, it's a PHI that's in the process of being computed
  2679. // by createNodeForPHI, or it's a single-value PHI. In the first case,
  2680. // additional loop trip count information isn't going to change anything.
  2681. // In the second case, createNodeForPHI will perform the necessary
  2682. // updates on its own when it gets to that point. In the third, we do
  2683. // want to forget the SCEVUnknown.
  2684. if (!isa<PHINode>(I) ||
  2685. !isa<SCEVUnknown>(Old) ||
  2686. (I != PN && Old == SymName)) {
  2687. forgetMemoizedResults(Old);
  2688. ValueExprMap.erase(It);
  2689. }
  2690. }
  2691. PushDefUseChildren(I, Worklist);
  2692. }
  2693. }
  2694. /// createNodeForPHI - PHI nodes have two cases. Either the PHI node exists in
  2695. /// a loop header, making it a potential recurrence, or it doesn't.
  2696. ///
  2697. const SCEV *ScalarEvolution::createNodeForPHI(PHINode *PN) {
  2698. if (const Loop *L = LI->getLoopFor(PN->getParent()))
  2699. if (L->getHeader() == PN->getParent()) {
  2700. // The loop may have multiple entrances or multiple exits; we can analyze
  2701. // this phi as an addrec if it has a unique entry value and a unique
  2702. // backedge value.
  2703. Value *BEValueV = 0, *StartValueV = 0;
  2704. for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
  2705. Value *V = PN->getIncomingValue(i);
  2706. if (L->contains(PN->getIncomingBlock(i))) {
  2707. if (!BEValueV) {
  2708. BEValueV = V;
  2709. } else if (BEValueV != V) {
  2710. BEValueV = 0;
  2711. break;
  2712. }
  2713. } else if (!StartValueV) {
  2714. StartValueV = V;
  2715. } else if (StartValueV != V) {
  2716. StartValueV = 0;
  2717. break;
  2718. }
  2719. }
  2720. if (BEValueV && StartValueV) {
  2721. // While we are analyzing this PHI node, handle its value symbolically.
  2722. const SCEV *SymbolicName = getUnknown(PN);
  2723. assert(ValueExprMap.find_as(PN) == ValueExprMap.end() &&
  2724. "PHI node already processed?");
  2725. ValueExprMap.insert(std::make_pair(SCEVCallbackVH(PN, this), SymbolicName));
  2726. // Using this symbolic name for the PHI, analyze the value coming around
  2727. // the back-edge.
  2728. const SCEV *BEValue = getSCEV(BEValueV);
  2729. // NOTE: If BEValue is loop invariant, we know that the PHI node just
  2730. // has a special value for the first iteration of the loop.
  2731. // If the value coming around the backedge is an add with the symbolic
  2732. // value we just inserted, then we found a simple induction variable!
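// For example, for a canonical counter (illustrative names):
//   %i = phi i32 [ 0, %preheader ], [ %i.next, %latch ]
//   %i.next = add i32 %i, 1
// BEValue is (1 + SymbolicName), Accum becomes 1, and the code below
// rewrites the PHI as the addrec {0,+,1}<L>.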
  2733. if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(BEValue)) {
  2734. // If there is a single occurrence of the symbolic value, replace it
  2735. // with a recurrence.
  2736. unsigned FoundIndex = Add->getNumOperands();
  2737. for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
  2738. if (Add->getOperand(i) == SymbolicName)
  2739. if (FoundIndex == e) {
  2740. FoundIndex = i;
  2741. break;
  2742. }
  2743. if (FoundIndex != Add->getNumOperands()) {
  2744. // Create an add with everything but the specified operand.
  2745. SmallVector<const SCEV *, 8> Ops;
  2746. for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
  2747. if (i != FoundIndex)
  2748. Ops.push_back(Add->getOperand(i));
  2749. const SCEV *Accum = getAddExpr(Ops);
  2750. // This is not a valid addrec if the step amount is varying each
  2751. // loop iteration, but is not itself an addrec in this loop.
  2752. if (isLoopInvariant(Accum, L) ||
  2753. (isa<SCEVAddRecExpr>(Accum) &&
  2754. cast<SCEVAddRecExpr>(Accum)->getLoop() == L)) {
  2755. SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap;
  2756. // If the increment doesn't overflow, then neither the addrec nor
  2757. // the post-increment will overflow.
  2758. if (const AddOperator *OBO = dyn_cast<AddOperator>(BEValueV)) {
  2759. if (OBO->hasNoUnsignedWrap())
  2760. Flags = setFlags(Flags, SCEV::FlagNUW);
  2761. if (OBO->hasNoSignedWrap())
  2762. Flags = setFlags(Flags, SCEV::FlagNSW);
  2763. } else if (const GEPOperator *GEP =
  2764. dyn_cast<GEPOperator>(BEValueV)) {
  2765. // If the increment is an inbounds GEP, then we know the address
  2766. // space cannot be wrapped around. We cannot make any guarantee
  2767. // about signed or unsigned overflow because pointers are
  2768. // unsigned but we may have a negative index from the base
  2769. // pointer.
  2770. if (GEP->isInBounds())
  2771. Flags = setFlags(Flags, SCEV::FlagNW);
  2772. }
  2773. const SCEV *StartVal = getSCEV(StartValueV);
  2774. const SCEV *PHISCEV = getAddRecExpr(StartVal, Accum, L, Flags);
  2775. // Since the no-wrap flags are on the increment, they apply to the
  2776. // post-incremented value as well.
  2777. if (isLoopInvariant(Accum, L))
  2778. (void)getAddRecExpr(getAddExpr(StartVal, Accum),
  2779. Accum, L, Flags);
  2780. // Okay, for the entire analysis of this edge we assumed the PHI
  2781. // to be symbolic. We now need to go back and purge all of the
  2782. // entries for the scalars that use the symbolic expression.
  2783. ForgetSymbolicName(PN, SymbolicName);
  2784. ValueExprMap[SCEVCallbackVH(PN, this)] = PHISCEV;
  2785. return PHISCEV;
  2786. }
  2787. }
  2788. } else if (const SCEVAddRecExpr *AddRec =
  2789. dyn_cast<SCEVAddRecExpr>(BEValue)) {
  2790. // Otherwise, this could be a loop like this:
  2791. // i = 0; for (j = 1; ..; ++j) { .... i = j; }
  2792. // In this case, j = {1,+,1} and BEValue is j.
  2793. // Because the other in-value of i (0) fits the evolution of BEValue
  2794. // i really is an addrec evolution.
  2795. if (AddRec->getLoop() == L && AddRec->isAffine()) {
  2796. const SCEV *StartVal = getSCEV(StartValueV);
  2797. // If StartVal = j.start - j.stride, we can use StartVal as the
2798. // initial value of the addrec evolution.
  2799. if (StartVal == getMinusSCEV(AddRec->getOperand(0),
  2800. AddRec->getOperand(1))) {
  2801. // FIXME: For constant StartVal, we should be able to infer
  2802. // no-wrap flags.
  2803. const SCEV *PHISCEV =
  2804. getAddRecExpr(StartVal, AddRec->getOperand(1), L,
  2805. SCEV::FlagAnyWrap);
  2806. // Okay, for the entire analysis of this edge we assumed the PHI
  2807. // to be symbolic. We now need to go back and purge all of the
  2808. // entries for the scalars that use the symbolic expression.
  2809. ForgetSymbolicName(PN, SymbolicName);
  2810. ValueExprMap[SCEVCallbackVH(PN, this)] = PHISCEV;
  2811. return PHISCEV;
  2812. }
  2813. }
  2814. }
  2815. }
  2816. }
  2817. // If the PHI has a single incoming value, follow that value, unless the
  2818. // PHI's incoming blocks are in a different loop, in which case doing so
  2819. // risks breaking LCSSA form. Instcombine would normally zap these, but
  2820. // it doesn't have DominatorTree information, so it may miss cases.
  2821. if (Value *V = SimplifyInstruction(PN, TD, TLI, DT))
  2822. if (LI->replacementPreservesLCSSAForm(PN, V))
  2823. return getSCEV(V);
  2824. // If it's not a loop phi, we can't handle it yet.
  2825. return getUnknown(PN);
  2826. }
  2827. /// createNodeForGEP - Expand GEP instructions into add and multiply
  2828. /// operations. This allows them to be analyzed by regular SCEV code.
  2829. ///
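/// For example, assuming 4-byte i32 elements and an illustrative index %i,
///   getelementptr inbounds i32* %p, i64 %i
/// becomes roughly (%p + 4 * %i), with %i first sign-extended or truncated
/// to the pointer-sized integer type.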
  2830. const SCEV *ScalarEvolution::createNodeForGEP(GEPOperator *GEP) {
  2831. // Don't blindly transfer the inbounds flag from the GEP instruction to the
  2832. // Add expression, because the Instruction may be guarded by control flow
  2833. // and the no-overflow bits may not be valid for the expression in any
  2834. // context.
  2835. bool isInBounds = GEP->isInBounds();
  2836. Type *IntPtrTy = getEffectiveSCEVType(GEP->getType());
  2837. Value *Base = GEP->getOperand(0);
  2838. // Don't attempt to analyze GEPs over unsized objects.
  2839. if (!cast<PointerType>(Base->getType())->getElementType()->isSized())
  2840. return getUnknown(GEP);
  2841. const SCEV *TotalOffset = getConstant(IntPtrTy, 0);
  2842. gep_type_iterator GTI = gep_type_begin(GEP);
  2843. for (GetElementPtrInst::op_iterator I = llvm::next(GEP->op_begin()),
  2844. E = GEP->op_end();
  2845. I != E; ++I) {
  2846. Value *Index = *I;
  2847. // Compute the (potentially symbolic) offset in bytes for this index.
  2848. if (StructType *STy = dyn_cast<StructType>(*GTI++)) {
  2849. // For a struct, add the member offset.
  2850. unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue();
  2851. const SCEV *FieldOffset = getOffsetOfExpr(STy, FieldNo);
  2852. // Add the field offset to the running total offset.
  2853. TotalOffset = getAddExpr(TotalOffset, FieldOffset);
  2854. } else {
  2855. // For an array, add the element offset, explicitly scaled.
  2856. const SCEV *ElementSize = getSizeOfExpr(*GTI);
  2857. const SCEV *IndexS = getSCEV(Index);
  2858. // Getelementptr indices are signed.
  2859. IndexS = getTruncateOrSignExtend(IndexS, IntPtrTy);
  2860. // Multiply the index by the element size to compute the element offset.
  2861. const SCEV *LocalOffset = getMulExpr(IndexS, ElementSize,
  2862. isInBounds ? SCEV::FlagNSW :
  2863. SCEV::FlagAnyWrap);
  2864. // Add the element offset to the running total offset.
  2865. TotalOffset = getAddExpr(TotalOffset, LocalOffset);
  2866. }
  2867. }
  2868. // Get the SCEV for the GEP base.
  2869. const SCEV *BaseS = getSCEV(Base);
  2870. // Add the total offset from all the GEP indices to the base.
  2871. return getAddExpr(BaseS, TotalOffset,
  2872. isInBounds ? SCEV::FlagNSW : SCEV::FlagAnyWrap);
  2873. }
  2874. /// GetMinTrailingZeros - Determine the minimum number of zero bits that S is
  2875. /// guaranteed to end in (at every loop iteration). It is, at the same time,
  2876. /// the minimum number of times S is divisible by 2. For example, given {4,+,8}
  2877. /// it returns 2. If S is guaranteed to be 0, it returns the bitwidth of S.
  2878. uint32_t
  2879. ScalarEvolution::GetMinTrailingZeros(const SCEV *S) {
  2880. if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S))
  2881. return C->getValue()->getValue().countTrailingZeros();
  2882. if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(S))
  2883. return std::min(GetMinTrailingZeros(T->getOperand()),
  2884. (uint32_t)getTypeSizeInBits(T->getType()));
  2885. if (const SCEVZeroExtendExpr *E = dyn_cast<SCEVZeroExtendExpr>(S)) {
  2886. uint32_t OpRes = GetMinTrailingZeros(E->getOperand());
  2887. return OpRes == getTypeSizeInBits(E->getOperand()->getType()) ?
  2888. getTypeSizeInBits(E->getType()) : OpRes;
  2889. }
  2890. if (const SCEVSignExtendExpr *E = dyn_cast<SCEVSignExtendExpr>(S)) {
  2891. uint32_t OpRes = GetMinTrailingZeros(E->getOperand());
  2892. return OpRes == getTypeSizeInBits(E->getOperand()->getType()) ?
  2893. getTypeSizeInBits(E->getType()) : OpRes;
  2894. }
  2895. if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(S)) {
2896. // The result is the min of all operands' results.
  2897. uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0));
  2898. for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i)
  2899. MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i)));
  2900. return MinOpRes;
  2901. }
  2902. if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(S)) {
2903. // The result is the sum of all operands' results.
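// e.g. 4 * 8 = 32: trailing zeros 2 + 3 = 5, capped at the bit width.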
  2904. uint32_t SumOpRes = GetMinTrailingZeros(M->getOperand(0));
  2905. uint32_t BitWidth = getTypeSizeInBits(M->getType());
  2906. for (unsigned i = 1, e = M->getNumOperands();
  2907. SumOpRes != BitWidth && i != e; ++i)
  2908. SumOpRes = std::min(SumOpRes + GetMinTrailingZeros(M->getOperand(i)),
  2909. BitWidth);
  2910. return SumOpRes;
  2911. }
  2912. if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(S)) {
2913. // The result is the min of all operands' results.
  2914. uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0));
  2915. for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i)
  2916. MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i)));
  2917. return MinOpRes;
  2918. }
  2919. if (const SCEVSMaxExpr *M = dyn_cast<SCEVSMaxExpr>(S)) {
2921. // The result is the min of all operands' results.
  2921. uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0));
  2922. for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i)
  2923. MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i)));
  2924. return MinOpRes;
  2925. }
  2926. if (const SCEVUMaxExpr *M = dyn_cast<SCEVUMaxExpr>(S)) {
2927. // The result is the min of all operands' results.
  2928. uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0));
  2929. for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i)
  2930. MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i)));
  2931. return MinOpRes;
  2932. }
  2933. if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
  2934. // For a SCEVUnknown, ask ValueTracking.
  2935. unsigned BitWidth = getTypeSizeInBits(U->getType());
  2936. APInt Zeros(BitWidth, 0), Ones(BitWidth, 0);
  2937. ComputeMaskedBits(U->getValue(), Zeros, Ones);
  2938. return Zeros.countTrailingOnes();
  2939. }
2940. // SCEVUDivExpr: conservatively assume no trailing zeros are known.
  2941. return 0;
  2942. }
  2943. /// getUnsignedRange - Determine the unsigned range for a particular SCEV.
  2944. ///
  2945. ConstantRange
  2946. ScalarEvolution::getUnsignedRange(const SCEV *S) {
  2947. // See if we've computed this range already.
  2948. DenseMap<const SCEV *, ConstantRange>::iterator I = UnsignedRanges.find(S);
  2949. if (I != UnsignedRanges.end())
  2950. return I->second;
  2951. if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S))
  2952. return setUnsignedRange(C, ConstantRange(C->getValue()->getValue()));
  2953. unsigned BitWidth = getTypeSizeInBits(S->getType());
  2954. ConstantRange ConservativeResult(BitWidth, /*isFullSet=*/true);
  2955. // If the value has known zeros, the maximum unsigned value will have those
  2956. // known zeros as well.
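// For example, an 8-bit value known to have 3 trailing zero bits gets the
// range [0, 0xF8 + 1), i.e. a maximum of 248.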
  2957. uint32_t TZ = GetMinTrailingZeros(S);
  2958. if (TZ != 0)
  2959. ConservativeResult =
  2960. ConstantRange(APInt::getMinValue(BitWidth),
  2961. APInt::getMaxValue(BitWidth).lshr(TZ).shl(TZ) + 1);
  2962. if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
  2963. ConstantRange X = getUnsignedRange(Add->getOperand(0));
  2964. for (unsigned i = 1, e = Add->getNumOperands(); i != e; ++i)
  2965. X = X.add(getUnsignedRange(Add->getOperand(i)));
  2966. return setUnsignedRange(Add, ConservativeResult.intersectWith(X));
  2967. }
  2968. if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S)) {
  2969. ConstantRange X = getUnsignedRange(Mul->getOperand(0));
  2970. for (unsigned i = 1, e = Mul->getNumOperands(); i != e; ++i)
  2971. X = X.multiply(getUnsignedRange(Mul->getOperand(i)));
  2972. return setUnsignedRange(Mul, ConservativeResult.intersectWith(X));
  2973. }
  2974. if (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(S)) {
  2975. ConstantRange X = getUnsignedRange(SMax->getOperand(0));
  2976. for (unsigned i = 1, e = SMax->getNumOperands(); i != e; ++i)
  2977. X = X.smax(getUnsignedRange(SMax->getOperand(i)));
  2978. return setUnsignedRange(SMax, ConservativeResult.intersectWith(X));
  2979. }
  2980. if (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(S)) {
  2981. ConstantRange X = getUnsignedRange(UMax->getOperand(0));
  2982. for (unsigned i = 1, e = UMax->getNumOperands(); i != e; ++i)
  2983. X = X.umax(getUnsignedRange(UMax->getOperand(i)));
  2984. return setUnsignedRange(UMax, ConservativeResult.intersectWith(X));
  2985. }
  2986. if (const SCEVUDivExpr *UDiv = dyn_cast<SCEVUDivExpr>(S)) {
  2987. ConstantRange X = getUnsignedRange(UDiv->getLHS());
  2988. ConstantRange Y = getUnsignedRange(UDiv->getRHS());
  2989. return setUnsignedRange(UDiv, ConservativeResult.intersectWith(X.udiv(Y)));
  2990. }
  2991. if (const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(S)) {
  2992. ConstantRange X = getUnsignedRange(ZExt->getOperand());
  2993. return setUnsignedRange(ZExt,
  2994. ConservativeResult.intersectWith(X.zeroExtend(BitWidth)));
  2995. }
  2996. if (const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(S)) {
  2997. ConstantRange X = getUnsignedRange(SExt->getOperand());
  2998. return setUnsignedRange(SExt,
  2999. ConservativeResult.intersectWith(X.signExtend(BitWidth)));
  3000. }
  3001. if (const SCEVTruncateExpr *Trunc = dyn_cast<SCEVTruncateExpr>(S)) {
  3002. ConstantRange X = getUnsignedRange(Trunc->getOperand());
  3003. return setUnsignedRange(Trunc,
  3004. ConservativeResult.intersectWith(X.truncate(BitWidth)));
  3005. }
  3006. if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(S)) {
  3007. // If there's no unsigned wrap, the value will never be less than its
  3008. // initial value.
  3009. if (AddRec->getNoWrapFlags(SCEV::FlagNUW))
  3010. if (const SCEVConstant *C = dyn_cast<SCEVConstant>(AddRec->getStart()))
  3011. if (!C->getValue()->isZero())
  3012. ConservativeResult =
  3013. ConservativeResult.intersectWith(
  3014. ConstantRange(C->getValue()->getValue(), APInt(BitWidth, 0)));
  3015. // TODO: non-affine addrec
  3016. if (AddRec->isAffine()) {
  3017. Type *Ty = AddRec->getType();
  3018. const SCEV *MaxBECount = getMaxBackedgeTakenCount(AddRec->getLoop());
  3019. if (!isa<SCEVCouldNotCompute>(MaxBECount) &&
  3020. getTypeSizeInBits(MaxBECount->getType()) <= BitWidth) {
  3021. MaxBECount = getNoopOrZeroExtend(MaxBECount, Ty);
  3022. const SCEV *Start = AddRec->getStart();
  3023. const SCEV *Step = AddRec->getStepRecurrence(*this);
  3024. ConstantRange StartRange = getUnsignedRange(Start);
  3025. ConstantRange StepRange = getSignedRange(Step);
  3026. ConstantRange MaxBECountRange = getUnsignedRange(MaxBECount);
  3027. ConstantRange EndRange =
  3028. StartRange.add(MaxBECountRange.multiply(StepRange));
  3029. // Check for overflow. This must be done with ConstantRange arithmetic
  3030. // because we could be called from within the ScalarEvolution overflow
  3031. // checking code.
  3032. ConstantRange ExtStartRange = StartRange.zextOrTrunc(BitWidth*2+1);
  3033. ConstantRange ExtStepRange = StepRange.sextOrTrunc(BitWidth*2+1);
  3034. ConstantRange ExtMaxBECountRange =
  3035. MaxBECountRange.zextOrTrunc(BitWidth*2+1);
  3036. ConstantRange ExtEndRange = EndRange.zextOrTrunc(BitWidth*2+1);
  3037. if (ExtStartRange.add(ExtMaxBECountRange.multiply(ExtStepRange)) !=
  3038. ExtEndRange)
  3039. return setUnsignedRange(AddRec, ConservativeResult);
  3040. APInt Min = APIntOps::umin(StartRange.getUnsignedMin(),
  3041. EndRange.getUnsignedMin());
  3042. APInt Max = APIntOps::umax(StartRange.getUnsignedMax(),
  3043. EndRange.getUnsignedMax());
  3044. if (Min.isMinValue() && Max.isMaxValue())
  3045. return setUnsignedRange(AddRec, ConservativeResult);
  3046. return setUnsignedRange(AddRec,
  3047. ConservativeResult.intersectWith(ConstantRange(Min, Max+1)));
  3048. }
  3049. }
  3050. return setUnsignedRange(AddRec, ConservativeResult);
  3051. }
  3052. if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
  3053. // For a SCEVUnknown, ask ValueTracking.
  3054. APInt Zeros(BitWidth, 0), Ones(BitWidth, 0);
  3055. ComputeMaskedBits(U->getValue(), Zeros, Ones, TD);
  3056. if (Ones == ~Zeros + 1)
  3057. return setUnsignedRange(U, ConservativeResult);
  3058. return setUnsignedRange(U,
  3059. ConservativeResult.intersectWith(ConstantRange(Ones, ~Zeros + 1)));
  3060. }
  3061. return setUnsignedRange(S, ConservativeResult);
  3062. }
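// A worked example of the affine AddRec case above, for illustration only
// (the recurrence and loop bound are hypothetical): for an i8 recurrence
// {4,+,2}<nuw> in a loop whose max backedge-taken count is 10,
//   StartRange = [4,5), StepRange = [2,3), MaxBECountRange = [10,11)
//   EndRange   = [4,5) + [10,11)*[2,3) = [24,25)
// The overflow re-check in 17 bits (2*8+1) agrees, so the result after
// intersecting with the conservative range is
//   [umin(4,24), umax(4,24)+1) = [4,25)
// i.e. the value is known to lie in [4, 24].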
  3063. /// getSignedRange - Determine the signed range for a particular SCEV.
  3064. ///
  3065. ConstantRange
  3066. ScalarEvolution::getSignedRange(const SCEV *S) {
  3067. // See if we've computed this range already.
  3068. DenseMap<const SCEV *, ConstantRange>::iterator I = SignedRanges.find(S);
  3069. if (I != SignedRanges.end())
  3070. return I->second;
  3071. if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S))
  3072. return setSignedRange(C, ConstantRange(C->getValue()->getValue()));
  3073. unsigned BitWidth = getTypeSizeInBits(S->getType());
  3074. ConstantRange ConservativeResult(BitWidth, /*isFullSet=*/true);
  3075. // If the value has known zeros, the maximum signed value will have those
  3076. // known zeros as well.
  3077. uint32_t TZ = GetMinTrailingZeros(S);
  3078. if (TZ != 0)
  3079. ConservativeResult =
  3080. ConstantRange(APInt::getSignedMinValue(BitWidth),
  3081. APInt::getSignedMaxValue(BitWidth).ashr(TZ).shl(TZ) + 1);
  3082. if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
  3083. ConstantRange X = getSignedRange(Add->getOperand(0));
  3084. for (unsigned i = 1, e = Add->getNumOperands(); i != e; ++i)
  3085. X = X.add(getSignedRange(Add->getOperand(i)));
  3086. return setSignedRange(Add, ConservativeResult.intersectWith(X));
  3087. }
  3088. if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S)) {
  3089. ConstantRange X = getSignedRange(Mul->getOperand(0));
  3090. for (unsigned i = 1, e = Mul->getNumOperands(); i != e; ++i)
  3091. X = X.multiply(getSignedRange(Mul->getOperand(i)));
  3092. return setSignedRange(Mul, ConservativeResult.intersectWith(X));
  3093. }
  3094. if (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(S)) {
  3095. ConstantRange X = getSignedRange(SMax->getOperand(0));
  3096. for (unsigned i = 1, e = SMax->getNumOperands(); i != e; ++i)
  3097. X = X.smax(getSignedRange(SMax->getOperand(i)));
  3098. return setSignedRange(SMax, ConservativeResult.intersectWith(X));
  3099. }
  3100. if (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(S)) {
  3101. ConstantRange X = getSignedRange(UMax->getOperand(0));
  3102. for (unsigned i = 1, e = UMax->getNumOperands(); i != e; ++i)
  3103. X = X.umax(getSignedRange(UMax->getOperand(i)));
  3104. return setSignedRange(UMax, ConservativeResult.intersectWith(X));
  3105. }
  3106. if (const SCEVUDivExpr *UDiv = dyn_cast<SCEVUDivExpr>(S)) {
  3107. ConstantRange X = getSignedRange(UDiv->getLHS());
  3108. ConstantRange Y = getSignedRange(UDiv->getRHS());
  3109. return setSignedRange(UDiv, ConservativeResult.intersectWith(X.udiv(Y)));
  3110. }
  3111. if (const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(S)) {
  3112. ConstantRange X = getSignedRange(ZExt->getOperand());
  3113. return setSignedRange(ZExt,
  3114. ConservativeResult.intersectWith(X.zeroExtend(BitWidth)));
  3115. }
  3116. if (const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(S)) {
  3117. ConstantRange X = getSignedRange(SExt->getOperand());
  3118. return setSignedRange(SExt,
  3119. ConservativeResult.intersectWith(X.signExtend(BitWidth)));
  3120. }
  3121. if (const SCEVTruncateExpr *Trunc = dyn_cast<SCEVTruncateExpr>(S)) {
  3122. ConstantRange X = getSignedRange(Trunc->getOperand());
  3123. return setSignedRange(Trunc,
  3124. ConservativeResult.intersectWith(X.truncate(BitWidth)));
  3125. }
  3126. if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(S)) {
  3127. // If there's no signed wrap, and all the operands have the same sign or
  3128. // zero, the value won't ever change sign.
  3129. if (AddRec->getNoWrapFlags(SCEV::FlagNSW)) {
  3130. bool AllNonNeg = true;
  3131. bool AllNonPos = true;
  3132. for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) {
  3133. if (!isKnownNonNegative(AddRec->getOperand(i))) AllNonNeg = false;
  3134. if (!isKnownNonPositive(AddRec->getOperand(i))) AllNonPos = false;
  3135. }
  3136. if (AllNonNeg)
  3137. ConservativeResult = ConservativeResult.intersectWith(
  3138. ConstantRange(APInt(BitWidth, 0),
  3139. APInt::getSignedMinValue(BitWidth)));
  3140. else if (AllNonPos)
  3141. ConservativeResult = ConservativeResult.intersectWith(
  3142. ConstantRange(APInt::getSignedMinValue(BitWidth),
  3143. APInt(BitWidth, 1)));
  3144. }
  3145. // TODO: non-affine addrec
  3146. if (AddRec->isAffine()) {
  3147. Type *Ty = AddRec->getType();
  3148. const SCEV *MaxBECount = getMaxBackedgeTakenCount(AddRec->getLoop());
  3149. if (!isa<SCEVCouldNotCompute>(MaxBECount) &&
  3150. getTypeSizeInBits(MaxBECount->getType()) <= BitWidth) {
  3151. MaxBECount = getNoopOrZeroExtend(MaxBECount, Ty);
  3152. const SCEV *Start = AddRec->getStart();
  3153. const SCEV *Step = AddRec->getStepRecurrence(*this);
  3154. ConstantRange StartRange = getSignedRange(Start);
  3155. ConstantRange StepRange = getSignedRange(Step);
  3156. ConstantRange MaxBECountRange = getUnsignedRange(MaxBECount);
  3157. ConstantRange EndRange =
  3158. StartRange.add(MaxBECountRange.multiply(StepRange));
  3159. // Check for overflow. This must be done with ConstantRange arithmetic
  3160. // because we could be called from within the ScalarEvolution overflow
  3161. // checking code.
  3162. ConstantRange ExtStartRange = StartRange.sextOrTrunc(BitWidth*2+1);
  3163. ConstantRange ExtStepRange = StepRange.sextOrTrunc(BitWidth*2+1);
  3164. ConstantRange ExtMaxBECountRange =
  3165. MaxBECountRange.zextOrTrunc(BitWidth*2+1);
  3166. ConstantRange ExtEndRange = EndRange.sextOrTrunc(BitWidth*2+1);
  3167. if (ExtStartRange.add(ExtMaxBECountRange.multiply(ExtStepRange)) !=
  3168. ExtEndRange)
  3169. return setSignedRange(AddRec, ConservativeResult);
  3170. APInt Min = APIntOps::smin(StartRange.getSignedMin(),
  3171. EndRange.getSignedMin());
  3172. APInt Max = APIntOps::smax(StartRange.getSignedMax(),
  3173. EndRange.getSignedMax());
  3174. if (Min.isMinSignedValue() && Max.isMaxSignedValue())
  3175. return setSignedRange(AddRec, ConservativeResult);
  3176. return setSignedRange(AddRec,
  3177. ConservativeResult.intersectWith(ConstantRange(Min, Max+1)));
  3178. }
  3179. }
  3180. return setSignedRange(AddRec, ConservativeResult);
  3181. }
  3182. if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
  3183. // For a SCEVUnknown, ask ValueTracking.
  3184. if (!U->getValue()->getType()->isIntegerTy() && !TD)
  3185. return setSignedRange(U, ConservativeResult);
  3186. unsigned NS = ComputeNumSignBits(U->getValue(), TD);
  3187. if (NS == 1)
  3188. return setSignedRange(U, ConservativeResult);
  3189. return setSignedRange(U, ConservativeResult.intersectWith(
  3190. ConstantRange(APInt::getSignedMinValue(BitWidth).ashr(NS - 1),
  3191. APInt::getSignedMaxValue(BitWidth).ashr(NS - 1)+1)));
  3192. }
  3193. return setSignedRange(S, ConservativeResult);
  3194. }
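// For illustration: a minimal sketch of how a client might use the signed
// range computed above to prove non-negativity of a SCEV, similar in spirit
// to the isKnownNonNegative query used elsewhere in this file. 'SE' and 'S'
// are assumed to be a ScalarEvolution instance and a SCEV the caller holds.
#if 0
bool KnownNonNegative = SE.getSignedRange(S).getSignedMin().isNonNegative();
#endif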
  3195. /// createSCEV - We know that there is no SCEV for the specified value.
  3196. /// Analyze the expression.
  3197. ///
  3198. const SCEV *ScalarEvolution::createSCEV(Value *V) {
  3199. if (!isSCEVable(V->getType()))
  3200. return getUnknown(V);
  3201. unsigned Opcode = Instruction::UserOp1;
  3202. if (Instruction *I = dyn_cast<Instruction>(V)) {
  3203. Opcode = I->getOpcode();
  3204. // Don't attempt to analyze instructions in blocks that aren't
  3205. // reachable. Such instructions don't matter, and they aren't required
  3206. // to obey basic rules for definitions dominating uses which this
  3207. // analysis depends on.
  3208. if (!DT->isReachableFromEntry(I->getParent()))
  3209. return getUnknown(V);
  3210. } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
  3211. Opcode = CE->getOpcode();
  3212. else if (ConstantInt *CI = dyn_cast<ConstantInt>(V))
  3213. return getConstant(CI);
  3214. else if (isa<ConstantPointerNull>(V))
  3215. return getConstant(V->getType(), 0);
  3216. else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V))
  3217. return GA->mayBeOverridden() ? getUnknown(V) : getSCEV(GA->getAliasee());
  3218. else
  3219. return getUnknown(V);
  3220. Operator *U = cast<Operator>(V);
  3221. switch (Opcode) {
  3222. case Instruction::Add: {
  3223. // The simple thing to do would be to just call getSCEV on both operands
// and call getAddExpr with the result. However, if we're looking at a
  3225. // bunch of things all added together, this can be quite inefficient,
  3226. // because it leads to N-1 getAddExpr calls for N ultimate operands.
  3227. // Instead, gather up all the operands and make a single getAddExpr call.
  3228. // LLVM IR canonical form means we need only traverse the left operands.
  3229. //
  3230. // Don't apply this instruction's NSW or NUW flags to the new
  3231. // expression. The instruction may be guarded by control flow that the
  3232. // no-wrap behavior depends on. Non-control-equivalent instructions can be
  3233. // mapped to the same SCEV expression, and it would be incorrect to transfer
  3234. // NSW/NUW semantics to those operations.
  3235. SmallVector<const SCEV *, 4> AddOps;
  3236. AddOps.push_back(getSCEV(U->getOperand(1)));
  3237. for (Value *Op = U->getOperand(0); ; Op = U->getOperand(0)) {
  3238. unsigned Opcode = Op->getValueID() - Value::InstructionVal;
  3239. if (Opcode != Instruction::Add && Opcode != Instruction::Sub)
  3240. break;
  3241. U = cast<Operator>(Op);
  3242. const SCEV *Op1 = getSCEV(U->getOperand(1));
  3243. if (Opcode == Instruction::Sub)
  3244. AddOps.push_back(getNegativeSCEV(Op1));
  3245. else
  3246. AddOps.push_back(Op1);
  3247. }
  3248. AddOps.push_back(getSCEV(U->getOperand(0)));
  3249. return getAddExpr(AddOps);
  3250. }
  3251. case Instruction::Mul: {
  3252. // Don't transfer NSW/NUW for the same reason as AddExpr.
  3253. SmallVector<const SCEV *, 4> MulOps;
  3254. MulOps.push_back(getSCEV(U->getOperand(1)));
  3255. for (Value *Op = U->getOperand(0);
  3256. Op->getValueID() == Instruction::Mul + Value::InstructionVal;
  3257. Op = U->getOperand(0)) {
  3258. U = cast<Operator>(Op);
  3259. MulOps.push_back(getSCEV(U->getOperand(1)));
  3260. }
  3261. MulOps.push_back(getSCEV(U->getOperand(0)));
  3262. return getMulExpr(MulOps);
  3263. }
  3264. case Instruction::UDiv:
  3265. return getUDivExpr(getSCEV(U->getOperand(0)),
  3266. getSCEV(U->getOperand(1)));
  3267. case Instruction::Sub:
  3268. return getMinusSCEV(getSCEV(U->getOperand(0)),
  3269. getSCEV(U->getOperand(1)));
  3270. case Instruction::And:
  3271. // For an expression like x&255 that merely masks off the high bits,
  3272. // use zext(trunc(x)) as the SCEV expression.
  3273. if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1))) {
  3274. if (CI->isNullValue())
  3275. return getSCEV(U->getOperand(1));
  3276. if (CI->isAllOnesValue())
  3277. return getSCEV(U->getOperand(0));
  3278. const APInt &A = CI->getValue();
  3279. // Instcombine's ShrinkDemandedConstant may strip bits out of
  3280. // constants, obscuring what would otherwise be a low-bits mask.
  3281. // Use ComputeMaskedBits to compute what ShrinkDemandedConstant
  3282. // knew about to reconstruct a low-bits mask value.
  3283. unsigned LZ = A.countLeadingZeros();
  3284. unsigned BitWidth = A.getBitWidth();
  3285. APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
  3286. ComputeMaskedBits(U->getOperand(0), KnownZero, KnownOne, TD);
  3287. APInt EffectiveMask = APInt::getLowBitsSet(BitWidth, BitWidth - LZ);
  3288. if (LZ != 0 && !((~A & ~KnownZero) & EffectiveMask))
  3289. return
  3290. getZeroExtendExpr(getTruncateExpr(getSCEV(U->getOperand(0)),
  3291. IntegerType::get(getContext(), BitWidth - LZ)),
  3292. U->getType());
  3293. }
  3294. break;
  3295. case Instruction::Or:
  3296. // If the RHS of the Or is a constant, we may have something like:
  3297. // X*4+1 which got turned into X*4|1. Handle this as an Add so loop
  3298. // optimizations will transparently handle this case.
  3299. //
  3300. // In order for this transformation to be safe, the LHS must be of the
  3301. // form X*(2^n) and the Or constant must be less than 2^n.
  3302. if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1))) {
  3303. const SCEV *LHS = getSCEV(U->getOperand(0));
  3304. const APInt &CIVal = CI->getValue();
  3305. if (GetMinTrailingZeros(LHS) >=
  3306. (CIVal.getBitWidth() - CIVal.countLeadingZeros())) {
  3307. // Build a plain add SCEV.
  3308. const SCEV *S = getAddExpr(LHS, getSCEV(CI));
  3309. // If the LHS of the add was an addrec and it has no-wrap flags,
  3310. // transfer the no-wrap flags, since an or won't introduce a wrap.
  3311. if (const SCEVAddRecExpr *NewAR = dyn_cast<SCEVAddRecExpr>(S)) {
  3312. const SCEVAddRecExpr *OldAR = cast<SCEVAddRecExpr>(LHS);
  3313. const_cast<SCEVAddRecExpr *>(NewAR)->setNoWrapFlags(
  3314. OldAR->getNoWrapFlags());
  3315. }
  3316. return S;
  3317. }
  3318. }
  3319. break;
  3320. case Instruction::Xor:
  3321. if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1))) {
  3322. // If the RHS of the xor is a signbit, then this is just an add.
  3323. // Instcombine turns add of signbit into xor as a strength reduction step.
  3324. if (CI->getValue().isSignBit())
  3325. return getAddExpr(getSCEV(U->getOperand(0)),
  3326. getSCEV(U->getOperand(1)));
  3327. // If the RHS of xor is -1, then this is a not operation.
  3328. if (CI->isAllOnesValue())
  3329. return getNotSCEV(getSCEV(U->getOperand(0)));
  3330. // Model xor(and(x, C), C) as and(~x, C), if C is a low-bits mask.
  3331. // This is a variant of the check for xor with -1, and it handles
  3332. // the case where instcombine has trimmed non-demanded bits out
  3333. // of an xor with -1.
  3334. if (BinaryOperator *BO = dyn_cast<BinaryOperator>(U->getOperand(0)))
  3335. if (ConstantInt *LCI = dyn_cast<ConstantInt>(BO->getOperand(1)))
  3336. if (BO->getOpcode() == Instruction::And &&
  3337. LCI->getValue() == CI->getValue())
  3338. if (const SCEVZeroExtendExpr *Z =
  3339. dyn_cast<SCEVZeroExtendExpr>(getSCEV(U->getOperand(0)))) {
  3340. Type *UTy = U->getType();
  3341. const SCEV *Z0 = Z->getOperand();
  3342. Type *Z0Ty = Z0->getType();
  3343. unsigned Z0TySize = getTypeSizeInBits(Z0Ty);
  3344. // If C is a low-bits mask, the zero extend is serving to
  3345. // mask off the high bits. Complement the operand and
  3346. // re-apply the zext.
  3347. if (APIntOps::isMask(Z0TySize, CI->getValue()))
  3348. return getZeroExtendExpr(getNotSCEV(Z0), UTy);
  3349. // If C is a single bit, it may be in the sign-bit position
  3350. // before the zero-extend. In this case, represent the xor
  3351. // using an add, which is equivalent, and re-apply the zext.
  3352. APInt Trunc = CI->getValue().trunc(Z0TySize);
  3353. if (Trunc.zext(getTypeSizeInBits(UTy)) == CI->getValue() &&
  3354. Trunc.isSignBit())
  3355. return getZeroExtendExpr(getAddExpr(Z0, getConstant(Trunc)),
  3356. UTy);
  3357. }
  3358. }
  3359. break;
  3360. case Instruction::Shl:
  3361. // Turn shift left of a constant amount into a multiply.
  3362. if (ConstantInt *SA = dyn_cast<ConstantInt>(U->getOperand(1))) {
  3363. uint32_t BitWidth = cast<IntegerType>(U->getType())->getBitWidth();
  3364. // If the shift count is not less than the bitwidth, the result of
  3365. // the shift is undefined. Don't try to analyze it, because the
  3366. // resolution chosen here may differ from the resolution chosen in
  3367. // other parts of the compiler.
  3368. if (SA->getValue().uge(BitWidth))
  3369. break;
  3370. Constant *X = ConstantInt::get(getContext(),
  3371. APInt(BitWidth, 1).shl(SA->getZExtValue()));
  3372. return getMulExpr(getSCEV(U->getOperand(0)), getSCEV(X));
  3373. }
  3374. break;
  3375. case Instruction::LShr:
// Turn logical shift right of a constant into an unsigned divide.
  3377. if (ConstantInt *SA = dyn_cast<ConstantInt>(U->getOperand(1))) {
  3378. uint32_t BitWidth = cast<IntegerType>(U->getType())->getBitWidth();
  3379. // If the shift count is not less than the bitwidth, the result of
  3380. // the shift is undefined. Don't try to analyze it, because the
  3381. // resolution chosen here may differ from the resolution chosen in
  3382. // other parts of the compiler.
  3383. if (SA->getValue().uge(BitWidth))
  3384. break;
  3385. Constant *X = ConstantInt::get(getContext(),
  3386. APInt(BitWidth, 1).shl(SA->getZExtValue()));
  3387. return getUDivExpr(getSCEV(U->getOperand(0)), getSCEV(X));
  3388. }
  3389. break;
  3390. case Instruction::AShr:
  3391. // For a two-shift sext-inreg, use sext(trunc(x)) as the SCEV expression.
  3392. if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1)))
  3393. if (Operator *L = dyn_cast<Operator>(U->getOperand(0)))
  3394. if (L->getOpcode() == Instruction::Shl &&
  3395. L->getOperand(1) == U->getOperand(1)) {
  3396. uint64_t BitWidth = getTypeSizeInBits(U->getType());
  3397. // If the shift count is not less than the bitwidth, the result of
  3398. // the shift is undefined. Don't try to analyze it, because the
  3399. // resolution chosen here may differ from the resolution chosen in
  3400. // other parts of the compiler.
  3401. if (CI->getValue().uge(BitWidth))
  3402. break;
  3403. uint64_t Amt = BitWidth - CI->getZExtValue();
  3404. if (Amt == BitWidth)
  3405. return getSCEV(L->getOperand(0)); // shift by zero --> noop
  3406. return
  3407. getSignExtendExpr(getTruncateExpr(getSCEV(L->getOperand(0)),
  3408. IntegerType::get(getContext(),
  3409. Amt)),
  3410. U->getType());
  3411. }
  3412. break;
  3413. case Instruction::Trunc:
  3414. return getTruncateExpr(getSCEV(U->getOperand(0)), U->getType());
  3415. case Instruction::ZExt:
  3416. return getZeroExtendExpr(getSCEV(U->getOperand(0)), U->getType());
  3417. case Instruction::SExt:
  3418. return getSignExtendExpr(getSCEV(U->getOperand(0)), U->getType());
  3419. case Instruction::BitCast:
  3420. // BitCasts are no-op casts so we just eliminate the cast.
  3421. if (isSCEVable(U->getType()) && isSCEVable(U->getOperand(0)->getType()))
  3422. return getSCEV(U->getOperand(0));
  3423. break;
// It's tempting to handle inttoptr and ptrtoint as no-ops; however, this can
  3425. // lead to pointer expressions which cannot safely be expanded to GEPs,
  3426. // because ScalarEvolution doesn't respect the GEP aliasing rules when
  3427. // simplifying integer expressions.
  3428. case Instruction::GetElementPtr:
  3429. return createNodeForGEP(cast<GEPOperator>(U));
  3430. case Instruction::PHI:
  3431. return createNodeForPHI(cast<PHINode>(U));
  3432. case Instruction::Select:
  3433. // This could be a smax or umax that was lowered earlier.
  3434. // Try to recover it.
  3435. if (ICmpInst *ICI = dyn_cast<ICmpInst>(U->getOperand(0))) {
  3436. Value *LHS = ICI->getOperand(0);
  3437. Value *RHS = ICI->getOperand(1);
  3438. switch (ICI->getPredicate()) {
  3439. case ICmpInst::ICMP_SLT:
  3440. case ICmpInst::ICMP_SLE:
  3441. std::swap(LHS, RHS);
  3442. // fall through
  3443. case ICmpInst::ICMP_SGT:
  3444. case ICmpInst::ICMP_SGE:
  3445. // a >s b ? a+x : b+x -> smax(a, b)+x
  3446. // a >s b ? b+x : a+x -> smin(a, b)+x
  3447. if (LHS->getType() == U->getType()) {
  3448. const SCEV *LS = getSCEV(LHS);
  3449. const SCEV *RS = getSCEV(RHS);
  3450. const SCEV *LA = getSCEV(U->getOperand(1));
  3451. const SCEV *RA = getSCEV(U->getOperand(2));
  3452. const SCEV *LDiff = getMinusSCEV(LA, LS);
  3453. const SCEV *RDiff = getMinusSCEV(RA, RS);
  3454. if (LDiff == RDiff)
  3455. return getAddExpr(getSMaxExpr(LS, RS), LDiff);
  3456. LDiff = getMinusSCEV(LA, RS);
  3457. RDiff = getMinusSCEV(RA, LS);
  3458. if (LDiff == RDiff)
  3459. return getAddExpr(getSMinExpr(LS, RS), LDiff);
  3460. }
  3461. break;
  3462. case ICmpInst::ICMP_ULT:
  3463. case ICmpInst::ICMP_ULE:
  3464. std::swap(LHS, RHS);
  3465. // fall through
  3466. case ICmpInst::ICMP_UGT:
  3467. case ICmpInst::ICMP_UGE:
  3468. // a >u b ? a+x : b+x -> umax(a, b)+x
  3469. // a >u b ? b+x : a+x -> umin(a, b)+x
  3470. if (LHS->getType() == U->getType()) {
  3471. const SCEV *LS = getSCEV(LHS);
  3472. const SCEV *RS = getSCEV(RHS);
  3473. const SCEV *LA = getSCEV(U->getOperand(1));
  3474. const SCEV *RA = getSCEV(U->getOperand(2));
  3475. const SCEV *LDiff = getMinusSCEV(LA, LS);
  3476. const SCEV *RDiff = getMinusSCEV(RA, RS);
  3477. if (LDiff == RDiff)
  3478. return getAddExpr(getUMaxExpr(LS, RS), LDiff);
  3479. LDiff = getMinusSCEV(LA, RS);
  3480. RDiff = getMinusSCEV(RA, LS);
  3481. if (LDiff == RDiff)
  3482. return getAddExpr(getUMinExpr(LS, RS), LDiff);
  3483. }
  3484. break;
  3485. case ICmpInst::ICMP_NE:
  3486. // n != 0 ? n+x : 1+x -> umax(n, 1)+x
  3487. if (LHS->getType() == U->getType() &&
  3488. isa<ConstantInt>(RHS) &&
  3489. cast<ConstantInt>(RHS)->isZero()) {
  3490. const SCEV *One = getConstant(LHS->getType(), 1);
  3491. const SCEV *LS = getSCEV(LHS);
  3492. const SCEV *LA = getSCEV(U->getOperand(1));
  3493. const SCEV *RA = getSCEV(U->getOperand(2));
  3494. const SCEV *LDiff = getMinusSCEV(LA, LS);
  3495. const SCEV *RDiff = getMinusSCEV(RA, One);
  3496. if (LDiff == RDiff)
  3497. return getAddExpr(getUMaxExpr(One, LS), LDiff);
  3498. }
  3499. break;
  3500. case ICmpInst::ICMP_EQ:
  3501. // n == 0 ? 1+x : n+x -> umax(n, 1)+x
  3502. if (LHS->getType() == U->getType() &&
  3503. isa<ConstantInt>(RHS) &&
  3504. cast<ConstantInt>(RHS)->isZero()) {
  3505. const SCEV *One = getConstant(LHS->getType(), 1);
  3506. const SCEV *LS = getSCEV(LHS);
  3507. const SCEV *LA = getSCEV(U->getOperand(1));
  3508. const SCEV *RA = getSCEV(U->getOperand(2));
  3509. const SCEV *LDiff = getMinusSCEV(LA, One);
  3510. const SCEV *RDiff = getMinusSCEV(RA, LS);
  3511. if (LDiff == RDiff)
  3512. return getAddExpr(getUMaxExpr(One, LS), LDiff);
  3513. }
  3514. break;
  3515. default:
  3516. break;
  3517. }
  3518. }
  3519. default: // We cannot analyze this expression.
  3520. break;
  3521. }
  3522. return getUnknown(V);
  3523. }
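// For illustration, a sketch of the mapping createSCEV produces for a simple
// chain of IR adds (the values %x and %y are hypothetical):
//   %t = add nsw i32 %x, %y
//   %r = add nsw i32 %t, 5
// getSCEV(%r) gathers all the add operands in one pass and returns
// (5 + %x + %y); per the comment in the Add case above, the instructions'
// nsw flags are deliberately not transferred to the SCEV expression.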
  3524. //===----------------------------------------------------------------------===//
  3525. // Iteration Count Computation Code
  3526. //
  3527. /// getSmallConstantTripCount - Returns the maximum trip count of this loop as a
  3528. /// normal unsigned value. Returns 0 if the trip count is unknown or not
/// constant. It will also return 0 if the maximum trip count is very large (>=
  3530. /// 2^32).
  3531. ///
  3532. /// This "trip count" assumes that control exits via ExitingBlock. More
  3533. /// precisely, it is the number of times that control may reach ExitingBlock
  3534. /// before taking the branch. For loops with multiple exits, it may not be the
/// number of times that the loop header executes because the loop may exit
  3536. /// prematurely via another branch.
  3537. unsigned ScalarEvolution::
  3538. getSmallConstantTripCount(Loop *L, BasicBlock *ExitingBlock) {
  3539. const SCEVConstant *ExitCount =
  3540. dyn_cast<SCEVConstant>(getExitCount(L, ExitingBlock));
  3541. if (!ExitCount)
  3542. return 0;
  3543. ConstantInt *ExitConst = ExitCount->getValue();
  3544. // Guard against huge trip counts.
  3545. if (ExitConst->getValue().getActiveBits() > 32)
  3546. return 0;
  3547. // In case of integer overflow, this returns 0, which is correct.
  3548. return ((unsigned)ExitConst->getZExtValue()) + 1;
  3549. }
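// For illustration: a minimal sketch of how a client (e.g. an unroller) might
// query this trip count. 'SE' and 'L' are assumed to be a ScalarEvolution
// reference and a Loop the caller already has.
#if 0
unsigned TripCount = 0;
if (BasicBlock *ExitingBB = L->getExitingBlock()) // null if multiple exits
  TripCount = SE.getSmallConstantTripCount(L, ExitingBB);
// TripCount == 0 means the count is unknown, non-constant, or >= 2^32.
#endif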
  3550. /// getSmallConstantTripMultiple - Returns the largest constant divisor of the
  3551. /// trip count of this loop as a normal unsigned value, if possible. This
  3552. /// means that the actual trip count is always a multiple of the returned
  3553. /// value (don't forget the trip count could very well be zero as well!).
  3554. ///
/// Returns 1 if the trip count is unknown or not guaranteed to be a
/// multiple of a constant (which is also the case if the trip count is simply
/// a constant; use getSmallConstantTripCount for that case). It will also
/// return 1 if the trip count is very large (>= 2^32).
  3559. ///
  3560. /// As explained in the comments for getSmallConstantTripCount, this assumes
  3561. /// that control exits the loop via ExitingBlock.
  3562. unsigned ScalarEvolution::
  3563. getSmallConstantTripMultiple(Loop *L, BasicBlock *ExitingBlock) {
  3564. const SCEV *ExitCount = getExitCount(L, ExitingBlock);
  3565. if (ExitCount == getCouldNotCompute())
  3566. return 1;
  3567. // Get the trip count from the BE count by adding 1.
  3568. const SCEV *TCMul = getAddExpr(ExitCount,
  3569. getConstant(ExitCount->getType(), 1));
  3570. // FIXME: SCEV distributes multiplication as V1*C1 + V2*C1. We could attempt
  3571. // to factor simple cases.
  3572. if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(TCMul))
  3573. TCMul = Mul->getOperand(0);
  3574. const SCEVConstant *MulC = dyn_cast<SCEVConstant>(TCMul);
  3575. if (!MulC)
  3576. return 1;
  3577. ConstantInt *Result = MulC->getValue();
  3578. // Guard against huge trip counts (this requires checking
  3579. // for zero to handle the case where the trip count == -1 and the
  3580. // addition wraps).
  3581. if (!Result || Result->getValue().getActiveBits() > 32 ||
  3582. Result->getValue().getActiveBits() == 0)
  3583. return 1;
  3584. return (unsigned)Result->getZExtValue();
  3585. }
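// A worked example for illustration (the SCEV shown is hypothetical): if the
// backedge-taken count of a loop is (-1 + (4 * %n)), then TCMul above is
// (4 * %n); its first operand is the constant 4, so the reported trip
// multiple is 4 -- the trip count is always a multiple of 4 even though its
// exact value is unknown.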
/// getExitCount - Get the expression for the number of loop iterations for
/// which this loop is guaranteed not to exit via ExitingBlock. Otherwise
/// return SCEVCouldNotCompute.
  3589. const SCEV *ScalarEvolution::getExitCount(Loop *L, BasicBlock *ExitingBlock) {
  3590. return getBackedgeTakenInfo(L).getExact(ExitingBlock, this);
  3591. }
  3592. /// getBackedgeTakenCount - If the specified loop has a predictable
  3593. /// backedge-taken count, return it, otherwise return a SCEVCouldNotCompute
  3594. /// object. The backedge-taken count is the number of times the loop header
  3595. /// will be branched to from within the loop. This is one less than the
  3596. /// trip count of the loop, since it doesn't count the first iteration,
  3597. /// when the header is branched to from outside the loop.
  3598. ///
  3599. /// Note that it is not valid to call this method on a loop without a
  3600. /// loop-invariant backedge-taken count (see
  3601. /// hasLoopInvariantBackedgeTakenCount).
  3602. ///
  3603. const SCEV *ScalarEvolution::getBackedgeTakenCount(const Loop *L) {
  3604. return getBackedgeTakenInfo(L).getExact(this);
  3605. }
  3606. /// getMaxBackedgeTakenCount - Similar to getBackedgeTakenCount, except
  3607. /// return the least SCEV value that is known never to be less than the
  3608. /// actual backedge taken count.
  3609. const SCEV *ScalarEvolution::getMaxBackedgeTakenCount(const Loop *L) {
  3610. return getBackedgeTakenInfo(L).getMax(this);
  3611. }
  3612. /// PushLoopPHIs - Push PHI nodes in the header of the given loop
  3613. /// onto the given Worklist.
  3614. static void
  3615. PushLoopPHIs(const Loop *L, SmallVectorImpl<Instruction *> &Worklist) {
  3616. BasicBlock *Header = L->getHeader();
  3617. // Push all Loop-header PHIs onto the Worklist stack.
  3618. for (BasicBlock::iterator I = Header->begin();
  3619. PHINode *PN = dyn_cast<PHINode>(I); ++I)
  3620. Worklist.push_back(PN);
  3621. }
  3622. const ScalarEvolution::BackedgeTakenInfo &
  3623. ScalarEvolution::getBackedgeTakenInfo(const Loop *L) {
  3624. // Initially insert an invalid entry for this loop. If the insertion
  3625. // succeeds, proceed to actually compute a backedge-taken count and
  3626. // update the value. The temporary CouldNotCompute value tells SCEV
  3627. // code elsewhere that it shouldn't attempt to request a new
  3628. // backedge-taken count, which could result in infinite recursion.
  3629. std::pair<DenseMap<const Loop *, BackedgeTakenInfo>::iterator, bool> Pair =
  3630. BackedgeTakenCounts.insert(std::make_pair(L, BackedgeTakenInfo()));
  3631. if (!Pair.second)
  3632. return Pair.first->second;
  3633. // ComputeBackedgeTakenCount may allocate memory for its result. Inserting it
  3634. // into the BackedgeTakenCounts map transfers ownership. Otherwise, the result
  3635. // must be cleared in this scope.
  3636. BackedgeTakenInfo Result = ComputeBackedgeTakenCount(L);
  3637. if (Result.getExact(this) != getCouldNotCompute()) {
  3638. assert(isLoopInvariant(Result.getExact(this), L) &&
  3639. isLoopInvariant(Result.getMax(this), L) &&
  3640. "Computed backedge-taken count isn't loop invariant for loop!");
  3641. ++NumTripCountsComputed;
  3642. }
  3643. else if (Result.getMax(this) == getCouldNotCompute() &&
  3644. isa<PHINode>(L->getHeader()->begin())) {
  3645. // Only count loops that have phi nodes as not being computable.
  3646. ++NumTripCountsNotComputed;
  3647. }
  3648. // Now that we know more about the trip count for this loop, forget any
  3649. // existing SCEV values for PHI nodes in this loop since they are only
  3650. // conservative estimates made without the benefit of trip count
  3651. // information. This is similar to the code in forgetLoop, except that
  3652. // it handles SCEVUnknown PHI nodes specially.
  3653. if (Result.hasAnyInfo()) {
  3654. SmallVector<Instruction *, 16> Worklist;
  3655. PushLoopPHIs(L, Worklist);
  3656. SmallPtrSet<Instruction *, 8> Visited;
  3657. while (!Worklist.empty()) {
  3658. Instruction *I = Worklist.pop_back_val();
  3659. if (!Visited.insert(I)) continue;
  3660. ValueExprMapType::iterator It =
  3661. ValueExprMap.find_as(static_cast<Value *>(I));
  3662. if (It != ValueExprMap.end()) {
  3663. const SCEV *Old = It->second;
  3664. // SCEVUnknown for a PHI either means that it has an unrecognized
// structure, or it's a PHI that's in the process of being computed
// by createNodeForPHI. In the former case, additional loop trip
// count information isn't going to change anything. In the latter
  3668. // case, createNodeForPHI will perform the necessary updates on its
  3669. // own when it gets to that point.
  3670. if (!isa<PHINode>(I) || !isa<SCEVUnknown>(Old)) {
  3671. forgetMemoizedResults(Old);
  3672. ValueExprMap.erase(It);
  3673. }
  3674. if (PHINode *PN = dyn_cast<PHINode>(I))
  3675. ConstantEvolutionLoopExitValue.erase(PN);
  3676. }
  3677. PushDefUseChildren(I, Worklist);
  3678. }
  3679. }
  3680. // Re-lookup the insert position, since the call to
  3681. // ComputeBackedgeTakenCount above could result in a
// recursive call to getBackedgeTakenInfo (on a different
  3683. // loop), which would invalidate the iterator computed
  3684. // earlier.
  3685. return BackedgeTakenCounts.find(L)->second = Result;
  3686. }
  3687. /// forgetLoop - This method should be called by the client when it has
/// changed a loop in a way that may affect ScalarEvolution's ability to
  3689. /// compute a trip count, or if the loop is deleted.
  3690. void ScalarEvolution::forgetLoop(const Loop *L) {
  3691. // Drop any stored trip count value.
  3692. DenseMap<const Loop*, BackedgeTakenInfo>::iterator BTCPos =
  3693. BackedgeTakenCounts.find(L);
  3694. if (BTCPos != BackedgeTakenCounts.end()) {
  3695. BTCPos->second.clear();
  3696. BackedgeTakenCounts.erase(BTCPos);
  3697. }
  3698. // Drop information about expressions based on loop-header PHIs.
  3699. SmallVector<Instruction *, 16> Worklist;
  3700. PushLoopPHIs(L, Worklist);
  3701. SmallPtrSet<Instruction *, 8> Visited;
  3702. while (!Worklist.empty()) {
  3703. Instruction *I = Worklist.pop_back_val();
  3704. if (!Visited.insert(I)) continue;
  3705. ValueExprMapType::iterator It =
  3706. ValueExprMap.find_as(static_cast<Value *>(I));
  3707. if (It != ValueExprMap.end()) {
  3708. forgetMemoizedResults(It->second);
  3709. ValueExprMap.erase(It);
  3710. if (PHINode *PN = dyn_cast<PHINode>(I))
  3711. ConstantEvolutionLoopExitValue.erase(PN);
  3712. }
  3713. PushDefUseChildren(I, Worklist);
  3714. }
  3715. // Forget all contained loops too, to avoid dangling entries in the
  3716. // ValuesAtScopes map.
  3717. for (Loop::iterator I = L->begin(), E = L->end(); I != E; ++I)
  3718. forgetLoop(*I);
  3719. }
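// For illustration: a transformation that rewrites a loop's exit condition,
// restructures its body, or deletes the loop should invalidate the cached
// analysis like this ('SE' and 'L' are assumed to be held by the caller):
#if 0
SE.forgetLoop(L); // drop cached trip counts and loop-phi-based expressions
#endif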
  3720. /// forgetValue - This method should be called by the client when it has
/// changed a value in a way that may affect its value, or which may
  3722. /// disconnect it from a def-use chain linking it to a loop.
  3723. void ScalarEvolution::forgetValue(Value *V) {
  3724. Instruction *I = dyn_cast<Instruction>(V);
  3725. if (!I) return;
  3726. // Drop information about expressions based on loop-header PHIs.
  3727. SmallVector<Instruction *, 16> Worklist;
  3728. Worklist.push_back(I);
  3729. SmallPtrSet<Instruction *, 8> Visited;
  3730. while (!Worklist.empty()) {
  3731. I = Worklist.pop_back_val();
  3732. if (!Visited.insert(I)) continue;
  3733. ValueExprMapType::iterator It =
  3734. ValueExprMap.find_as(static_cast<Value *>(I));
  3735. if (It != ValueExprMap.end()) {
  3736. forgetMemoizedResults(It->second);
  3737. ValueExprMap.erase(It);
  3738. if (PHINode *PN = dyn_cast<PHINode>(I))
  3739. ConstantEvolutionLoopExitValue.erase(PN);
  3740. }
  3741. PushDefUseChildren(I, Worklist);
  3742. }
  3743. }
  3744. /// getExact - Get the exact loop backedge taken count considering all loop
/// exits. A computable result can only be returned for loops with a single exit.
/// Returning the minimum taken count among all exits is incorrect because one
/// of the loop's exit limits may have been skipped. HowFarToZero assumes that
  3748. /// the limit of each loop test is never skipped. This is a valid assumption as
  3749. /// long as the loop exits via that test. For precise results, it is the
  3750. /// caller's responsibility to specify the relevant loop exit using
  3751. /// getExact(ExitingBlock, SE).
  3752. const SCEV *
  3753. ScalarEvolution::BackedgeTakenInfo::getExact(ScalarEvolution *SE) const {
  3754. // If any exits were not computable, the loop is not computable.
  3755. if (!ExitNotTaken.isCompleteList()) return SE->getCouldNotCompute();
  3756. // We need exactly one computable exit.
  3757. if (!ExitNotTaken.ExitingBlock) return SE->getCouldNotCompute();
  3758. assert(ExitNotTaken.ExactNotTaken && "uninitialized not-taken info");
  3759. const SCEV *BECount = 0;
  3760. for (const ExitNotTakenInfo *ENT = &ExitNotTaken;
  3761. ENT != 0; ENT = ENT->getNextExit()) {
  3762. assert(ENT->ExactNotTaken != SE->getCouldNotCompute() && "bad exit SCEV");
  3763. if (!BECount)
  3764. BECount = ENT->ExactNotTaken;
  3765. else if (BECount != ENT->ExactNotTaken)
  3766. return SE->getCouldNotCompute();
  3767. }
  3768. assert(BECount && "Invalid not taken count for loop exit");
  3769. return BECount;
  3770. }
  3771. /// getExact - Get the exact not taken count for this loop exit.
  3772. const SCEV *
  3773. ScalarEvolution::BackedgeTakenInfo::getExact(BasicBlock *ExitingBlock,
  3774. ScalarEvolution *SE) const {
  3775. for (const ExitNotTakenInfo *ENT = &ExitNotTaken;
  3776. ENT != 0; ENT = ENT->getNextExit()) {
  3777. if (ENT->ExitingBlock == ExitingBlock)
  3778. return ENT->ExactNotTaken;
  3779. }
  3780. return SE->getCouldNotCompute();
  3781. }
  3782. /// getMax - Get the max backedge taken count for the loop.
  3783. const SCEV *
  3784. ScalarEvolution::BackedgeTakenInfo::getMax(ScalarEvolution *SE) const {
  3785. return Max ? Max : SE->getCouldNotCompute();
  3786. }
  3787. /// Allocate memory for BackedgeTakenInfo and copy the not-taken count of each
  3788. /// computable exit into a persistent ExitNotTakenInfo array.
  3789. ScalarEvolution::BackedgeTakenInfo::BackedgeTakenInfo(
  3790. SmallVectorImpl< std::pair<BasicBlock *, const SCEV *> > &ExitCounts,
  3791. bool Complete, const SCEV *MaxCount) : Max(MaxCount) {
  3792. if (!Complete)
  3793. ExitNotTaken.setIncomplete();
  3794. unsigned NumExits = ExitCounts.size();
  3795. if (NumExits == 0) return;
  3796. ExitNotTaken.ExitingBlock = ExitCounts[0].first;
  3797. ExitNotTaken.ExactNotTaken = ExitCounts[0].second;
  3798. if (NumExits == 1) return;
  3799. // Handle the rare case of multiple computable exits.
  3800. ExitNotTakenInfo *ENT = new ExitNotTakenInfo[NumExits-1];
  3801. ExitNotTakenInfo *PrevENT = &ExitNotTaken;
  3802. for (unsigned i = 1; i < NumExits; ++i, PrevENT = ENT, ++ENT) {
  3803. PrevENT->setNextExit(ENT);
  3804. ENT->ExitingBlock = ExitCounts[i].first;
  3805. ENT->ExactNotTaken = ExitCounts[i].second;
  3806. }
  3807. }
  3808. /// clear - Invalidate this result and free the ExitNotTakenInfo array.
  3809. void ScalarEvolution::BackedgeTakenInfo::clear() {
  3810. ExitNotTaken.ExitingBlock = 0;
  3811. ExitNotTaken.ExactNotTaken = 0;
  3812. delete[] ExitNotTaken.getNextExit();
  3813. }
  3814. /// ComputeBackedgeTakenCount - Compute the number of times the backedge
  3815. /// of the specified loop will execute.
  3816. ScalarEvolution::BackedgeTakenInfo
  3817. ScalarEvolution::ComputeBackedgeTakenCount(const Loop *L) {
  3818. SmallVector<BasicBlock *, 8> ExitingBlocks;
  3819. L->getExitingBlocks(ExitingBlocks);
  3820. // Examine all exits and pick the most conservative values.
  3821. const SCEV *MaxBECount = getCouldNotCompute();
  3822. bool CouldComputeBECount = true;
  3823. SmallVector<std::pair<BasicBlock *, const SCEV *>, 4> ExitCounts;
  3824. for (unsigned i = 0, e = ExitingBlocks.size(); i != e; ++i) {
  3825. ExitLimit EL = ComputeExitLimit(L, ExitingBlocks[i]);
  3826. if (EL.Exact == getCouldNotCompute())
  3827. // We couldn't compute an exact value for this exit, so
  3828. // we won't be able to compute an exact value for the loop.
  3829. CouldComputeBECount = false;
  3830. else
  3831. ExitCounts.push_back(std::make_pair(ExitingBlocks[i], EL.Exact));
  3832. if (MaxBECount == getCouldNotCompute())
  3833. MaxBECount = EL.Max;
  3834. else if (EL.Max != getCouldNotCompute()) {
  3835. // We cannot take the "min" MaxBECount, because non-unit stride loops may
  3836. // skip some loop tests. Taking the max over the exits is sufficiently
  3837. // conservative. TODO: We could do better taking into consideration
// that (1) the loop has unit stride, (2) the last loop test is
// less-than/greater-than, and (3) any loop test is less-than/greater-than AND
// falls through some constant number of times less than the other tests.
  3841. MaxBECount = getUMaxFromMismatchedTypes(MaxBECount, EL.Max);
  3842. }
  3843. }
  3844. return BackedgeTakenInfo(ExitCounts, CouldComputeBECount, MaxBECount);
  3845. }
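// A worked example for illustration (exit limits are hypothetical): for a
// loop with two exiting blocks whose limits are (Exact=10, Max=10) and
// (Exact=CouldNotCompute, Max=100), the code above records only the first
// exact count, marks the list incomplete (so getExact() later returns
// CouldNotCompute), and reports Max = umax(10, 100) = 100.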
  3846. /// ComputeExitLimit - Compute the number of times the backedge of the specified
  3847. /// loop will execute if it exits via the specified block.
  3848. ScalarEvolution::ExitLimit
  3849. ScalarEvolution::ComputeExitLimit(const Loop *L, BasicBlock *ExitingBlock) {
  3850. // Okay, we've chosen an exiting block. See what condition causes us to
  3851. // exit at this block.
  3852. //
  3853. // FIXME: we should be able to handle switch instructions (with a single exit)
  3854. BranchInst *ExitBr = dyn_cast<BranchInst>(ExitingBlock->getTerminator());
  3855. if (ExitBr == 0) return getCouldNotCompute();
  3856. assert(ExitBr->isConditional() && "If unconditional, it can't be in loop!");
  3857. // At this point, we know we have a conditional branch that determines whether
  3858. // the loop is exited. However, we don't know if the branch is executed each
  3859. // time through the loop. If not, then the execution count of the branch will
  3860. // not be equal to the trip count of the loop.
  3861. //
  3862. // Currently we check for this by checking to see if the Exit branch goes to
  3863. // the loop header. If so, we know it will always execute the same number of
  3864. // times as the loop. We also handle the case where the exit block *is* the
  3865. // loop header. This is common for un-rotated loops.
  3866. //
  3867. // If both of those tests fail, walk up the unique predecessor chain to the
  3868. // header, stopping if there is an edge that doesn't exit the loop. If the
  3869. // header is reached, the execution count of the branch will be equal to the
  3870. // trip count of the loop.
  3871. //
  3872. // More extensive analysis could be done to handle more cases here.
  3873. //
  3874. if (ExitBr->getSuccessor(0) != L->getHeader() &&
  3875. ExitBr->getSuccessor(1) != L->getHeader() &&
  3876. ExitBr->getParent() != L->getHeader()) {
  3877. // The simple checks failed, try climbing the unique predecessor chain
  3878. // up to the header.
  3879. bool Ok = false;
  3880. for (BasicBlock *BB = ExitBr->getParent(); BB; ) {
  3881. BasicBlock *Pred = BB->getUniquePredecessor();
  3882. if (!Pred)
  3883. return getCouldNotCompute();
  3884. TerminatorInst *PredTerm = Pred->getTerminator();
  3885. for (unsigned i = 0, e = PredTerm->getNumSuccessors(); i != e; ++i) {
  3886. BasicBlock *PredSucc = PredTerm->getSuccessor(i);
  3887. if (PredSucc == BB)
  3888. continue;
  3889. // If the predecessor has a successor that isn't BB and isn't
  3890. // outside the loop, assume the worst.
  3891. if (L->contains(PredSucc))
  3892. return getCouldNotCompute();
  3893. }
  3894. if (Pred == L->getHeader()) {
  3895. Ok = true;
  3896. break;
  3897. }
  3898. BB = Pred;
  3899. }
  3900. if (!Ok)
  3901. return getCouldNotCompute();
  3902. }
  3903. // Proceed to the next level to examine the exit condition expression.
  3904. return ComputeExitLimitFromCond(L, ExitBr->getCondition(),
  3905. ExitBr->getSuccessor(0),
  3906. ExitBr->getSuccessor(1));
  3907. }
  3908. /// ComputeExitLimitFromCond - Compute the number of times the
  3909. /// backedge of the specified loop will execute if its exit condition
  3910. /// were a conditional branch of ExitCond, TBB, and FBB.
  3911. ScalarEvolution::ExitLimit
  3912. ScalarEvolution::ComputeExitLimitFromCond(const Loop *L,
  3913. Value *ExitCond,
  3914. BasicBlock *TBB,
  3915. BasicBlock *FBB) {
  3916. // Check if the controlling expression for this loop is an And or Or.
  3917. if (BinaryOperator *BO = dyn_cast<BinaryOperator>(ExitCond)) {
  3918. if (BO->getOpcode() == Instruction::And) {
  3919. // Recurse on the operands of the and.
  3920. ExitLimit EL0 = ComputeExitLimitFromCond(L, BO->getOperand(0), TBB, FBB);
  3921. ExitLimit EL1 = ComputeExitLimitFromCond(L, BO->getOperand(1), TBB, FBB);
  3922. const SCEV *BECount = getCouldNotCompute();
  3923. const SCEV *MaxBECount = getCouldNotCompute();
  3924. if (L->contains(TBB)) {
  3925. // Both conditions must be true for the loop to continue executing.
  3926. // Choose the less conservative count.
  3927. if (EL0.Exact == getCouldNotCompute() ||
  3928. EL1.Exact == getCouldNotCompute())
  3929. BECount = getCouldNotCompute();
  3930. else
  3931. BECount = getUMinFromMismatchedTypes(EL0.Exact, EL1.Exact);
  3932. if (EL0.Max == getCouldNotCompute())
  3933. MaxBECount = EL1.Max;
  3934. else if (EL1.Max == getCouldNotCompute())
  3935. MaxBECount = EL0.Max;
  3936. else
  3937. MaxBECount = getUMinFromMismatchedTypes(EL0.Max, EL1.Max);
  3938. } else {
  3939. // Both conditions must be true at the same time for the loop to exit.
  3940. // For now, be conservative.
  3941. assert(L->contains(FBB) && "Loop block has no successor in loop!");
  3942. if (EL0.Max == EL1.Max)
  3943. MaxBECount = EL0.Max;
  3944. if (EL0.Exact == EL1.Exact)
  3945. BECount = EL0.Exact;
  3946. }
  3947. return ExitLimit(BECount, MaxBECount);
  3948. }
  3949. if (BO->getOpcode() == Instruction::Or) {
  3950. // Recurse on the operands of the or.
  3951. ExitLimit EL0 = ComputeExitLimitFromCond(L, BO->getOperand(0), TBB, FBB);
  3952. ExitLimit EL1 = ComputeExitLimitFromCond(L, BO->getOperand(1), TBB, FBB);
  3953. const SCEV *BECount = getCouldNotCompute();
  3954. const SCEV *MaxBECount = getCouldNotCompute();
  3955. if (L->contains(FBB)) {
  3956. // Both conditions must be false for the loop to continue executing.
  3957. // Choose the less conservative count.
  3958. if (EL0.Exact == getCouldNotCompute() ||
  3959. EL1.Exact == getCouldNotCompute())
  3960. BECount = getCouldNotCompute();
  3961. else
  3962. BECount = getUMinFromMismatchedTypes(EL0.Exact, EL1.Exact);
  3963. if (EL0.Max == getCouldNotCompute())
  3964. MaxBECount = EL1.Max;
  3965. else if (EL1.Max == getCouldNotCompute())
  3966. MaxBECount = EL0.Max;
  3967. else
  3968. MaxBECount = getUMinFromMismatchedTypes(EL0.Max, EL1.Max);
  3969. } else {
  3970. // Both conditions must be false at the same time for the loop to exit.
  3971. // For now, be conservative.
  3972. assert(L->contains(TBB) && "Loop block has no successor in loop!");
  3973. if (EL0.Max == EL1.Max)
  3974. MaxBECount = EL0.Max;
  3975. if (EL0.Exact == EL1.Exact)
  3976. BECount = EL0.Exact;
  3977. }
  3978. return ExitLimit(BECount, MaxBECount);
  3979. }
  3980. }
  3981. // With an icmp, it may be feasible to compute an exact backedge-taken count.
  3982. // Proceed to the next level to examine the icmp.
  3983. if (ICmpInst *ExitCondICmp = dyn_cast<ICmpInst>(ExitCond))
  3984. return ComputeExitLimitFromICmp(L, ExitCondICmp, TBB, FBB);
  3985. // Check for a constant condition. These are normally stripped out by
  3986. // SimplifyCFG, but ScalarEvolution may be used by a pass which wishes to
  3987. // preserve the CFG and is temporarily leaving constant conditions
  3988. // in place.
  3989. if (ConstantInt *CI = dyn_cast<ConstantInt>(ExitCond)) {
  3990. if (L->contains(FBB) == !CI->getZExtValue())
  3991. // The backedge is always taken.
  3992. return getCouldNotCompute();
  3993. else
  3994. // The backedge is never taken.
  3995. return getConstant(CI->getType(), 0);
  3996. }
  3997. // If it's not an integer or pointer comparison then compute it the hard way.
  3998. return ComputeExitCountExhaustively(L, ExitCond, !L->contains(TBB));
  3999. }
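// A worked example for illustration (the sub-condition limits are
// hypothetical): for an exit condition (A && B) where the loop continues on
// true, if the two sub-conditions give exit limits of (Exact=16, Max=16) and
// (Exact=10, Max=10), the And case above returns Exact = umin(16, 10) = 10
// and Max = umin(16, 10) = 10, since the loop stops as soon as either
// condition fails.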
  4000. /// ComputeExitLimitFromICmp - Compute the number of times the
  4001. /// backedge of the specified loop will execute if its exit condition
  4002. /// were a conditional branch of the ICmpInst ExitCond, TBB, and FBB.
  4003. ScalarEvolution::ExitLimit
  4004. ScalarEvolution::ComputeExitLimitFromICmp(const Loop *L,
  4005. ICmpInst *ExitCond,
  4006. BasicBlock *TBB,
  4007. BasicBlock *FBB) {
  4008. // If the condition was exit on true, convert the condition to exit on false
  4009. ICmpInst::Predicate Cond;
  4010. if (!L->contains(FBB))
  4011. Cond = ExitCond->getPredicate();
  4012. else
  4013. Cond = ExitCond->getInversePredicate();
  4014. // Handle common loops like: for (X = "string"; *X; ++X)
  4015. if (LoadInst *LI = dyn_cast<LoadInst>(ExitCond->getOperand(0)))
  4016. if (Constant *RHS = dyn_cast<Constant>(ExitCond->getOperand(1))) {
  4017. ExitLimit ItCnt =
  4018. ComputeLoadConstantCompareExitLimit(LI, RHS, L, Cond);
  4019. if (ItCnt.hasAnyInfo())
  4020. return ItCnt;
  4021. }
  4022. const SCEV *LHS = getSCEV(ExitCond->getOperand(0));
  4023. const SCEV *RHS = getSCEV(ExitCond->getOperand(1));
  4024. // Try to evaluate any dependencies out of the loop.
  4025. LHS = getSCEVAtScope(LHS, L);
  4026. RHS = getSCEVAtScope(RHS, L);
  4027. // At this point, we would like to compute how many iterations of the
  4028. // loop the predicate will return true for these inputs.
  4029. if (isLoopInvariant(LHS, L) && !isLoopInvariant(RHS, L)) {
  4030. // If there is a loop-invariant, force it into the RHS.
  4031. std::swap(LHS, RHS);
  4032. Cond = ICmpInst::getSwappedPredicate(Cond);
  4033. }
  4034. // Simplify the operands before analyzing them.
  4035. (void)SimplifyICmpOperands(Cond, LHS, RHS);
  4036. // If we have a comparison of a chrec against a constant, try to use value
  4037. // ranges to answer this query.
  4038. if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS))
  4039. if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(LHS))
  4040. if (AddRec->getLoop() == L) {
  4041. // Form the constant range.
  4042. ConstantRange CompRange(
  4043. ICmpInst::makeConstantRange(Cond, RHSC->getValue()->getValue()));
  4044. const SCEV *Ret = AddRec->getNumIterationsInRange(CompRange, *this);
  4045. if (!isa<SCEVCouldNotCompute>(Ret)) return Ret;
  4046. }
  4047. switch (Cond) {
  4048. case ICmpInst::ICMP_NE: { // while (X != Y)
  4049. // Convert to: while (X-Y != 0)
  4050. ExitLimit EL = HowFarToZero(getMinusSCEV(LHS, RHS), L);
  4051. if (EL.hasAnyInfo()) return EL;
  4052. break;
  4053. }
  4054. case ICmpInst::ICMP_EQ: { // while (X == Y)
  4055. // Convert to: while (X-Y == 0)
  4056. ExitLimit EL = HowFarToNonZero(getMinusSCEV(LHS, RHS), L);
  4057. if (EL.hasAnyInfo()) return EL;
  4058. break;
  4059. }
  4060. case ICmpInst::ICMP_SLT: {
  4061. ExitLimit EL = HowManyLessThans(LHS, RHS, L, true);
  4062. if (EL.hasAnyInfo()) return EL;
  4063. break;
  4064. }
  4065. case ICmpInst::ICMP_SGT: {
  4066. ExitLimit EL = HowManyLessThans(getNotSCEV(LHS),
  4067. getNotSCEV(RHS), L, true);
  4068. if (EL.hasAnyInfo()) return EL;
  4069. break;
  4070. }
  4071. case ICmpInst::ICMP_ULT: {
  4072. ExitLimit EL = HowManyLessThans(LHS, RHS, L, false);
  4073. if (EL.hasAnyInfo()) return EL;
  4074. break;
  4075. }
  4076. case ICmpInst::ICMP_UGT: {
  4077. ExitLimit EL = HowManyLessThans(getNotSCEV(LHS),
  4078. getNotSCEV(RHS), L, false);
  4079. if (EL.hasAnyInfo()) return EL;
  4080. break;
  4081. }
  4082. default:
  4083. #if 0
  4084. dbgs() << "ComputeBackedgeTakenCount ";
  4085. if (ExitCond->getOperand(0)->getType()->isUnsigned())
  4086. dbgs() << "[unsigned] ";
  4087. dbgs() << *LHS << " "
  4088. << Instruction::getOpcodeName(Instruction::ICmp)
  4089. << " " << *RHS << "\n";
  4090. #endif
  4091. break;
  4092. }
  4093. return ComputeExitCountExhaustively(L, ExitCond, !L->contains(TBB));
  4094. }
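// A worked example for illustration (the IR names are hypothetical): for a
// latch "br i1 (icmp ne i32 %i, %n), label %body, label %exit", the false
// successor leaves the loop, so Cond stays ICMP_NE ("while (X != Y)") and the
// code above calls HowFarToZero(%i - %n, L); with %i being the stride-one
// recurrence {0,+,1}, that yields a backedge-taken count of %n in the simple
// case where the exit test is never skipped.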
  4095. static ConstantInt *
  4096. EvaluateConstantChrecAtConstant(const SCEVAddRecExpr *AddRec, ConstantInt *C,
  4097. ScalarEvolution &SE) {
  4098. const SCEV *InVal = SE.getConstant(C);
  4099. const SCEV *Val = AddRec->evaluateAtIteration(InVal, SE);
  4100. assert(isa<SCEVConstant>(Val) &&
  4101. "Evaluation of SCEV at constant didn't fold correctly?");
  4102. return cast<SCEVConstant>(Val)->getValue();
  4103. }
  4104. /// ComputeLoadConstantCompareExitLimit - Given an exit condition of
  4105. /// 'icmp op load X, cst', try to see if we can compute the backedge
  4106. /// execution count.
  4107. ScalarEvolution::ExitLimit
  4108. ScalarEvolution::ComputeLoadConstantCompareExitLimit(
  4109. LoadInst *LI,
  4110. Constant *RHS,
  4111. const Loop *L,
  4112. ICmpInst::Predicate predicate) {
  4113. if (LI->isVolatile()) return getCouldNotCompute();
  4114. // Check to see if the loaded pointer is a getelementptr of a global.
  4115. // TODO: Use SCEV instead of manually grubbing with GEPs.
  4116. GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(LI->getOperand(0));
  4117. if (!GEP) return getCouldNotCompute();
  4118. // Make sure that it is really a constant global we are gepping, with an
  4119. // initializer, and make sure the first IDX is really 0.
  4120. GlobalVariable *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0));
  4121. if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer() ||
  4122. GEP->getNumOperands() < 3 || !isa<Constant>(GEP->getOperand(1)) ||
  4123. !cast<Constant>(GEP->getOperand(1))->isNullValue())
  4124. return getCouldNotCompute();
  4125. // Okay, we allow one non-constant index into the GEP instruction.
  4126. Value *VarIdx = 0;
  4127. std::vector<Constant*> Indexes;
  4128. unsigned VarIdxNum = 0;
  4129. for (unsigned i = 2, e = GEP->getNumOperands(); i != e; ++i)
  4130. if (ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(i))) {
  4131. Indexes.push_back(CI);
  4132. } else if (!isa<ConstantInt>(GEP->getOperand(i))) {
  4133. if (VarIdx) return getCouldNotCompute(); // Multiple non-constant idx's.
  4134. VarIdx = GEP->getOperand(i);
  4135. VarIdxNum = i-2;
  4136. Indexes.push_back(0);
  4137. }
  4138. // Loop-invariant loads may be a byproduct of loop optimization. Skip them.
  4139. if (!VarIdx)
  4140. return getCouldNotCompute();
  4141. // Okay, we know we have a (load (gep GV, 0, X)) comparison with a constant.
  4142. // Check to see if X is a loop variant variable value now.
  4143. const SCEV *Idx = getSCEV(VarIdx);
  4144. Idx = getSCEVAtScope(Idx, L);
  4145. // We can only recognize very limited forms of loop index expressions, in
  4146. // particular, only affine AddRec's like {C1,+,C2}.
  4147. const SCEVAddRecExpr *IdxExpr = dyn_cast<SCEVAddRecExpr>(Idx);
  4148. if (!IdxExpr || !IdxExpr->isAffine() || isLoopInvariant(IdxExpr, L) ||
  4149. !isa<SCEVConstant>(IdxExpr->getOperand(0)) ||
  4150. !isa<SCEVConstant>(IdxExpr->getOperand(1)))
  4151. return getCouldNotCompute();
  4152. unsigned MaxSteps = MaxBruteForceIterations;
  4153. for (unsigned IterationNum = 0; IterationNum != MaxSteps; ++IterationNum) {
  4154. ConstantInt *ItCst = ConstantInt::get(
  4155. cast<IntegerType>(IdxExpr->getType()), IterationNum);
  4156. ConstantInt *Val = EvaluateConstantChrecAtConstant(IdxExpr, ItCst, *this);
  4157. // Form the GEP offset.
  4158. Indexes[VarIdxNum] = Val;
  4159. Constant *Result = ConstantFoldLoadThroughGEPIndices(GV->getInitializer(),
  4160. Indexes);
  4161. if (Result == 0) break; // Cannot compute!
  4162. // Evaluate the condition for this iteration.
  4163. Result = ConstantExpr::getICmp(predicate, Result, RHS);
  4164. if (!isa<ConstantInt>(Result)) break; // Couldn't decide for sure
  4165. if (cast<ConstantInt>(Result)->getValue().isMinValue()) {
  4166. #if 0
  4167. dbgs() << "\n***\n*** Computed loop count " << *ItCst
  4168. << "\n*** From global " << *GV << "*** BB: " << *L->getHeader()
  4169. << "***\n";
  4170. #endif
  4171. ++NumArrayLenItCounts;
  4172. return getConstant(ItCst); // Found terminating iteration!
  4173. }
  4174. }
  4175. return getCouldNotCompute();
  4176. }
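// A worked example for illustration (the array name is hypothetical): for a
// loop like
//   for (i = 0; table[i] != 0; ++i)
// where 'table' is a constant global array, the code above folds the load for
// i = 0, 1, 2, ... (up to MaxBruteForceIterations) and returns, as the exit
// count, the first iteration at which the comparison becomes false -- here,
// the index of the first zero entry in 'table'.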
  4177. /// CanConstantFold - Return true if we can constant fold an instruction of the
  4178. /// specified type, assuming that all operands were constants.
  4179. static bool CanConstantFold(const Instruction *I) {
  4180. if (isa<BinaryOperator>(I) || isa<CmpInst>(I) ||
  4181. isa<SelectInst>(I) || isa<CastInst>(I) || isa<GetElementPtrInst>(I) ||
  4182. isa<LoadInst>(I))
  4183. return true;
  4184. if (const CallInst *CI = dyn_cast<CallInst>(I))
  4185. if (const Function *F = CI->getCalledFunction())
  4186. return canConstantFoldCallTo(F);
  4187. return false;
  4188. }
  4189. /// Determine whether this instruction can constant evolve within this loop
  4190. /// assuming its operands can all constant evolve.
  4191. static bool canConstantEvolve(Instruction *I, const Loop *L) {
  4192. // An instruction outside of the loop can't be derived from a loop PHI.
  4193. if (!L->contains(I)) return false;
  4194. if (isa<PHINode>(I)) {
  4195. if (L->getHeader() == I->getParent())
  4196. return true;
  4197. else
  4198. // We don't currently keep track of the control flow needed to evaluate
  4199. // PHIs, so we cannot handle PHIs inside of loops.
  4200. return false;
  4201. }
  4202. // If we won't be able to constant fold this expression even if the operands
  4203. // are constants, bail early.
  4204. return CanConstantFold(I);
  4205. }
  4206. /// getConstantEvolvingPHIOperands - Implement getConstantEvolvingPHI by
  4207. /// recursing through each instruction operand until reaching a loop header phi.
  4208. static PHINode *
  4209. getConstantEvolvingPHIOperands(Instruction *UseInst, const Loop *L,
  4210. DenseMap<Instruction *, PHINode *> &PHIMap) {
  4211. // Otherwise, we can evaluate this instruction if all of its operands are
  4212. // constant or derived from a PHI node themselves.
  4213. PHINode *PHI = 0;
  4214. for (Instruction::op_iterator OpI = UseInst->op_begin(),
  4215. OpE = UseInst->op_end(); OpI != OpE; ++OpI) {
  4216. if (isa<Constant>(*OpI)) continue;
  4217. Instruction *OpInst = dyn_cast<Instruction>(*OpI);
  4218. if (!OpInst || !canConstantEvolve(OpInst, L)) return 0;
  4219. PHINode *P = dyn_cast<PHINode>(OpInst);
  4220. if (!P)
  4221. // If this operand is already visited, reuse the prior result.
  4222. // We may have P != PHI if this is the deepest point at which the
  4223. // inconsistent paths meet.
  4224. P = PHIMap.lookup(OpInst);
  4225. if (!P) {
  4226. // Recurse and memoize the results, whether a phi is found or not.
  4227. // This recursive call invalidates pointers into PHIMap.
  4228. P = getConstantEvolvingPHIOperands(OpInst, L, PHIMap);
  4229. PHIMap[OpInst] = P;
  4230. }
  4231. if (P == 0) return 0; // Not evolving from PHI
  4232. if (PHI && PHI != P) return 0; // Evolving from multiple different PHIs.
  4233. PHI = P;
  4234. }
4235. // This is an expression evolving from a constant PHI!
  4236. return PHI;
  4237. }
  4238. /// getConstantEvolvingPHI - Given an LLVM value and a loop, return a PHI node
  4239. /// in the loop that V is derived from. We allow arbitrary operations along the
  4240. /// way, but the operands of an operation must either be constants or a value
  4241. /// derived from a constant PHI. If this expression does not fit with these
  4242. /// constraints, return null.
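///
/// For example, with a hypothetical header PHI %i, an exit test such as
/// ((%i * 3) + 1) != 20 satisfies these constraints (every operand is a
/// constant or derived from %i), so the PHI for %i is returned.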
  4243. static PHINode *getConstantEvolvingPHI(Value *V, const Loop *L) {
  4244. Instruction *I = dyn_cast<Instruction>(V);
  4245. if (I == 0 || !canConstantEvolve(I, L)) return 0;
  4246. if (PHINode *PN = dyn_cast<PHINode>(I)) {
  4247. return PN;
  4248. }
  4249. // Record non-constant instructions contained by the loop.
  4250. DenseMap<Instruction *, PHINode *> PHIMap;
  4251. return getConstantEvolvingPHIOperands(I, L, PHIMap);
  4252. }
  4253. /// EvaluateExpression - Given an expression that passes the
  4254. /// getConstantEvolvingPHI predicate, evaluate its value assuming the PHI node
  4255. /// in the loop has the value PHIVal. If we can't fold this expression for some
  4256. /// reason, return null.
  4257. static Constant *EvaluateExpression(Value *V, const Loop *L,
  4258. DenseMap<Instruction *, Constant *> &Vals,
  4259. const DataLayout *TD,
  4260. const TargetLibraryInfo *TLI) {
  4261. // Convenient constant check, but redundant for recursive calls.
  4262. if (Constant *C = dyn_cast<Constant>(V)) return C;
  4263. Instruction *I = dyn_cast<Instruction>(V);
  4264. if (!I) return 0;
  4265. if (Constant *C = Vals.lookup(I)) return C;
  4266. // An instruction inside the loop depends on a value outside the loop that we
  4267. // weren't given a mapping for, or a value such as a call inside the loop.
  4268. if (!canConstantEvolve(I, L)) return 0;
  4269. // An unmapped PHI can be due to a branch or another loop inside this loop,
  4270. // or due to this not being the initial iteration through a loop where we
  4271. // couldn't compute the evolution of this particular PHI last time.
  4272. if (isa<PHINode>(I)) return 0;
  4273. std::vector<Constant*> Operands(I->getNumOperands());
  4274. for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
  4275. Instruction *Operand = dyn_cast<Instruction>(I->getOperand(i));
  4276. if (!Operand) {
  4277. Operands[i] = dyn_cast<Constant>(I->getOperand(i));
  4278. if (!Operands[i]) return 0;
  4279. continue;
  4280. }
  4281. Constant *C = EvaluateExpression(Operand, L, Vals, TD, TLI);
  4282. Vals[Operand] = C;
  4283. if (!C) return 0;
  4284. Operands[i] = C;
  4285. }
  4286. if (CmpInst *CI = dyn_cast<CmpInst>(I))
  4287. return ConstantFoldCompareInstOperands(CI->getPredicate(), Operands[0],
  4288. Operands[1], TD, TLI);
  4289. if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
  4290. if (!LI->isVolatile())
  4291. return ConstantFoldLoadFromConstPtr(Operands[0], TD);
  4292. }
  4293. return ConstantFoldInstOperands(I->getOpcode(), I->getType(), Operands, TD,
  4294. TLI);
  4295. }
  4296. /// getConstantEvolutionLoopExitValue - If we know that the specified Phi is
  4297. /// in the header of its containing loop, we know the loop executes a
  4298. /// constant number of times, and the PHI node is just a recurrence
  4299. /// involving constants, fold it.
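///
/// For example, for a hypothetical header PHI %x with constant incoming
/// value 5 and backedge value %x * 2, a backedge-taken count of 3 yields
/// the exit value 5 * 2^3 = 40.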
  4300. Constant *
  4301. ScalarEvolution::getConstantEvolutionLoopExitValue(PHINode *PN,
  4302. const APInt &BEs,
  4303. const Loop *L) {
  4304. DenseMap<PHINode*, Constant*>::const_iterator I =
  4305. ConstantEvolutionLoopExitValue.find(PN);
  4306. if (I != ConstantEvolutionLoopExitValue.end())
  4307. return I->second;
  4308. if (BEs.ugt(MaxBruteForceIterations))
  4309. return ConstantEvolutionLoopExitValue[PN] = 0; // Not going to evaluate it.
  4310. Constant *&RetVal = ConstantEvolutionLoopExitValue[PN];
  4311. DenseMap<Instruction *, Constant *> CurrentIterVals;
  4312. BasicBlock *Header = L->getHeader();
  4313. assert(PN->getParent() == Header && "Can't evaluate PHI not in loop header!");
  4314. // Since the loop is canonicalized, the PHI node must have two entries. One
  4315. // entry must be a constant (coming in from outside of the loop), and the
  4316. // second must be derived from the same PHI.
  4317. bool SecondIsBackedge = L->contains(PN->getIncomingBlock(1));
  4318. PHINode *PHI = 0;
  4319. for (BasicBlock::iterator I = Header->begin();
  4320. (PHI = dyn_cast<PHINode>(I)); ++I) {
  4321. Constant *StartCST =
  4322. dyn_cast<Constant>(PHI->getIncomingValue(!SecondIsBackedge));
  4323. if (StartCST == 0) continue;
  4324. CurrentIterVals[PHI] = StartCST;
  4325. }
  4326. if (!CurrentIterVals.count(PN))
  4327. return RetVal = 0;
  4328. Value *BEValue = PN->getIncomingValue(SecondIsBackedge);
  4329. // Execute the loop symbolically to determine the exit value.
  4330. if (BEs.getActiveBits() >= 32)
  4331. return RetVal = 0; // More than 2^32-1 iterations?? Not doing it!
  4332. unsigned NumIterations = BEs.getZExtValue(); // must be in range
  4333. unsigned IterationNum = 0;
  4334. for (; ; ++IterationNum) {
  4335. if (IterationNum == NumIterations)
  4336. return RetVal = CurrentIterVals[PN]; // Got exit value!
  4337. // Compute the value of the PHIs for the next iteration.
  4338. // EvaluateExpression adds non-phi values to the CurrentIterVals map.
  4339. DenseMap<Instruction *, Constant *> NextIterVals;
  4340. Constant *NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, TD,
  4341. TLI);
  4342. if (NextPHI == 0)
  4343. return 0; // Couldn't evaluate!
  4344. NextIterVals[PN] = NextPHI;
  4345. bool StoppedEvolving = NextPHI == CurrentIterVals[PN];
  4346. // Also evaluate the other PHI nodes. However, we don't get to stop if we
  4347. // cease to be able to evaluate one of them or if they stop evolving,
  4348. // because that doesn't necessarily prevent us from computing PN.
  4349. SmallVector<std::pair<PHINode *, Constant *>, 8> PHIsToCompute;
  4350. for (DenseMap<Instruction *, Constant *>::const_iterator
  4351. I = CurrentIterVals.begin(), E = CurrentIterVals.end(); I != E; ++I){
  4352. PHINode *PHI = dyn_cast<PHINode>(I->first);
  4353. if (!PHI || PHI == PN || PHI->getParent() != Header) continue;
  4354. PHIsToCompute.push_back(std::make_pair(PHI, I->second));
  4355. }
  4356. // We use two distinct loops because EvaluateExpression may invalidate any
  4357. // iterators into CurrentIterVals.
  4358. for (SmallVectorImpl<std::pair<PHINode *, Constant*> >::const_iterator
  4359. I = PHIsToCompute.begin(), E = PHIsToCompute.end(); I != E; ++I) {
  4360. PHINode *PHI = I->first;
  4361. Constant *&NextPHI = NextIterVals[PHI];
  4362. if (!NextPHI) { // Not already computed.
  4363. Value *BEValue = PHI->getIncomingValue(SecondIsBackedge);
  4364. NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, TD, TLI);
  4365. }
  4366. if (NextPHI != I->second)
  4367. StoppedEvolving = false;
  4368. }
  4369. // If all entries in CurrentIterVals == NextIterVals then we can stop
  4370. // iterating, the loop can't continue to change.
  4371. if (StoppedEvolving)
  4372. return RetVal = CurrentIterVals[PN];
  4373. CurrentIterVals.swap(NextIterVals);
  4374. }
  4375. }
  4376. /// ComputeExitCountExhaustively - If the loop is known to execute a
  4377. /// constant number of times (the condition evolves only from constants),
4378. /// try to evaluate a few iterations of the loop until the exit
  4379. /// condition gets a value of ExitWhen (true or false). If we cannot
  4380. /// evaluate the trip count of the loop, return getCouldNotCompute().
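///
/// For example, with a hypothetical header PHI %i starting at 0 with
/// backedge value %i + 3, Cond = (%i != 21), and ExitWhen = false, the
/// condition first becomes false on iteration 7, so 7 is returned.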
  4381. const SCEV *ScalarEvolution::ComputeExitCountExhaustively(const Loop *L,
  4382. Value *Cond,
  4383. bool ExitWhen) {
  4384. PHINode *PN = getConstantEvolvingPHI(Cond, L);
  4385. if (PN == 0) return getCouldNotCompute();
  4386. // If the loop is canonicalized, the PHI will have exactly two entries.
  4387. // That's the only form we support here.
  4388. if (PN->getNumIncomingValues() != 2) return getCouldNotCompute();
  4389. DenseMap<Instruction *, Constant *> CurrentIterVals;
  4390. BasicBlock *Header = L->getHeader();
  4391. assert(PN->getParent() == Header && "Can't evaluate PHI not in loop header!");
  4392. // One entry must be a constant (coming in from outside of the loop), and the
  4393. // second must be derived from the same PHI.
  4394. bool SecondIsBackedge = L->contains(PN->getIncomingBlock(1));
  4395. PHINode *PHI = 0;
  4396. for (BasicBlock::iterator I = Header->begin();
  4397. (PHI = dyn_cast<PHINode>(I)); ++I) {
  4398. Constant *StartCST =
  4399. dyn_cast<Constant>(PHI->getIncomingValue(!SecondIsBackedge));
  4400. if (StartCST == 0) continue;
  4401. CurrentIterVals[PHI] = StartCST;
  4402. }
  4403. if (!CurrentIterVals.count(PN))
  4404. return getCouldNotCompute();
4405. // Okay, we found a PHI node that defines the trip count of this loop. Execute
  4406. // the loop symbolically to determine when the condition gets a value of
  4407. // "ExitWhen".
  4408. unsigned MaxIterations = MaxBruteForceIterations; // Limit analysis.
  4409. for (unsigned IterationNum = 0; IterationNum != MaxIterations;++IterationNum){
  4410. ConstantInt *CondVal =
  4411. dyn_cast_or_null<ConstantInt>(EvaluateExpression(Cond, L, CurrentIterVals,
  4412. TD, TLI));
  4413. // Couldn't symbolically evaluate.
  4414. if (!CondVal) return getCouldNotCompute();
  4415. if (CondVal->getValue() == uint64_t(ExitWhen)) {
  4416. ++NumBruteForceTripCountsComputed;
  4417. return getConstant(Type::getInt32Ty(getContext()), IterationNum);
  4418. }
  4419. // Update all the PHI nodes for the next iteration.
  4420. DenseMap<Instruction *, Constant *> NextIterVals;
  4421. // Create a list of which PHIs we need to compute. We want to do this before
  4422. // calling EvaluateExpression on them because that may invalidate iterators
  4423. // into CurrentIterVals.
  4424. SmallVector<PHINode *, 8> PHIsToCompute;
  4425. for (DenseMap<Instruction *, Constant *>::const_iterator
  4426. I = CurrentIterVals.begin(), E = CurrentIterVals.end(); I != E; ++I){
  4427. PHINode *PHI = dyn_cast<PHINode>(I->first);
  4428. if (!PHI || PHI->getParent() != Header) continue;
  4429. PHIsToCompute.push_back(PHI);
  4430. }
  4431. for (SmallVectorImpl<PHINode *>::const_iterator I = PHIsToCompute.begin(),
  4432. E = PHIsToCompute.end(); I != E; ++I) {
  4433. PHINode *PHI = *I;
  4434. Constant *&NextPHI = NextIterVals[PHI];
  4435. if (NextPHI) continue; // Already computed!
  4436. Value *BEValue = PHI->getIncomingValue(SecondIsBackedge);
  4437. NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, TD, TLI);
  4438. }
  4439. CurrentIterVals.swap(NextIterVals);
  4440. }
  4441. // Too many iterations were needed to evaluate.
  4442. return getCouldNotCompute();
  4443. }
  4444. /// getSCEVAtScope - Return a SCEV expression for the specified value
  4445. /// at the specified scope in the program. The L value specifies a loop
4446. /// nest to evaluate the expression at, where null means the top level, and a
4447. /// non-null loop means the expression is evaluated as seen from inside that loop.
  4448. ///
  4449. /// This method can be used to compute the exit value for a variable defined
  4450. /// in a loop by querying what the value will hold in the parent loop.
  4451. ///
  4452. /// In the case that a relevant loop exit value cannot be computed, the
  4453. /// original value V is returned.
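///
/// For example, evaluating the addrec {0,+,1}<%loop> at the scope of the
/// parent of %loop, when %loop has a known backedge-taken count of 9,
/// yields the loop exit value 9.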
  4454. const SCEV *ScalarEvolution::getSCEVAtScope(const SCEV *V, const Loop *L) {
  4455. // Check to see if we've folded this expression at this loop before.
  4456. std::map<const Loop *, const SCEV *> &Values = ValuesAtScopes[V];
  4457. std::pair<std::map<const Loop *, const SCEV *>::iterator, bool> Pair =
  4458. Values.insert(std::make_pair(L, static_cast<const SCEV *>(0)));
  4459. if (!Pair.second)
  4460. return Pair.first->second ? Pair.first->second : V;
  4461. // Otherwise compute it.
  4462. const SCEV *C = computeSCEVAtScope(V, L);
  4463. ValuesAtScopes[V][L] = C;
  4464. return C;
  4465. }
  4466. /// This builds up a Constant using the ConstantExpr interface. That way, we
  4467. /// will return Constants for objects which aren't represented by a
  4468. /// SCEVConstant, because SCEVConstant is restricted to ConstantInt.
  4469. /// Returns NULL if the SCEV isn't representable as a Constant.
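///
/// For example, a SCEVConstant is returned as its ConstantInt directly, and
/// an add of a global address and a constant byte offset is built as a
/// ConstantExpr GEP off an i8* bitcast of that global.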
  4470. static Constant *BuildConstantFromSCEV(const SCEV *V) {
  4471. switch (V->getSCEVType()) {
  4472. default: // TODO: smax, umax.
  4473. case scCouldNotCompute:
  4474. case scAddRecExpr:
  4475. break;
  4476. case scConstant:
  4477. return cast<SCEVConstant>(V)->getValue();
  4478. case scUnknown:
  4479. return dyn_cast<Constant>(cast<SCEVUnknown>(V)->getValue());
  4480. case scSignExtend: {
  4481. const SCEVSignExtendExpr *SS = cast<SCEVSignExtendExpr>(V);
  4482. if (Constant *CastOp = BuildConstantFromSCEV(SS->getOperand()))
  4483. return ConstantExpr::getSExt(CastOp, SS->getType());
  4484. break;
  4485. }
  4486. case scZeroExtend: {
  4487. const SCEVZeroExtendExpr *SZ = cast<SCEVZeroExtendExpr>(V);
  4488. if (Constant *CastOp = BuildConstantFromSCEV(SZ->getOperand()))
  4489. return ConstantExpr::getZExt(CastOp, SZ->getType());
  4490. break;
  4491. }
  4492. case scTruncate: {
  4493. const SCEVTruncateExpr *ST = cast<SCEVTruncateExpr>(V);
  4494. if (Constant *CastOp = BuildConstantFromSCEV(ST->getOperand()))
  4495. return ConstantExpr::getTrunc(CastOp, ST->getType());
  4496. break;
  4497. }
  4498. case scAddExpr: {
  4499. const SCEVAddExpr *SA = cast<SCEVAddExpr>(V);
  4500. if (Constant *C = BuildConstantFromSCEV(SA->getOperand(0))) {
  4501. if (C->getType()->isPointerTy())
  4502. C = ConstantExpr::getBitCast(C, Type::getInt8PtrTy(C->getContext()));
  4503. for (unsigned i = 1, e = SA->getNumOperands(); i != e; ++i) {
  4504. Constant *C2 = BuildConstantFromSCEV(SA->getOperand(i));
  4505. if (!C2) return 0;
  4506. // First pointer!
  4507. if (!C->getType()->isPointerTy() && C2->getType()->isPointerTy()) {
  4508. std::swap(C, C2);
  4509. // The offsets have been converted to bytes. We can add bytes to an
  4510. // i8* by GEP with the byte count in the first index.
  4511. C = ConstantExpr::getBitCast(C,Type::getInt8PtrTy(C->getContext()));
  4512. }
  4513. // Don't bother trying to sum two pointers. We probably can't
  4514. // statically compute a load that results from it anyway.
  4515. if (C2->getType()->isPointerTy())
  4516. return 0;
  4517. if (C->getType()->isPointerTy()) {
  4518. if (cast<PointerType>(C->getType())->getElementType()->isStructTy())
  4519. C2 = ConstantExpr::getIntegerCast(
  4520. C2, Type::getInt32Ty(C->getContext()), true);
  4521. C = ConstantExpr::getGetElementPtr(C, C2);
  4522. } else
  4523. C = ConstantExpr::getAdd(C, C2);
  4524. }
  4525. return C;
  4526. }
  4527. break;
  4528. }
  4529. case scMulExpr: {
  4530. const SCEVMulExpr *SM = cast<SCEVMulExpr>(V);
  4531. if (Constant *C = BuildConstantFromSCEV(SM->getOperand(0))) {
  4532. // Don't bother with pointers at all.
  4533. if (C->getType()->isPointerTy()) return 0;
  4534. for (unsigned i = 1, e = SM->getNumOperands(); i != e; ++i) {
  4535. Constant *C2 = BuildConstantFromSCEV(SM->getOperand(i));
  4536. if (!C2 || C2->getType()->isPointerTy()) return 0;
  4537. C = ConstantExpr::getMul(C, C2);
  4538. }
  4539. return C;
  4540. }
  4541. break;
  4542. }
  4543. case scUDivExpr: {
  4544. const SCEVUDivExpr *SU = cast<SCEVUDivExpr>(V);
  4545. if (Constant *LHS = BuildConstantFromSCEV(SU->getLHS()))
  4546. if (Constant *RHS = BuildConstantFromSCEV(SU->getRHS()))
  4547. if (LHS->getType() == RHS->getType())
  4548. return ConstantExpr::getUDiv(LHS, RHS);
  4549. break;
  4550. }
  4551. }
  4552. return 0;
  4553. }
  4554. const SCEV *ScalarEvolution::computeSCEVAtScope(const SCEV *V, const Loop *L) {
  4555. if (isa<SCEVConstant>(V)) return V;
  4556. // If this instruction is evolved from a constant-evolving PHI, compute the
  4557. // exit value from the loop without using SCEVs.
  4558. if (const SCEVUnknown *SU = dyn_cast<SCEVUnknown>(V)) {
  4559. if (Instruction *I = dyn_cast<Instruction>(SU->getValue())) {
  4560. const Loop *LI = (*this->LI)[I->getParent()];
  4561. if (LI && LI->getParentLoop() == L) // Looking for loop exit value.
  4562. if (PHINode *PN = dyn_cast<PHINode>(I))
  4563. if (PN->getParent() == LI->getHeader()) {
  4564. // Okay, there is no closed form solution for the PHI node. Check
  4565. // to see if the loop that contains it has a known backedge-taken
  4566. // count. If so, we may be able to force computation of the exit
  4567. // value.
  4568. const SCEV *BackedgeTakenCount = getBackedgeTakenCount(LI);
  4569. if (const SCEVConstant *BTCC =
  4570. dyn_cast<SCEVConstant>(BackedgeTakenCount)) {
  4571. // Okay, we know how many times the containing loop executes. If
  4572. // this is a constant evolving PHI node, get the final value at
  4573. // the specified iteration number.
  4574. Constant *RV = getConstantEvolutionLoopExitValue(PN,
  4575. BTCC->getValue()->getValue(),
  4576. LI);
  4577. if (RV) return getSCEV(RV);
  4578. }
  4579. }
  4580. // Okay, this is an expression that we cannot symbolically evaluate
  4581. // into a SCEV. Check to see if it's possible to symbolically evaluate
  4582. // the arguments into constants, and if so, try to constant propagate the
  4583. // result. This is particularly useful for computing loop exit values.
  4584. if (CanConstantFold(I)) {
  4585. SmallVector<Constant *, 4> Operands;
  4586. bool MadeImprovement = false;
  4587. for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
  4588. Value *Op = I->getOperand(i);
  4589. if (Constant *C = dyn_cast<Constant>(Op)) {
  4590. Operands.push_back(C);
  4591. continue;
  4592. }
  4593. // If any of the operands is non-constant and if they are
  4594. // non-integer and non-pointer, don't even try to analyze them
  4595. // with scev techniques.
  4596. if (!isSCEVable(Op->getType()))
  4597. return V;
  4598. const SCEV *OrigV = getSCEV(Op);
  4599. const SCEV *OpV = getSCEVAtScope(OrigV, L);
  4600. MadeImprovement |= OrigV != OpV;
  4601. Constant *C = BuildConstantFromSCEV(OpV);
  4602. if (!C) return V;
  4603. if (C->getType() != Op->getType())
  4604. C = ConstantExpr::getCast(CastInst::getCastOpcode(C, false,
  4605. Op->getType(),
  4606. false),
  4607. C, Op->getType());
  4608. Operands.push_back(C);
  4609. }
  4610. // Check to see if getSCEVAtScope actually made an improvement.
  4611. if (MadeImprovement) {
  4612. Constant *C = 0;
  4613. if (const CmpInst *CI = dyn_cast<CmpInst>(I))
  4614. C = ConstantFoldCompareInstOperands(CI->getPredicate(),
  4615. Operands[0], Operands[1], TD,
  4616. TLI);
  4617. else if (const LoadInst *LI = dyn_cast<LoadInst>(I)) {
  4618. if (!LI->isVolatile())
  4619. C = ConstantFoldLoadFromConstPtr(Operands[0], TD);
  4620. } else
  4621. C = ConstantFoldInstOperands(I->getOpcode(), I->getType(),
  4622. Operands, TD, TLI);
  4623. if (!C) return V;
  4624. return getSCEV(C);
  4625. }
  4626. }
  4627. }
  4628. // This is some other type of SCEVUnknown, just return it.
  4629. return V;
  4630. }
  4631. if (const SCEVCommutativeExpr *Comm = dyn_cast<SCEVCommutativeExpr>(V)) {
  4632. // Avoid performing the look-up in the common case where the specified
  4633. // expression has no loop-variant portions.
  4634. for (unsigned i = 0, e = Comm->getNumOperands(); i != e; ++i) {
  4635. const SCEV *OpAtScope = getSCEVAtScope(Comm->getOperand(i), L);
  4636. if (OpAtScope != Comm->getOperand(i)) {
  4637. // Okay, at least one of these operands is loop variant but might be
  4638. // foldable. Build a new instance of the folded commutative expression.
  4639. SmallVector<const SCEV *, 8> NewOps(Comm->op_begin(),
  4640. Comm->op_begin()+i);
  4641. NewOps.push_back(OpAtScope);
  4642. for (++i; i != e; ++i) {
  4643. OpAtScope = getSCEVAtScope(Comm->getOperand(i), L);
  4644. NewOps.push_back(OpAtScope);
  4645. }
  4646. if (isa<SCEVAddExpr>(Comm))
  4647. return getAddExpr(NewOps);
  4648. if (isa<SCEVMulExpr>(Comm))
  4649. return getMulExpr(NewOps);
  4650. if (isa<SCEVSMaxExpr>(Comm))
  4651. return getSMaxExpr(NewOps);
  4652. if (isa<SCEVUMaxExpr>(Comm))
  4653. return getUMaxExpr(NewOps);
  4654. llvm_unreachable("Unknown commutative SCEV type!");
  4655. }
  4656. }
  4657. // If we got here, all operands are loop invariant.
  4658. return Comm;
  4659. }
  4660. if (const SCEVUDivExpr *Div = dyn_cast<SCEVUDivExpr>(V)) {
  4661. const SCEV *LHS = getSCEVAtScope(Div->getLHS(), L);
  4662. const SCEV *RHS = getSCEVAtScope(Div->getRHS(), L);
  4663. if (LHS == Div->getLHS() && RHS == Div->getRHS())
  4664. return Div; // must be loop invariant
  4665. return getUDivExpr(LHS, RHS);
  4666. }
  4667. // If this is a loop recurrence for a loop that does not contain L, then we
  4668. // are dealing with the final value computed by the loop.
  4669. if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(V)) {
  4670. // First, attempt to evaluate each operand.
  4671. // Avoid performing the look-up in the common case where the specified
  4672. // expression has no loop-variant portions.
  4673. for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) {
  4674. const SCEV *OpAtScope = getSCEVAtScope(AddRec->getOperand(i), L);
  4675. if (OpAtScope == AddRec->getOperand(i))
  4676. continue;
  4677. // Okay, at least one of these operands is loop variant but might be
  4678. // foldable. Build a new instance of the folded commutative expression.
  4679. SmallVector<const SCEV *, 8> NewOps(AddRec->op_begin(),
  4680. AddRec->op_begin()+i);
  4681. NewOps.push_back(OpAtScope);
  4682. for (++i; i != e; ++i)
  4683. NewOps.push_back(getSCEVAtScope(AddRec->getOperand(i), L));
  4684. const SCEV *FoldedRec =
  4685. getAddRecExpr(NewOps, AddRec->getLoop(),
  4686. AddRec->getNoWrapFlags(SCEV::FlagNW));
  4687. AddRec = dyn_cast<SCEVAddRecExpr>(FoldedRec);
  4688. // The addrec may be folded to a nonrecurrence, for example, if the
  4689. // induction variable is multiplied by zero after constant folding. Go
  4690. // ahead and return the folded value.
  4691. if (!AddRec)
  4692. return FoldedRec;
  4693. break;
  4694. }
  4695. // If the scope is outside the addrec's loop, evaluate it by using the
  4696. // loop exit value of the addrec.
  4697. if (!AddRec->getLoop()->contains(L)) {
  4698. // To evaluate this recurrence, we need to know how many times the AddRec
  4699. // loop iterates. Compute this now.
  4700. const SCEV *BackedgeTakenCount = getBackedgeTakenCount(AddRec->getLoop());
  4701. if (BackedgeTakenCount == getCouldNotCompute()) return AddRec;
  4702. // Then, evaluate the AddRec.
  4703. return AddRec->evaluateAtIteration(BackedgeTakenCount, *this);
  4704. }
  4705. return AddRec;
  4706. }
  4707. if (const SCEVZeroExtendExpr *Cast = dyn_cast<SCEVZeroExtendExpr>(V)) {
  4708. const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L);
  4709. if (Op == Cast->getOperand())
  4710. return Cast; // must be loop invariant
  4711. return getZeroExtendExpr(Op, Cast->getType());
  4712. }
  4713. if (const SCEVSignExtendExpr *Cast = dyn_cast<SCEVSignExtendExpr>(V)) {
  4714. const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L);
  4715. if (Op == Cast->getOperand())
  4716. return Cast; // must be loop invariant
  4717. return getSignExtendExpr(Op, Cast->getType());
  4718. }
  4719. if (const SCEVTruncateExpr *Cast = dyn_cast<SCEVTruncateExpr>(V)) {
  4720. const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L);
  4721. if (Op == Cast->getOperand())
  4722. return Cast; // must be loop invariant
  4723. return getTruncateExpr(Op, Cast->getType());
  4724. }
  4725. llvm_unreachable("Unknown SCEV type!");
  4726. }
  4727. /// getSCEVAtScope - This is a convenience function which does
  4728. /// getSCEVAtScope(getSCEV(V), L).
  4729. const SCEV *ScalarEvolution::getSCEVAtScope(Value *V, const Loop *L) {
  4730. return getSCEVAtScope(getSCEV(V), L);
  4731. }
  4732. /// SolveLinEquationWithOverflow - Finds the minimum unsigned root of the
  4733. /// following equation:
  4734. ///
  4735. /// A * X = B (mod N)
  4736. ///
  4737. /// where N = 2^BW and BW is the common bit width of A and B. The signedness of
  4738. /// A and B isn't important.
  4739. ///
  4740. /// If the equation does not have a solution, SCEVCouldNotCompute is returned.
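///
/// For example, with BW = 4 (N = 16), A = 6, B = 10: D = gcd(6, 16) = 2, B is
/// divisible by D, the inverse of A/D = 3 modulo N/D = 8 is 3, and the minimum
/// root is (3 * (10/2)) mod 8 = 7; indeed 6 * 7 = 42 = 10 (mod 16).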
  4741. static const SCEV *SolveLinEquationWithOverflow(const APInt &A, const APInt &B,
  4742. ScalarEvolution &SE) {
  4743. uint32_t BW = A.getBitWidth();
  4744. assert(BW == B.getBitWidth() && "Bit widths must be the same.");
  4745. assert(A != 0 && "A must be non-zero.");
  4746. // 1. D = gcd(A, N)
  4747. //
  4748. // The gcd of A and N may have only one prime factor: 2. The number of
  4749. // trailing zeros in A is its multiplicity
  4750. uint32_t Mult2 = A.countTrailingZeros();
  4751. // D = 2^Mult2
  4752. // 2. Check if B is divisible by D.
  4753. //
  4754. // B is divisible by D if and only if the multiplicity of prime factor 2 for B
  4755. // is not less than multiplicity of this prime factor for D.
  4756. if (B.countTrailingZeros() < Mult2)
  4757. return SE.getCouldNotCompute();
  4758. // 3. Compute I: the multiplicative inverse of (A / D) in arithmetic
  4759. // modulo (N / D).
  4760. //
  4761. // (N / D) may need BW+1 bits in its representation. Hence, we'll use this
  4762. // bit width during computations.
  4763. APInt AD = A.lshr(Mult2).zext(BW + 1); // AD = A / D
  4764. APInt Mod(BW + 1, 0);
  4765. Mod.setBit(BW - Mult2); // Mod = N / D
  4766. APInt I = AD.multiplicativeInverse(Mod);
  4767. // 4. Compute the minimum unsigned root of the equation:
  4768. // I * (B / D) mod (N / D)
  4769. APInt Result = (I * B.lshr(Mult2).zext(BW + 1)).urem(Mod);
  4770. // The result is guaranteed to be less than 2^BW so we may truncate it to BW
  4771. // bits.
  4772. return SE.getConstant(Result.trunc(BW));
  4773. }
  4774. /// SolveQuadraticEquation - Find the roots of the quadratic equation for the
  4775. /// given quadratic chrec {L,+,M,+,N}. This returns either the two roots (which
  4776. /// might be the same) or two SCEVCouldNotCompute objects.
  4777. ///
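/// For example, the chrec {2,+,-2,+,2} has the closed form i^2 - 3*i + 2 at
/// iteration i, so the two roots returned are 1 and 2.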
  4778. static std::pair<const SCEV *,const SCEV *>
  4779. SolveQuadraticEquation(const SCEVAddRecExpr *AddRec, ScalarEvolution &SE) {
  4780. assert(AddRec->getNumOperands() == 3 && "This is not a quadratic chrec!");
  4781. const SCEVConstant *LC = dyn_cast<SCEVConstant>(AddRec->getOperand(0));
  4782. const SCEVConstant *MC = dyn_cast<SCEVConstant>(AddRec->getOperand(1));
  4783. const SCEVConstant *NC = dyn_cast<SCEVConstant>(AddRec->getOperand(2));
  4784. // We currently can only solve this if the coefficients are constants.
  4785. if (!LC || !MC || !NC) {
  4786. const SCEV *CNC = SE.getCouldNotCompute();
  4787. return std::make_pair(CNC, CNC);
  4788. }
  4789. uint32_t BitWidth = LC->getValue()->getValue().getBitWidth();
  4790. const APInt &L = LC->getValue()->getValue();
  4791. const APInt &M = MC->getValue()->getValue();
  4792. const APInt &N = NC->getValue()->getValue();
  4793. APInt Two(BitWidth, 2);
  4794. APInt Four(BitWidth, 4);
  4795. {
  4796. using namespace APIntOps;
  4797. const APInt& C = L;
  4798. // Convert from chrec coefficients to polynomial coefficients AX^2+BX+C
  4799. // The B coefficient is M-N/2
  4800. APInt B(M);
  4801. B -= sdiv(N,Two);
  4802. // The A coefficient is N/2
  4803. APInt A(N.sdiv(Two));
  4804. // Compute the B^2-4ac term.
  4805. APInt SqrtTerm(B);
  4806. SqrtTerm *= B;
  4807. SqrtTerm -= Four * (A * C);
  4808. if (SqrtTerm.isNegative()) {
  4809. // The loop is provably infinite.
  4810. const SCEV *CNC = SE.getCouldNotCompute();
  4811. return std::make_pair(CNC, CNC);
  4812. }
  4813. // Compute sqrt(B^2-4ac). This is guaranteed to be the nearest
  4814. // integer value or else APInt::sqrt() will assert.
  4815. APInt SqrtVal(SqrtTerm.sqrt());
  4816. // Compute the two solutions for the quadratic formula.
  4817. // The divisions must be performed as signed divisions.
  4818. APInt NegB(-B);
  4819. APInt TwoA(A << 1);
  4820. if (TwoA.isMinValue()) {
  4821. const SCEV *CNC = SE.getCouldNotCompute();
  4822. return std::make_pair(CNC, CNC);
  4823. }
  4824. LLVMContext &Context = SE.getContext();
  4825. ConstantInt *Solution1 =
  4826. ConstantInt::get(Context, (NegB + SqrtVal).sdiv(TwoA));
  4827. ConstantInt *Solution2 =
  4828. ConstantInt::get(Context, (NegB - SqrtVal).sdiv(TwoA));
  4829. return std::make_pair(SE.getConstant(Solution1),
  4830. SE.getConstant(Solution2));
4831. } // end of block using APIntOps
  4832. }
  4833. /// HowFarToZero - Return the number of times a backedge comparing the specified
  4834. /// value to zero will execute. If not computable, return CouldNotCompute.
  4835. ///
  4836. /// This is only used for loops with a "x != y" exit test. The exit condition is
  4837. /// now expressed as a single expression, V = x-y. So the exit test is
  4838. /// effectively V != 0. We know and take advantage of the fact that this
4839. /// expression is only used in a comparison-with-zero context.
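///
/// For example, for the affine addrec {10,+,-2} with the no-wrap flag set,
/// Step is negative, so Distance = Start = 10 and the computed backedge-taken
/// count is 10 /u 2 = 5.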
  4840. ScalarEvolution::ExitLimit
  4841. ScalarEvolution::HowFarToZero(const SCEV *V, const Loop *L) {
  4842. // If the value is a constant
  4843. if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) {
  4844. // If the value is already zero, the branch will execute zero times.
  4845. if (C->getValue()->isZero()) return C;
  4846. return getCouldNotCompute(); // Otherwise it will loop infinitely.
  4847. }
  4848. const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(V);
  4849. if (!AddRec || AddRec->getLoop() != L)
  4850. return getCouldNotCompute();
  4851. // If this is a quadratic (3-term) AddRec {L,+,M,+,N}, find the roots of
  4852. // the quadratic equation to solve it.
  4853. if (AddRec->isQuadratic() && AddRec->getType()->isIntegerTy()) {
  4854. std::pair<const SCEV *,const SCEV *> Roots =
  4855. SolveQuadraticEquation(AddRec, *this);
  4856. const SCEVConstant *R1 = dyn_cast<SCEVConstant>(Roots.first);
  4857. const SCEVConstant *R2 = dyn_cast<SCEVConstant>(Roots.second);
  4858. if (R1 && R2) {
  4859. #if 0
  4860. dbgs() << "HFTZ: " << *V << " - sol#1: " << *R1
  4861. << " sol#2: " << *R2 << "\n";
  4862. #endif
  4863. // Pick the smallest positive root value.
  4864. if (ConstantInt *CB =
  4865. dyn_cast<ConstantInt>(ConstantExpr::getICmp(CmpInst::ICMP_ULT,
  4866. R1->getValue(),
  4867. R2->getValue()))) {
4868. if (!CB->getZExtValue())
  4869. std::swap(R1, R2); // R1 is the minimum root now.
  4870. // We can only use this value if the chrec ends up with an exact zero
  4871. // value at this index. When solving for "X*X != 5", for example, we
  4872. // should not accept a root of 2.
  4873. const SCEV *Val = AddRec->evaluateAtIteration(R1, *this);
  4874. if (Val->isZero())
  4875. return R1; // We found a quadratic root!
  4876. }
  4877. }
  4878. return getCouldNotCompute();
  4879. }
  4880. // Otherwise we can only handle this if it is affine.
  4881. if (!AddRec->isAffine())
  4882. return getCouldNotCompute();
  4883. // If this is an affine expression, the execution count of this branch is
  4884. // the minimum unsigned root of the following equation:
  4885. //
  4886. // Start + Step*N = 0 (mod 2^BW)
  4887. //
  4888. // equivalent to:
  4889. //
  4890. // Step*N = -Start (mod 2^BW)
  4891. //
  4892. // where BW is the common bit width of Start and Step.
  4893. // Get the initial value for the loop.
  4894. const SCEV *Start = getSCEVAtScope(AddRec->getStart(), L->getParentLoop());
  4895. const SCEV *Step = getSCEVAtScope(AddRec->getOperand(1), L->getParentLoop());
  4896. // For now we handle only constant steps.
  4897. //
  4898. // TODO: Handle a nonconstant Step given AddRec<NUW>. If the
  4899. // AddRec is NUW, then (in an unsigned sense) it cannot be counting up to wrap
  4900. // to 0, it must be counting down to equal 0. Consequently, N = Start / -Step.
  4901. // We have not yet seen any such cases.
  4902. const SCEVConstant *StepC = dyn_cast<SCEVConstant>(Step);
  4903. if (StepC == 0 || StepC->getValue()->equalsInt(0))
  4904. return getCouldNotCompute();
  4905. // For positive steps (counting up until unsigned overflow):
  4906. // N = -Start/Step (as unsigned)
  4907. // For negative steps (counting down to zero):
  4908. // N = Start/-Step
  4909. // First compute the unsigned distance from zero in the direction of Step.
  4910. bool CountDown = StepC->getValue()->getValue().isNegative();
  4911. const SCEV *Distance = CountDown ? Start : getNegativeSCEV(Start);
4912. // Handle unitary steps, which cannot wrap around.
  4913. // 1*N = -Start; -1*N = Start (mod 2^BW), so:
  4914. // N = Distance (as unsigned)
  4915. if (StepC->getValue()->equalsInt(1) || StepC->getValue()->isAllOnesValue()) {
  4916. ConstantRange CR = getUnsignedRange(Start);
  4917. const SCEV *MaxBECount;
  4918. if (!CountDown && CR.getUnsignedMin().isMinValue())
  4919. // When counting up, the worst starting value is 1, not 0.
  4920. MaxBECount = CR.getUnsignedMax().isMinValue()
  4921. ? getConstant(APInt::getMinValue(CR.getBitWidth()))
  4922. : getConstant(APInt::getMaxValue(CR.getBitWidth()));
  4923. else
  4924. MaxBECount = getConstant(CountDown ? CR.getUnsignedMax()
  4925. : -CR.getUnsignedMin());
  4926. return ExitLimit(Distance, MaxBECount);
  4927. }
4928. // If the recurrence is known not to wrap around, unsigned divide computes the
  4929. // back edge count. We know that the value will either become zero (and thus
  4930. // the loop terminates), that the loop will terminate through some other exit
  4931. // condition first, or that the loop has undefined behavior. This means
  4932. // we can't "miss" the exit value, even with nonunit stride.
  4933. //
4934. // FIXME: Prove that loops always exhibit *acceptable* undefined
  4935. // behavior. Loops must exhibit defined behavior until a wrapped value is
  4936. // actually used. So the trip count computed by udiv could be smaller than the
  4937. // number of well-defined iterations.
  4938. if (AddRec->getNoWrapFlags(SCEV::FlagNW)) {
  4939. // FIXME: We really want an "isexact" bit for udiv.
  4940. return getUDivExpr(Distance, CountDown ? getNegativeSCEV(Step) : Step);
  4941. }
  4942. // Then, try to solve the above equation provided that Start is constant.
  4943. if (const SCEVConstant *StartC = dyn_cast<SCEVConstant>(Start))
  4944. return SolveLinEquationWithOverflow(StepC->getValue()->getValue(),
  4945. -StartC->getValue()->getValue(),
  4946. *this);
  4947. return getCouldNotCompute();
  4948. }
  4949. /// HowFarToNonZero - Return the number of times a backedge checking the
  4950. /// specified value for nonzero will execute. If not computable, return
  4951. /// CouldNotCompute
  4952. ScalarEvolution::ExitLimit
  4953. ScalarEvolution::HowFarToNonZero(const SCEV *V, const Loop *L) {
  4954. // Loops that look like: while (X == 0) are very strange indeed. We don't
  4955. // handle them yet except for the trivial case. This could be expanded in the
  4956. // future as needed.
  4957. // If the value is a constant, check to see if it is known to be non-zero
  4958. // already. If so, the backedge will execute zero times.
  4959. if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) {
  4960. if (!C->getValue()->isNullValue())
  4961. return getConstant(C->getType(), 0);
  4962. return getCouldNotCompute(); // Otherwise it will loop infinitely.
  4963. }
  4964. // We could implement others, but I really doubt anyone writes loops like
  4965. // this, and if they did, they would already be constant folded.
  4966. return getCouldNotCompute();
  4967. }
  4968. /// getPredecessorWithUniqueSuccessorForBB - Return a predecessor of BB
  4969. /// (which may not be an immediate predecessor) which has exactly one
  4970. /// successor from which BB is reachable, or null if no such block is
  4971. /// found.
  4972. ///
  4973. std::pair<BasicBlock *, BasicBlock *>
  4974. ScalarEvolution::getPredecessorWithUniqueSuccessorForBB(BasicBlock *BB) {
  4975. // If the block has a unique predecessor, then there is no path from the
  4976. // predecessor to the block that does not go through the direct edge
  4977. // from the predecessor to the block.
  4978. if (BasicBlock *Pred = BB->getSinglePredecessor())
  4979. return std::make_pair(Pred, BB);
  4980. // A loop's header is defined to be a block that dominates the loop.
  4981. // If the header has a unique predecessor outside the loop, it must be
  4982. // a block that has exactly one successor that can reach the loop.
  4983. if (Loop *L = LI->getLoopFor(BB))
  4984. return std::make_pair(L->getLoopPredecessor(), L->getHeader());
  4985. return std::pair<BasicBlock *, BasicBlock *>();
  4986. }
  4987. /// HasSameValue - SCEV structural equivalence is usually sufficient for
  4988. /// testing whether two expressions are equal, however for the purposes of
  4989. /// looking for a condition guarding a loop, it can be useful to be a little
  4990. /// more general, since a front-end may have replicated the controlling
  4991. /// expression.
  4992. ///
  4993. static bool HasSameValue(const SCEV *A, const SCEV *B) {
  4994. // Quick check to see if they are the same SCEV.
  4995. if (A == B) return true;
  4996. // Otherwise, if they're both SCEVUnknown, it's possible that they hold
  4997. // two different instructions with the same value. Check for this case.
  4998. if (const SCEVUnknown *AU = dyn_cast<SCEVUnknown>(A))
  4999. if (const SCEVUnknown *BU = dyn_cast<SCEVUnknown>(B))
  5000. if (const Instruction *AI = dyn_cast<Instruction>(AU->getValue()))
  5001. if (const Instruction *BI = dyn_cast<Instruction>(BU->getValue()))
  5002. if (AI->isIdenticalTo(BI) && !AI->mayReadFromMemory())
  5003. return true;
  5004. // Otherwise assume they may have a different value.
  5005. return false;
  5006. }
  5007. /// SimplifyICmpOperands - Simplify LHS and RHS in a comparison with
  5008. /// predicate Pred. Return true iff any changes were made.
  5009. ///
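/// For example, with i32 constants, (%x u<= 13) is canonicalized to
/// (%x u< 14), and (%x u>= 1) is canonicalized to (%x != 0).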
  5010. bool ScalarEvolution::SimplifyICmpOperands(ICmpInst::Predicate &Pred,
  5011. const SCEV *&LHS, const SCEV *&RHS,
  5012. unsigned Depth) {
  5013. bool Changed = false;
  5014. // If we hit the max recursion limit bail out.
  5015. if (Depth >= 3)
  5016. return false;
  5017. // Canonicalize a constant to the right side.
  5018. if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) {
  5019. // Check for both operands constant.
  5020. if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) {
  5021. if (ConstantExpr::getICmp(Pred,
  5022. LHSC->getValue(),
  5023. RHSC->getValue())->isNullValue())
  5024. goto trivially_false;
  5025. else
  5026. goto trivially_true;
  5027. }
  5028. // Otherwise swap the operands to put the constant on the right.
  5029. std::swap(LHS, RHS);
  5030. Pred = ICmpInst::getSwappedPredicate(Pred);
  5031. Changed = true;
  5032. }
  5033. // If we're comparing an addrec with a value which is loop-invariant in the
  5034. // addrec's loop, put the addrec on the left. Also make a dominance check,
  5035. // as both operands could be addrecs loop-invariant in each other's loop.
  5036. if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(RHS)) {
  5037. const Loop *L = AR->getLoop();
  5038. if (isLoopInvariant(LHS, L) && properlyDominates(LHS, L->getHeader())) {
  5039. std::swap(LHS, RHS);
  5040. Pred = ICmpInst::getSwappedPredicate(Pred);
  5041. Changed = true;
  5042. }
  5043. }
  5044. // If there's a constant operand, canonicalize comparisons with boundary
  5045. // cases, and canonicalize *-or-equal comparisons to regular comparisons.
  5046. if (const SCEVConstant *RC = dyn_cast<SCEVConstant>(RHS)) {
  5047. const APInt &RA = RC->getValue()->getValue();
  5048. switch (Pred) {
  5049. default: llvm_unreachable("Unexpected ICmpInst::Predicate value!");
  5050. case ICmpInst::ICMP_EQ:
  5051. case ICmpInst::ICMP_NE:
  5052. // Fold ((-1) * %a) + %b == 0 (equivalent to %b-%a == 0) into %a == %b.
  5053. if (!RA)
  5054. if (const SCEVAddExpr *AE = dyn_cast<SCEVAddExpr>(LHS))
  5055. if (const SCEVMulExpr *ME = dyn_cast<SCEVMulExpr>(AE->getOperand(0)))
  5056. if (AE->getNumOperands() == 2 && ME->getNumOperands() == 2 &&
  5057. ME->getOperand(0)->isAllOnesValue()) {
  5058. RHS = AE->getOperand(1);
  5059. LHS = ME->getOperand(1);
  5060. Changed = true;
  5061. }
  5062. break;
  5063. case ICmpInst::ICMP_UGE:
  5064. if ((RA - 1).isMinValue()) {
  5065. Pred = ICmpInst::ICMP_NE;
  5066. RHS = getConstant(RA - 1);
  5067. Changed = true;
  5068. break;
  5069. }
  5070. if (RA.isMaxValue()) {
  5071. Pred = ICmpInst::ICMP_EQ;
  5072. Changed = true;
  5073. break;
  5074. }
  5075. if (RA.isMinValue()) goto trivially_true;
  5076. Pred = ICmpInst::ICMP_UGT;
  5077. RHS = getConstant(RA - 1);
  5078. Changed = true;
  5079. break;
  5080. case ICmpInst::ICMP_ULE:
  5081. if ((RA + 1).isMaxValue()) {
  5082. Pred = ICmpInst::ICMP_NE;
  5083. RHS = getConstant(RA + 1);
  5084. Changed = true;
  5085. break;
  5086. }
  5087. if (RA.isMinValue()) {
  5088. Pred = ICmpInst::ICMP_EQ;
  5089. Changed = true;
  5090. break;
  5091. }
  5092. if (RA.isMaxValue()) goto trivially_true;
  5093. Pred = ICmpInst::ICMP_ULT;
  5094. RHS = getConstant(RA + 1);
  5095. Changed = true;
  5096. break;
  5097. case ICmpInst::ICMP_SGE:
  5098. if ((RA - 1).isMinSignedValue()) {
  5099. Pred = ICmpInst::ICMP_NE;
  5100. RHS = getConstant(RA - 1);
  5101. Changed = true;
  5102. break;
  5103. }
  5104. if (RA.isMaxSignedValue()) {
  5105. Pred = ICmpInst::ICMP_EQ;
  5106. Changed = true;
  5107. break;
  5108. }
  5109. if (RA.isMinSignedValue()) goto trivially_true;
  5110. Pred = ICmpInst::ICMP_SGT;
  5111. RHS = getConstant(RA - 1);
  5112. Changed = true;
  5113. break;
  5114. case ICmpInst::ICMP_SLE:
  5115. if ((RA + 1).isMaxSignedValue()) {
  5116. Pred = ICmpInst::ICMP_NE;
  5117. RHS = getConstant(RA + 1);
  5118. Changed = true;
  5119. break;
  5120. }
  5121. if (RA.isMinSignedValue()) {
  5122. Pred = ICmpInst::ICMP_EQ;
  5123. Changed = true;
  5124. break;
  5125. }
  5126. if (RA.isMaxSignedValue()) goto trivially_true;
  5127. Pred = ICmpInst::ICMP_SLT;
  5128. RHS = getConstant(RA + 1);
  5129. Changed = true;
  5130. break;
  5131. case ICmpInst::ICMP_UGT:
  5132. if (RA.isMinValue()) {
  5133. Pred = ICmpInst::ICMP_NE;
  5134. Changed = true;
  5135. break;
  5136. }
  5137. if ((RA + 1).isMaxValue()) {
  5138. Pred = ICmpInst::ICMP_EQ;
  5139. RHS = getConstant(RA + 1);
  5140. Changed = true;
  5141. break;
  5142. }
  5143. if (RA.isMaxValue()) goto trivially_false;
  5144. break;
  5145. case ICmpInst::ICMP_ULT:
  5146. if (RA.isMaxValue()) {
  5147. Pred = ICmpInst::ICMP_NE;
  5148. Changed = true;
  5149. break;
  5150. }
  5151. if ((RA - 1).isMinValue()) {
  5152. Pred = ICmpInst::ICMP_EQ;
  5153. RHS = getConstant(RA - 1);
  5154. Changed = true;
  5155. break;
  5156. }
  5157. if (RA.isMinValue()) goto trivially_false;
  5158. break;
  5159. case ICmpInst::ICMP_SGT:
  5160. if (RA.isMinSignedValue()) {
  5161. Pred = ICmpInst::ICMP_NE;
  5162. Changed = true;
  5163. break;
  5164. }
  5165. if ((RA + 1).isMaxSignedValue()) {
  5166. Pred = ICmpInst::ICMP_EQ;
  5167. RHS = getConstant(RA + 1);
  5168. Changed = true;
  5169. break;
  5170. }
  5171. if (RA.isMaxSignedValue()) goto trivially_false;
  5172. break;
  5173. case ICmpInst::ICMP_SLT:
  5174. if (RA.isMaxSignedValue()) {
  5175. Pred = ICmpInst::ICMP_NE;
  5176. Changed = true;
  5177. break;
  5178. }
  5179. if ((RA - 1).isMinSignedValue()) {
  5180. Pred = ICmpInst::ICMP_EQ;
  5181. RHS = getConstant(RA - 1);
  5182. Changed = true;
  5183. break;
  5184. }
  5185. if (RA.isMinSignedValue()) goto trivially_false;
  5186. break;
  5187. }
  5188. }
  5189. // Check for obvious equality.
  5190. if (HasSameValue(LHS, RHS)) {
  5191. if (ICmpInst::isTrueWhenEqual(Pred))
  5192. goto trivially_true;
  5193. if (ICmpInst::isFalseWhenEqual(Pred))
  5194. goto trivially_false;
  5195. }
  5196. // If possible, canonicalize GE/LE comparisons to GT/LT comparisons, by
  5197. // adding or subtracting 1 from one of the operands.
  5198. switch (Pred) {
  5199. case ICmpInst::ICMP_SLE:
  5200. if (!getSignedRange(RHS).getSignedMax().isMaxSignedValue()) {
  5201. RHS = getAddExpr(getConstant(RHS->getType(), 1, true), RHS,
  5202. SCEV::FlagNSW);
  5203. Pred = ICmpInst::ICMP_SLT;
  5204. Changed = true;
  5205. } else if (!getSignedRange(LHS).getSignedMin().isMinSignedValue()) {
  5206. LHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), LHS,
  5207. SCEV::FlagNSW);
  5208. Pred = ICmpInst::ICMP_SLT;
  5209. Changed = true;
  5210. }
  5211. break;
  5212. case ICmpInst::ICMP_SGE:
  5213. if (!getSignedRange(RHS).getSignedMin().isMinSignedValue()) {
  5214. RHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), RHS,
  5215. SCEV::FlagNSW);
  5216. Pred = ICmpInst::ICMP_SGT;
  5217. Changed = true;
  5218. } else if (!getSignedRange(LHS).getSignedMax().isMaxSignedValue()) {
  5219. LHS = getAddExpr(getConstant(RHS->getType(), 1, true), LHS,
  5220. SCEV::FlagNSW);
  5221. Pred = ICmpInst::ICMP_SGT;
  5222. Changed = true;
  5223. }
  5224. break;
  5225. case ICmpInst::ICMP_ULE:
  5226. if (!getUnsignedRange(RHS).getUnsignedMax().isMaxValue()) {
  5227. RHS = getAddExpr(getConstant(RHS->getType(), 1, true), RHS,
  5228. SCEV::FlagNUW);
  5229. Pred = ICmpInst::ICMP_ULT;
  5230. Changed = true;
  5231. } else if (!getUnsignedRange(LHS).getUnsignedMin().isMinValue()) {
  5232. LHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), LHS,
  5233. SCEV::FlagNUW);
  5234. Pred = ICmpInst::ICMP_ULT;
  5235. Changed = true;
  5236. }
  5237. break;
  5238. case ICmpInst::ICMP_UGE:
  5239. if (!getUnsignedRange(RHS).getUnsignedMin().isMinValue()) {
  5240. RHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), RHS,
  5241. SCEV::FlagNUW);
  5242. Pred = ICmpInst::ICMP_UGT;
  5243. Changed = true;
  5244. } else if (!getUnsignedRange(LHS).getUnsignedMax().isMaxValue()) {
  5245. LHS = getAddExpr(getConstant(RHS->getType(), 1, true), LHS,
  5246. SCEV::FlagNUW);
  5247. Pred = ICmpInst::ICMP_UGT;
  5248. Changed = true;
  5249. }
  5250. break;
  5251. default:
  5252. break;
  5253. }
  5254. // TODO: More simplifications are possible here.
  5255. // Recursively simplify until we either hit a recursion limit or nothing
  5256. // changes.
  5257. if (Changed)
  5258. return SimplifyICmpOperands(Pred, LHS, RHS, Depth+1);
  5259. return Changed;
  5260. trivially_true:
  5261. // Return 0 == 0.
  5262. LHS = RHS = getConstant(ConstantInt::getFalse(getContext()));
  5263. Pred = ICmpInst::ICMP_EQ;
  5264. return true;
  5265. trivially_false:
  5266. // Return 0 != 0.
  5267. LHS = RHS = getConstant(ConstantInt::getFalse(getContext()));
  5268. Pred = ICmpInst::ICMP_NE;
  5269. return true;
  5270. }
  5271. bool ScalarEvolution::isKnownNegative(const SCEV *S) {
  5272. return getSignedRange(S).getSignedMax().isNegative();
  5273. }
  5274. bool ScalarEvolution::isKnownPositive(const SCEV *S) {
  5275. return getSignedRange(S).getSignedMin().isStrictlyPositive();
  5276. }
  5277. bool ScalarEvolution::isKnownNonNegative(const SCEV *S) {
  5278. return !getSignedRange(S).getSignedMin().isNegative();
  5279. }
  5280. bool ScalarEvolution::isKnownNonPositive(const SCEV *S) {
  5281. return !getSignedRange(S).getSignedMax().isStrictlyPositive();
  5282. }
  5283. bool ScalarEvolution::isKnownNonZero(const SCEV *S) {
  5284. return isKnownNegative(S) || isKnownPositive(S);
  5285. }
  5286. bool ScalarEvolution::isKnownPredicate(ICmpInst::Predicate Pred,
  5287. const SCEV *LHS, const SCEV *RHS) {
  5288. // Canonicalize the inputs first.
  5289. (void)SimplifyICmpOperands(Pred, LHS, RHS);
  5290. // If LHS or RHS is an addrec, check to see if the condition is true in
  5291. // every iteration of the loop.
  5292. if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(LHS))
  5293. if (isLoopEntryGuardedByCond(
  5294. AR->getLoop(), Pred, AR->getStart(), RHS) &&
  5295. isLoopBackedgeGuardedByCond(
  5296. AR->getLoop(), Pred, AR->getPostIncExpr(*this), RHS))
  5297. return true;
  5298. if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(RHS))
  5299. if (isLoopEntryGuardedByCond(
  5300. AR->getLoop(), Pred, LHS, AR->getStart()) &&
  5301. isLoopBackedgeGuardedByCond(
  5302. AR->getLoop(), Pred, LHS, AR->getPostIncExpr(*this)))
  5303. return true;
  5304. // Otherwise see what can be done with known constant ranges.
  5305. return isKnownPredicateWithRanges(Pred, LHS, RHS);
  5306. }
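/// isKnownPredicateWithRanges - Decide the predicate from the known constant
/// ranges of LHS and RHS where possible. For example, if LHS has unsigned
/// range [0, 10) and RHS has unsigned range [10, 20), then LHS u< RHS is
/// known to hold.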
  5307. bool
  5308. ScalarEvolution::isKnownPredicateWithRanges(ICmpInst::Predicate Pred,
  5309. const SCEV *LHS, const SCEV *RHS) {
  5310. if (HasSameValue(LHS, RHS))
  5311. return ICmpInst::isTrueWhenEqual(Pred);
  5312. // This code is split out from isKnownPredicate because it is called from
  5313. // within isLoopEntryGuardedByCond.
  5314. switch (Pred) {
  5315. default:
  5316. llvm_unreachable("Unexpected ICmpInst::Predicate value!");
  5317. case ICmpInst::ICMP_SGT:
  5318. Pred = ICmpInst::ICMP_SLT;
  5319. std::swap(LHS, RHS);
  5320. case ICmpInst::ICMP_SLT: {
  5321. ConstantRange LHSRange = getSignedRange(LHS);
  5322. ConstantRange RHSRange = getSignedRange(RHS);
  5323. if (LHSRange.getSignedMax().slt(RHSRange.getSignedMin()))
  5324. return true;
  5325. if (LHSRange.getSignedMin().sge(RHSRange.getSignedMax()))
  5326. return false;
  5327. break;
  5328. }
  5329. case ICmpInst::ICMP_SGE:
  5330. Pred = ICmpInst::ICMP_SLE;
  5331. std::swap(LHS, RHS);
  5332. case ICmpInst::ICMP_SLE: {
  5333. ConstantRange LHSRange = getSignedRange(LHS);
  5334. ConstantRange RHSRange = getSignedRange(RHS);
  5335. if (LHSRange.getSignedMax().sle(RHSRange.getSignedMin()))
  5336. return true;
  5337. if (LHSRange.getSignedMin().sgt(RHSRange.getSignedMax()))
  5338. return false;
  5339. break;
  5340. }
  5341. case ICmpInst::ICMP_UGT:
  5342. Pred = ICmpInst::ICMP_ULT;
  5343. std::swap(LHS, RHS);
  5344. case ICmpInst::ICMP_ULT: {
  5345. ConstantRange LHSRange = getUnsignedRange(LHS);
  5346. ConstantRange RHSRange = getUnsignedRange(RHS);
  5347. if (LHSRange.getUnsignedMax().ult(RHSRange.getUnsignedMin()))
  5348. return true;
  5349. if (LHSRange.getUnsignedMin().uge(RHSRange.getUnsignedMax()))
  5350. return false;
  5351. break;
  5352. }
  5353. case ICmpInst::ICMP_UGE:
  5354. Pred = ICmpInst::ICMP_ULE;
  5355. std::swap(LHS, RHS);
  5356. case ICmpInst::ICMP_ULE: {
  5357. ConstantRange LHSRange = getUnsignedRange(LHS);
  5358. ConstantRange RHSRange = getUnsignedRange(RHS);
  5359. if (LHSRange.getUnsignedMax().ule(RHSRange.getUnsignedMin()))
  5360. return true;
  5361. if (LHSRange.getUnsignedMin().ugt(RHSRange.getUnsignedMax()))
  5362. return false;
  5363. break;
  5364. }
  5365. case ICmpInst::ICMP_NE: {
  5366. if (getUnsignedRange(LHS).intersectWith(getUnsignedRange(RHS)).isEmptySet())
  5367. return true;
  5368. if (getSignedRange(LHS).intersectWith(getSignedRange(RHS)).isEmptySet())
  5369. return true;
  5370. const SCEV *Diff = getMinusSCEV(LHS, RHS);
  5371. if (isKnownNonZero(Diff))
  5372. return true;
  5373. break;
  5374. }
  5375. case ICmpInst::ICMP_EQ:
  5376. // The check at the top of the function catches the case where
  5377. // the values are known to be equal.
  5378. break;
  5379. }
  5380. return false;
  5381. }
  5382. /// isLoopBackedgeGuardedByCond - Test whether the backedge of the loop is
  5383. /// protected by a conditional between LHS and RHS. This is used to
5384. /// eliminate casts.
  5385. bool
  5386. ScalarEvolution::isLoopBackedgeGuardedByCond(const Loop *L,
  5387. ICmpInst::Predicate Pred,
  5388. const SCEV *LHS, const SCEV *RHS) {
  5389. // Interpret a null as meaning no loop, where there is obviously no guard
  5390. // (interprocedural conditions notwithstanding).
  5391. if (!L) return true;
  5392. BasicBlock *Latch = L->getLoopLatch();
  5393. if (!Latch)
  5394. return false;
  5395. BranchInst *LoopContinuePredicate =
  5396. dyn_cast<BranchInst>(Latch->getTerminator());
  5397. if (!LoopContinuePredicate ||
  5398. LoopContinuePredicate->isUnconditional())
  5399. return false;
  5400. return isImpliedCond(Pred, LHS, RHS,
  5401. LoopContinuePredicate->getCondition(),
  5402. LoopContinuePredicate->getSuccessor(0) != L->getHeader());
  5403. }
  5404. /// isLoopEntryGuardedByCond - Test whether entry to the loop is protected
  5405. /// by a conditional between LHS and RHS. This is used to help avoid max
  5406. /// expressions in loop trip counts, and to eliminate casts.
  5407. bool
  5408. ScalarEvolution::isLoopEntryGuardedByCond(const Loop *L,
  5409. ICmpInst::Predicate Pred,
  5410. const SCEV *LHS, const SCEV *RHS) {
  5411. // Interpret a null as meaning no loop, where there is obviously no guard
  5412. // (interprocedural conditions notwithstanding).
  5413. if (!L) return false;
  5414. // Starting at the loop predecessor, climb up the predecessor chain, as long
  5415. // as there are predecessors that can be found that have unique successors
  5416. // leading to the original header.
  5417. for (std::pair<BasicBlock *, BasicBlock *>
  5418. Pair(L->getLoopPredecessor(), L->getHeader());
  5419. Pair.first;
  5420. Pair = getPredecessorWithUniqueSuccessorForBB(Pair.first)) {
  5421. BranchInst *LoopEntryPredicate =
  5422. dyn_cast<BranchInst>(Pair.first->getTerminator());
  5423. if (!LoopEntryPredicate ||
  5424. LoopEntryPredicate->isUnconditional())
  5425. continue;
  5426. if (isImpliedCond(Pred, LHS, RHS,
  5427. LoopEntryPredicate->getCondition(),
  5428. LoopEntryPredicate->getSuccessor(0) != Pair.second))
  5429. return true;
  5430. }
  5431. return false;
  5432. }
  5433. /// RAII wrapper to prevent recursive application of isImpliedCond.
  5434. /// ScalarEvolution's PendingLoopPredicates set must be empty unless we are
  5435. /// currently evaluating isImpliedCond.
  5436. struct MarkPendingLoopPredicate {
  5437. Value *Cond;
  5438. DenseSet<Value*> &LoopPreds;
  5439. bool Pending;
  5440. MarkPendingLoopPredicate(Value *C, DenseSet<Value*> &LP)
  5441. : Cond(C), LoopPreds(LP) {
  5442. Pending = !LoopPreds.insert(Cond).second;
  5443. }
  5444. ~MarkPendingLoopPredicate() {
  5445. if (!Pending)
  5446. LoopPreds.erase(Cond);
  5447. }
  5448. };
  5449. /// isImpliedCond - Test whether the condition described by Pred, LHS,
  5450. /// and RHS is true whenever the given Cond value evaluates to true.
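///
/// For example, if the dominating branch condition is (%x u< 10) and the
/// query is (%x u< 20), the predicates match after canonicalization and the
/// result is decided by isImpliedCondOperands on the two operand pairs.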
  5451. bool ScalarEvolution::isImpliedCond(ICmpInst::Predicate Pred,
  5452. const SCEV *LHS, const SCEV *RHS,
  5453. Value *FoundCondValue,
  5454. bool Inverse) {
  5455. MarkPendingLoopPredicate Mark(FoundCondValue, PendingLoopPredicates);
  5456. if (Mark.Pending)
  5457. return false;
  5458. // Recursively handle And and Or conditions.
  5459. if (BinaryOperator *BO = dyn_cast<BinaryOperator>(FoundCondValue)) {
  5460. if (BO->getOpcode() == Instruction::And) {
  5461. if (!Inverse)
  5462. return isImpliedCond(Pred, LHS, RHS, BO->getOperand(0), Inverse) ||
  5463. isImpliedCond(Pred, LHS, RHS, BO->getOperand(1), Inverse);
  5464. } else if (BO->getOpcode() == Instruction::Or) {
  5465. if (Inverse)
  5466. return isImpliedCond(Pred, LHS, RHS, BO->getOperand(0), Inverse) ||
  5467. isImpliedCond(Pred, LHS, RHS, BO->getOperand(1), Inverse);
  5468. }
  5469. }
  5470. ICmpInst *ICI = dyn_cast<ICmpInst>(FoundCondValue);
  5471. if (!ICI) return false;
  5472. // Bail if the ICmp's operands' types are wider than the needed type
  5473. // before attempting to call getSCEV on them. This avoids infinite
  5474. // recursion, since the analysis of widening casts can require loop
  5475. // exit condition information for overflow checking, which would
  5476. // lead back here.
  5477. if (getTypeSizeInBits(LHS->getType()) <
  5478. getTypeSizeInBits(ICI->getOperand(0)->getType()))
  5479. return false;
5480. // We have now found a conditional branch that dominates the loop or controls
  5481. // the loop latch. Check to see if it is the comparison we are looking for.
  ICmpInst::Predicate FoundPred;
  if (Inverse)
    FoundPred = ICI->getInversePredicate();
  else
    FoundPred = ICI->getPredicate();

  const SCEV *FoundLHS = getSCEV(ICI->getOperand(0));
  const SCEV *FoundRHS = getSCEV(ICI->getOperand(1));

  // Balance the types. The case where FoundLHS' type is wider than
  // LHS' type is checked for above.
  if (getTypeSizeInBits(LHS->getType()) >
      getTypeSizeInBits(FoundLHS->getType())) {
    if (CmpInst::isSigned(Pred)) {
      FoundLHS = getSignExtendExpr(FoundLHS, LHS->getType());
      FoundRHS = getSignExtendExpr(FoundRHS, LHS->getType());
    } else {
      FoundLHS = getZeroExtendExpr(FoundLHS, LHS->getType());
      FoundRHS = getZeroExtendExpr(FoundRHS, LHS->getType());
    }
  }

  // Canonicalize the query to match the way instcombine will have
  // canonicalized the comparison.
  if (SimplifyICmpOperands(Pred, LHS, RHS))
    if (LHS == RHS)
      return CmpInst::isTrueWhenEqual(Pred);
  // Canonicalize the found condition in the same way. Unlike the query above,
  // we can't conclude a result directly from the simplified values.
  SimplifyICmpOperands(FoundPred, FoundLHS, FoundRHS);
  // Check to see if we can make the LHS or RHS match.
  if (LHS == FoundRHS || RHS == FoundLHS) {
    if (isa<SCEVConstant>(RHS)) {
      std::swap(FoundLHS, FoundRHS);
      FoundPred = ICmpInst::getSwappedPredicate(FoundPred);
    } else {
      std::swap(LHS, RHS);
      Pred = ICmpInst::getSwappedPredicate(Pred);
    }
  }

  // Check whether the found predicate is the same as the desired predicate.
  if (FoundPred == Pred)
    return isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS);

  // Check whether swapping the found predicate makes it the same as the
  // desired predicate.
  if (ICmpInst::getSwappedPredicate(FoundPred) == Pred) {
    if (isa<SCEVConstant>(RHS))
      return isImpliedCondOperands(Pred, LHS, RHS, FoundRHS, FoundLHS);
    else
      return isImpliedCondOperands(ICmpInst::getSwappedPredicate(Pred),
                                   RHS, LHS, FoundLHS, FoundRHS);
  }

  // Check whether the actual condition is beyond sufficient.
  if (FoundPred == ICmpInst::ICMP_EQ)
    if (ICmpInst::isTrueWhenEqual(Pred))
      if (isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS))
        return true;
  if (Pred == ICmpInst::ICMP_NE)
    if (!ICmpInst::isTrueWhenEqual(FoundPred))
      if (isImpliedCondOperands(FoundPred, LHS, RHS, FoundLHS, FoundRHS))
        return true;

  // Otherwise assume the worst.
  return false;
}

/// isImpliedCondOperands - Test whether the condition described by Pred,
/// LHS, and RHS is true whenever the condition described by Pred, FoundLHS,
/// and FoundRHS is true.
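// Note (added commentary, not in the original source): getNotSCEV(X) builds
// the expression -1 - X, i.e. the bitwise-not of X. Flipping and swapping
// both operands preserves the truth of the comparison, so a known
// "FoundLHS pred FoundRHS" also lets us try to prove the goal from
// "~FoundRHS pred ~FoundLHS". For example, with i8 values x = 3 and y = 7:
// x < y, and indeed ~y = -8 < ~x = -4.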
bool ScalarEvolution::isImpliedCondOperands(ICmpInst::Predicate Pred,
                                            const SCEV *LHS, const SCEV *RHS,
                                            const SCEV *FoundLHS,
                                            const SCEV *FoundRHS) {
  return isImpliedCondOperandsHelper(Pred, LHS, RHS,
                                     FoundLHS, FoundRHS) ||
         // ~x < ~y --> x > y
         isImpliedCondOperandsHelper(Pred, LHS, RHS,
                                     getNotSCEV(FoundRHS),
                                     getNotSCEV(FoundLHS));
}

/// isImpliedCondOperandsHelper - Test whether the condition described by
/// Pred, LHS, and RHS is true whenever the condition described by Pred,
/// FoundLHS, and FoundRHS is true.
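// Note (added commentary, not in the original source): each case below relies
// on simple monotonicity. For instance, if we know LHS <=s FoundLHS and
// RHS >=s FoundRHS, then FoundLHS <s FoundRHS forces LHS <s RHS, because
// LHS <=s FoundLHS <s FoundRHS <=s RHS. The unsigned and "greater-than"
// cases are the mirror images of the same argument.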
bool
ScalarEvolution::isImpliedCondOperandsHelper(ICmpInst::Predicate Pred,
                                             const SCEV *LHS, const SCEV *RHS,
                                             const SCEV *FoundLHS,
                                             const SCEV *FoundRHS) {
  switch (Pred) {
  default: llvm_unreachable("Unexpected ICmpInst::Predicate value!");
  case ICmpInst::ICMP_EQ:
  case ICmpInst::ICMP_NE:
    if (HasSameValue(LHS, FoundLHS) && HasSameValue(RHS, FoundRHS))
      return true;
    break;
  case ICmpInst::ICMP_SLT:
  case ICmpInst::ICMP_SLE:
    if (isKnownPredicateWithRanges(ICmpInst::ICMP_SLE, LHS, FoundLHS) &&
        isKnownPredicateWithRanges(ICmpInst::ICMP_SGE, RHS, FoundRHS))
      return true;
    break;
  case ICmpInst::ICMP_SGT:
  case ICmpInst::ICMP_SGE:
    if (isKnownPredicateWithRanges(ICmpInst::ICMP_SGE, LHS, FoundLHS) &&
        isKnownPredicateWithRanges(ICmpInst::ICMP_SLE, RHS, FoundRHS))
      return true;
    break;
  case ICmpInst::ICMP_ULT:
  case ICmpInst::ICMP_ULE:
    if (isKnownPredicateWithRanges(ICmpInst::ICMP_ULE, LHS, FoundLHS) &&
        isKnownPredicateWithRanges(ICmpInst::ICMP_UGE, RHS, FoundRHS))
      return true;
    break;
  case ICmpInst::ICMP_UGT:
  case ICmpInst::ICMP_UGE:
    if (isKnownPredicateWithRanges(ICmpInst::ICMP_UGE, LHS, FoundLHS) &&
        isKnownPredicateWithRanges(ICmpInst::ICMP_ULE, RHS, FoundRHS))
      return true;
    break;
  }

  return false;
}

/// getBECount - Subtract the end and start values and divide by the step,
/// rounding up, to get the number of times the backedge is executed. Return
/// CouldNotCompute if an intermediate computation overflows.
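// Illustrative example (added, not in the original source): with Start = 0,
// End = 10 and Step = 3, the rounded-up division computed below is
// (10 - 0 + (3 - 1)) /u 3 = 4.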
const SCEV *ScalarEvolution::getBECount(const SCEV *Start,
                                        const SCEV *End,
                                        const SCEV *Step,
                                        bool NoWrap) {
  assert(!isKnownNegative(Step) &&
         "This code doesn't handle negative strides yet!");

  Type *Ty = Start->getType();

  // When Start == End, we have an exact BECount == 0. Short-circuit this case
  // here because SCEV may not be able to determine that the unsigned division
  // after rounding is zero.
  if (Start == End)
    return getConstant(Ty, 0);

  const SCEV *NegOne = getConstant(Ty, (uint64_t)-1);
  const SCEV *Diff = getMinusSCEV(End, Start);
  const SCEV *RoundUp = getAddExpr(Step, NegOne);

  // Add an adjustment to the difference between End and Start so that
  // the division will effectively round up.
  const SCEV *Add = getAddExpr(Diff, RoundUp);

  if (!NoWrap) {
    // Check Add for unsigned overflow.
    // TODO: More sophisticated things could be done here.
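    // Note (added commentary, not in the original source): the check below
    // redoes the addition in a type one bit wider; if zero-extending the
    // narrow sum gives a different value than adding the zero-extended
    // operands, the narrow addition must have wrapped. E.g. in i8,
    // 200 + 100 = 44, while the same operands in i9 give 300, so the
    // mismatch exposes the overflow.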
    Type *WideTy = IntegerType::get(getContext(),
                                    getTypeSizeInBits(Ty) + 1);
    const SCEV *EDiff = getZeroExtendExpr(Diff, WideTy);
    const SCEV *ERoundUp = getZeroExtendExpr(RoundUp, WideTy);
    const SCEV *OperandExtendedAdd = getAddExpr(EDiff, ERoundUp);
    if (getZeroExtendExpr(Add, WideTy) != OperandExtendedAdd)
      return getCouldNotCompute();
  }

  return getUDivExpr(Add, Step);
}

/// HowManyLessThans - Return the number of times a backedge containing the
/// specified less-than comparison will execute. If not computable, return
/// CouldNotCompute.
ScalarEvolution::ExitLimit
ScalarEvolution::HowManyLessThans(const SCEV *LHS, const SCEV *RHS,
                                  const Loop *L, bool isSigned) {
  // Only handle: "ADDREC < LoopInvariant".
  if (!isLoopInvariant(RHS, L)) return getCouldNotCompute();

  const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(LHS);
  if (!AddRec || AddRec->getLoop() != L)
    return getCouldNotCompute();

  // Check to see if we have a flag which makes analysis easy.
  bool NoWrap = isSigned ?
    AddRec->getNoWrapFlags((SCEV::NoWrapFlags)(SCEV::FlagNSW | SCEV::FlagNW)) :
    AddRec->getNoWrapFlags((SCEV::NoWrapFlags)(SCEV::FlagNUW | SCEV::FlagNW));

  if (AddRec->isAffine()) {
    unsigned BitWidth = getTypeSizeInBits(AddRec->getType());
    const SCEV *Step = AddRec->getStepRecurrence(*this);

    if (Step->isZero())
      return getCouldNotCompute();
    if (Step->isOne()) {
      // With unit stride, the iteration never steps past the limit value.
    } else if (isKnownPositive(Step)) {
      // Test whether a positive iteration can step past the limit
      // value and past the maximum value for its type in a single step.
      // Note that it's not sufficient to check NoWrap here, because even
      // though the value after a wrap is undefined, it's not undefined
      // behavior, so if wrap does occur, the loop could either terminate or
      // loop infinitely, but in either case, the loop is guaranteed to
      // iterate at least until the iteration where the wrapping occurs.
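      // Illustrative example (added, not in the original source): in i8
      // unsigned arithmetic with Step = 10, Max - (Step - 1) = 255 - 9 = 246.
      // If RHS can be as large as 250, an iteration value of 249 is still
      // below the limit yet 249 + 10 wraps, so we give up here.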
      const SCEV *One = getConstant(Step->getType(), 1);
      if (isSigned) {
        APInt Max = APInt::getSignedMaxValue(BitWidth);
        if ((Max - getSignedRange(getMinusSCEV(Step, One)).getSignedMax())
              .slt(getSignedRange(RHS).getSignedMax()))
          return getCouldNotCompute();
      } else {
        APInt Max = APInt::getMaxValue(BitWidth);
        if ((Max - getUnsignedRange(getMinusSCEV(Step, One)).getUnsignedMax())
              .ult(getUnsignedRange(RHS).getUnsignedMax()))
          return getCouldNotCompute();
      }
    } else
      // TODO: Handle negative strides here and below.
      return getCouldNotCompute();

    // We know the LHS is of the form {n,+,s} and the RHS is some loop-invariant
    // m. So, we count the number of iterations in which {n,+,s} < m is true.
    // Note that we cannot simply return max(m-n,0)/s because it's not safe to
    // treat m-n as signed nor unsigned due to overflow possibility.
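    // Illustrative example (added, not in the original source): for
    // {0,+,3} <u 10 the compare holds for the values 0, 3, 6 and 9, and the
    // computation below yields (10 - 0 + 2) /u 3 = 4 iterations. If the loop
    // is not known to be entered (n could already exceed m), End is raised to
    // max(m,n) below so the count degrades to zero instead of wrapping.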
    // First, we get the value of the LHS in the first iteration: n
    const SCEV *Start = AddRec->getOperand(0);

    // Determine the minimum constant start value.
    const SCEV *MinStart = getConstant(isSigned ?
      getSignedRange(Start).getSignedMin() :
      getUnsignedRange(Start).getUnsignedMin());

    // If we know that the condition is true in order to enter the loop,
    // then we know that it will run exactly (m-n)/s times. Otherwise, we
    // only know that it will execute (max(m,n)-n)/s times. In both cases,
    // the division must round up.
    const SCEV *End = RHS;
    if (!isLoopEntryGuardedByCond(L,
                                  isSigned ? ICmpInst::ICMP_SLT :
                                             ICmpInst::ICMP_ULT,
                                  getMinusSCEV(Start, Step), RHS))
      End = isSigned ? getSMaxExpr(RHS, Start)
                     : getUMaxExpr(RHS, Start);

    // Determine the maximum constant end value.
    const SCEV *MaxEnd = getConstant(isSigned ?
      getSignedRange(End).getSignedMax() :
      getUnsignedRange(End).getUnsignedMax());

    // If MaxEnd is within a step of the maximum integer value in its type,
    // adjust it down to the minimum value which would produce the same effect.
    // This allows the subsequent ceiling division of (N+(step-1))/step to
    // compute the correct value.
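    // Illustrative example (added, not in the original source): in i8
    // unsigned arithmetic with Step = 3, clamping MaxEnd to 255 - 2 = 253
    // keeps the later (MaxEnd - MinStart + 2) sum from wrapping while giving
    // the same rounded-up quotient as any larger end value would.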
    const SCEV *StepMinusOne = getMinusSCEV(Step,
                                            getConstant(Step->getType(), 1));
    MaxEnd = isSigned ?
      getSMinExpr(MaxEnd,
                  getMinusSCEV(getConstant(APInt::getSignedMaxValue(BitWidth)),
                               StepMinusOne)) :
      getUMinExpr(MaxEnd,
                  getMinusSCEV(getConstant(APInt::getMaxValue(BitWidth)),
                               StepMinusOne));

    // Finally, we subtract these two values and divide, rounding up, to get
    // the number of times the backedge is executed.
    const SCEV *BECount = getBECount(Start, End, Step, NoWrap);

    // The maximum backedge count is similar, except using the minimum start
    // value and the maximum end value.
    // If we already have an exact constant BECount, use it instead.
    const SCEV *MaxBECount = isa<SCEVConstant>(BECount) ? BECount
                           : getBECount(MinStart, MaxEnd, Step, NoWrap);
    // If the stride is nonconstant and NoWrap == true, then
    // getBECount(MinStart, MaxEnd) may fail to compute. That would leave an
    // exact BECount alongside an invalid MaxBECount, so fall back to BECount
    // in order not to miss optimization opportunities.
    if (isa<SCEVCouldNotCompute>(MaxBECount))
      MaxBECount = BECount;

    return ExitLimit(BECount, MaxBECount);
  }

  return getCouldNotCompute();
}

/// getNumIterationsInRange - Return the number of iterations of this loop that
/// produce values in the specified constant range. Another way of looking at
/// this is that it returns the first iteration number where the value is not
/// in the range, thus computing the exit count. If the iteration count can't
/// be computed, an instance of SCEVCouldNotCompute is returned.
const SCEV *SCEVAddRecExpr::getNumIterationsInRange(ConstantRange Range,
                                                    ScalarEvolution &SE) const {
  if (Range.isFullSet())  // Infinite loop.
    return SE.getCouldNotCompute();

  // If the start is a non-zero constant, shift the range to simplify things.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(getStart()))
    if (!SC->getValue()->isZero()) {
      SmallVector<const SCEV *, 4> Operands(op_begin(), op_end());
      Operands[0] = SE.getConstant(SC->getType(), 0);
      const SCEV *Shifted = SE.getAddRecExpr(Operands, getLoop(),
                                             getNoWrapFlags(FlagNW));
      if (const SCEVAddRecExpr *ShiftedAddRec =
            dyn_cast<SCEVAddRecExpr>(Shifted))
        return ShiftedAddRec->getNumIterationsInRange(
            Range.subtract(SC->getValue()->getValue()), SE);
      // This is strange and shouldn't happen.
      return SE.getCouldNotCompute();
    }
  // The only time we can solve this is when we have all constant indices.
  // Otherwise, we cannot determine the overflow conditions.
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
    if (!isa<SCEVConstant>(getOperand(i)))
      return SE.getCouldNotCompute();

  // Okay at this point we know that all elements of the chrec are constants and
  // that the start element is zero.

  // First check to see if the range contains zero. If not, the first
  // iteration exits.
  unsigned BitWidth = SE.getTypeSizeInBits(getType());
  if (!Range.contains(APInt(BitWidth, 0)))
    return SE.getConstant(getType(), 0);

  if (isAffine()) {
    // If this is an affine expression then we have this situation:
    //   Solve {0,+,A} in Range  ===  Ax in Range
    // We know that zero is in the range. If A is positive then we know that
    // the upper value of the range must be the first possible exit value.
    // If A is negative then the lower of the range is the last possible loop
    // value. Also note that we already checked for a full range.
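    // Illustrative example (added, not in the original source): for {0,+,2}
    // and Range = [0,5), End = 5 - 1 = 4, so ExitVal = (4 + 2) /u 2 = 3.
    // Evaluating the recurrence at iteration 3 gives 6, which is outside the
    // range, while iteration 2 gives 4, which is still inside, so 3 is the
    // exit count returned below.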
    APInt One(BitWidth,1);
    APInt A = cast<SCEVConstant>(getOperand(1))->getValue()->getValue();
    APInt End = A.sge(One) ? (Range.getUpper() - One) : Range.getLower();

    // The exit value should be (End+A)/A.
    APInt ExitVal = (End + A).udiv(A);
    ConstantInt *ExitValue = ConstantInt::get(SE.getContext(), ExitVal);

    // Evaluate at the exit value. If we really did fall out of the valid
    // range, then we computed our trip count, otherwise wrap around or other
    // things must have happened.
    ConstantInt *Val = EvaluateConstantChrecAtConstant(this, ExitValue, SE);
    if (Range.contains(Val->getValue()))
      return SE.getCouldNotCompute();  // Something strange happened

    // Ensure that the previous value is in the range. This is a sanity check.
    assert(Range.contains(
           EvaluateConstantChrecAtConstant(this,
           ConstantInt::get(SE.getContext(), ExitVal - One), SE)->getValue()) &&
           "Linear scev computation is off in a bad way!");
    return SE.getConstant(ExitValue);
  } else if (isQuadratic()) {
    // If this is a quadratic (3-term) AddRec {L,+,M,+,N}, find the roots of the
    // quadratic equation to solve it. To do this, we must frame our problem in
    // terms of figuring out when zero is crossed, instead of when
    // Range.getUpper() is crossed.
    SmallVector<const SCEV *, 4> NewOps(op_begin(), op_end());
    NewOps[0] = SE.getNegativeSCEV(SE.getConstant(Range.getUpper()));
    const SCEV *NewAddRec = SE.getAddRecExpr(NewOps, getLoop(),
                                             // getNoWrapFlags(FlagNW)
                                             FlagAnyWrap);

    // Next, solve the constructed addrec
    std::pair<const SCEV *,const SCEV *> Roots =
      SolveQuadraticEquation(cast<SCEVAddRecExpr>(NewAddRec), SE);
    const SCEVConstant *R1 = dyn_cast<SCEVConstant>(Roots.first);
    const SCEVConstant *R2 = dyn_cast<SCEVConstant>(Roots.second);
    if (R1) {
      // Pick the smallest positive root value.
      if (ConstantInt *CB =
          dyn_cast<ConstantInt>(ConstantExpr::getICmp(ICmpInst::ICMP_ULT,
                         R1->getValue(), R2->getValue()))) {
        if (CB->getZExtValue() == false)
          std::swap(R1, R2);  // R1 is the minimum root now.

        // Make sure the root is not off by one. The returned iteration should
        // not be in the range, but the previous one should be. When solving
        // for "X*X < 5", for example, we should not return a root of 2.
        ConstantInt *R1Val = EvaluateConstantChrecAtConstant(this,
                                                             R1->getValue(),
                                                             SE);
        if (Range.contains(R1Val->getValue())) {
          // The next iteration must be out of the range...
          ConstantInt *NextVal =
            ConstantInt::get(SE.getContext(), R1->getValue()->getValue()+1);

          R1Val = EvaluateConstantChrecAtConstant(this, NextVal, SE);
          if (!Range.contains(R1Val->getValue()))
            return SE.getConstant(NextVal);
          return SE.getCouldNotCompute();  // Something strange happened
        }

        // If R1 was not in the range, then it is a good return value. Make
        // sure that R1-1 WAS in the range though, just in case.
        ConstantInt *NextVal =
          ConstantInt::get(SE.getContext(), R1->getValue()->getValue()-1);
        R1Val = EvaluateConstantChrecAtConstant(this, NextVal, SE);
        if (Range.contains(R1Val->getValue()))
          return R1;
        return SE.getCouldNotCompute();  // Something strange happened
      }
    }
  }

  return SE.getCouldNotCompute();
}

//===----------------------------------------------------------------------===//
//                   SCEVCallbackVH Class Implementation
//===----------------------------------------------------------------------===//

void ScalarEvolution::SCEVCallbackVH::deleted() {
  assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!");
  if (PHINode *PN = dyn_cast<PHINode>(getValPtr()))
    SE->ConstantEvolutionLoopExitValue.erase(PN);
  SE->ValueExprMap.erase(getValPtr());
  // this now dangles!
}

void ScalarEvolution::SCEVCallbackVH::allUsesReplacedWith(Value *V) {
  assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!");

  // Forget all the expressions associated with users of the old value,
  // so that future queries will recompute the expressions using the new
  // value.
  Value *Old = getValPtr();
  SmallVector<User *, 16> Worklist;
  SmallPtrSet<User *, 8> Visited;
  for (Value::use_iterator UI = Old->use_begin(), UE = Old->use_end();
       UI != UE; ++UI)
    Worklist.push_back(*UI);
  while (!Worklist.empty()) {
    User *U = Worklist.pop_back_val();
    // Deleting the Old value will cause this to dangle. Postpone
    // that until everything else is done.
    if (U == Old)
      continue;
    if (!Visited.insert(U))
      continue;
    if (PHINode *PN = dyn_cast<PHINode>(U))
      SE->ConstantEvolutionLoopExitValue.erase(PN);
    SE->ValueExprMap.erase(U);
    for (Value::use_iterator UI = U->use_begin(), UE = U->use_end();
         UI != UE; ++UI)
      Worklist.push_back(*UI);
  }
  // Delete the Old value.
  if (PHINode *PN = dyn_cast<PHINode>(Old))
    SE->ConstantEvolutionLoopExitValue.erase(PN);
  SE->ValueExprMap.erase(Old);
  // this now dangles!
}

ScalarEvolution::SCEVCallbackVH::SCEVCallbackVH(Value *V, ScalarEvolution *se)
  : CallbackVH(V), SE(se) {}

//===----------------------------------------------------------------------===//
//                   ScalarEvolution Class Implementation
//===----------------------------------------------------------------------===//

ScalarEvolution::ScalarEvolution()
  : FunctionPass(ID), FirstUnknown(0) {
  initializeScalarEvolutionPass(*PassRegistry::getPassRegistry());
}

bool ScalarEvolution::runOnFunction(Function &F) {
  this->F = &F;
  LI = &getAnalysis<LoopInfo>();
  TD = getAnalysisIfAvailable<DataLayout>();
  TLI = &getAnalysis<TargetLibraryInfo>();
  DT = &getAnalysis<DominatorTree>();
  return false;
}

void ScalarEvolution::releaseMemory() {
  // Iterate through all the SCEVUnknown instances and call their
  // destructors, so that they release their references to their values.
  for (SCEVUnknown *U = FirstUnknown; U; U = U->Next)
    U->~SCEVUnknown();
  FirstUnknown = 0;

  ValueExprMap.clear();

  // Free any extra memory created for ExitNotTakenInfo in the unlikely event
  // that a loop had multiple computable exits.
  for (DenseMap<const Loop*, BackedgeTakenInfo>::iterator I =
         BackedgeTakenCounts.begin(), E = BackedgeTakenCounts.end();
       I != E; ++I) {
    I->second.clear();
  }

  assert(PendingLoopPredicates.empty() && "isImpliedCond garbage");

  BackedgeTakenCounts.clear();
  ConstantEvolutionLoopExitValue.clear();
  ValuesAtScopes.clear();
  LoopDispositions.clear();
  BlockDispositions.clear();
  UnsignedRanges.clear();
  SignedRanges.clear();
  UniqueSCEVs.clear();
  SCEVAllocator.Reset();
}

void ScalarEvolution::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesAll();
  AU.addRequiredTransitive<LoopInfo>();
  AU.addRequiredTransitive<DominatorTree>();
  AU.addRequired<TargetLibraryInfo>();
}

bool ScalarEvolution::hasLoopInvariantBackedgeTakenCount(const Loop *L) {
  return !isa<SCEVCouldNotCompute>(getBackedgeTakenCount(L));
}

static void PrintLoopInfo(raw_ostream &OS, ScalarEvolution *SE,
                          const Loop *L) {
  // Print all inner loops first
  for (Loop::iterator I = L->begin(), E = L->end(); I != E; ++I)
    PrintLoopInfo(OS, SE, *I);

  OS << "Loop ";
  WriteAsOperand(OS, L->getHeader(), /*PrintType=*/false);
  OS << ": ";

  SmallVector<BasicBlock *, 8> ExitBlocks;
  L->getExitBlocks(ExitBlocks);
  if (ExitBlocks.size() != 1)
    OS << "<multiple exits> ";

  if (SE->hasLoopInvariantBackedgeTakenCount(L)) {
    OS << "backedge-taken count is " << *SE->getBackedgeTakenCount(L);
  } else {
    OS << "Unpredictable backedge-taken count. ";
  }

  OS << "\n"
        "Loop ";
  WriteAsOperand(OS, L->getHeader(), /*PrintType=*/false);
  OS << ": ";

  if (!isa<SCEVCouldNotCompute>(SE->getMaxBackedgeTakenCount(L))) {
    OS << "max backedge-taken count is " << *SE->getMaxBackedgeTakenCount(L);
  } else {
    OS << "Unpredictable max backedge-taken count. ";
  }

  OS << "\n";
}

void ScalarEvolution::print(raw_ostream &OS, const Module *) const {
  // ScalarEvolution's implementation of the print method is to print
  // out SCEV values of all instructions that are interesting. Doing
  // this potentially causes it to create new SCEV objects though,
  // which technically conflicts with the const qualifier. This isn't
  // observable from outside the class though, so casting away the
  // const isn't dangerous.
  ScalarEvolution &SE = *const_cast<ScalarEvolution *>(this);

  OS << "Classifying expressions for: ";
  WriteAsOperand(OS, F, /*PrintType=*/false);
  OS << "\n";
  for (inst_iterator I = inst_begin(F), E = inst_end(F); I != E; ++I)
    if (isSCEVable(I->getType()) && !isa<CmpInst>(*I)) {
      OS << *I << '\n';
      OS << " --> ";
      const SCEV *SV = SE.getSCEV(&*I);
      SV->print(OS);

      const Loop *L = LI->getLoopFor((*I).getParent());

      const SCEV *AtUse = SE.getSCEVAtScope(SV, L);
      if (AtUse != SV) {
        OS << " --> ";
        AtUse->print(OS);
      }

      if (L) {
        OS << "\t\t" "Exits: ";
        const SCEV *ExitValue = SE.getSCEVAtScope(SV, L->getParentLoop());
        if (!SE.isLoopInvariant(ExitValue, L)) {
          OS << "<<Unknown>>";
        } else {
          OS << *ExitValue;
        }
      }

      OS << "\n";
    }

  OS << "Determining loop execution counts for: ";
  WriteAsOperand(OS, F, /*PrintType=*/false);
  OS << "\n";
  for (LoopInfo::iterator I = LI->begin(), E = LI->end(); I != E; ++I)
    PrintLoopInfo(OS, &SE, *I);
}

ScalarEvolution::LoopDisposition
ScalarEvolution::getLoopDisposition(const SCEV *S, const Loop *L) {
  std::map<const Loop *, LoopDisposition> &Values = LoopDispositions[S];
  std::pair<std::map<const Loop *, LoopDisposition>::iterator, bool> Pair =
    Values.insert(std::make_pair(L, LoopVariant));
  if (!Pair.second)
    return Pair.first->second;

  LoopDisposition D = computeLoopDisposition(S, L);
  return LoopDispositions[S][L] = D;
}

ScalarEvolution::LoopDisposition
ScalarEvolution::computeLoopDisposition(const SCEV *S, const Loop *L) {
  switch (S->getSCEVType()) {
  case scConstant:
    return LoopInvariant;
  case scTruncate:
  case scZeroExtend:
  case scSignExtend:
    return getLoopDisposition(cast<SCEVCastExpr>(S)->getOperand(), L);
  case scAddRecExpr: {
    const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(S);

    // If L is the addrec's loop, it's computable.
    if (AR->getLoop() == L)
      return LoopComputable;

    // Add recurrences are never invariant in the function-body (null loop).
    if (!L)
      return LoopVariant;

    // This recurrence is variant w.r.t. L if L contains AR's loop.
    if (L->contains(AR->getLoop()))
      return LoopVariant;

    // This recurrence is invariant w.r.t. L if AR's loop contains L.
    if (AR->getLoop()->contains(L))
      return LoopInvariant;

    // This recurrence is variant w.r.t. L if any of its operands
    // are variant.
    for (SCEVAddRecExpr::op_iterator I = AR->op_begin(), E = AR->op_end();
         I != E; ++I)
      if (!isLoopInvariant(*I, L))
        return LoopVariant;

    // Otherwise it's loop-invariant.
    return LoopInvariant;
  }
  case scAddExpr:
  case scMulExpr:
  case scUMaxExpr:
  case scSMaxExpr: {
    const SCEVNAryExpr *NAry = cast<SCEVNAryExpr>(S);
    bool HasVarying = false;
    for (SCEVNAryExpr::op_iterator I = NAry->op_begin(), E = NAry->op_end();
         I != E; ++I) {
      LoopDisposition D = getLoopDisposition(*I, L);
      if (D == LoopVariant)
        return LoopVariant;
      if (D == LoopComputable)
        HasVarying = true;
    }
    return HasVarying ? LoopComputable : LoopInvariant;
  }
  case scUDivExpr: {
    const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(S);
    LoopDisposition LD = getLoopDisposition(UDiv->getLHS(), L);
    if (LD == LoopVariant)
      return LoopVariant;
    LoopDisposition RD = getLoopDisposition(UDiv->getRHS(), L);
    if (RD == LoopVariant)
      return LoopVariant;
    return (LD == LoopInvariant && RD == LoopInvariant) ?
           LoopInvariant : LoopComputable;
  }
  case scUnknown:
    // All non-instruction values are loop invariant. All instructions are loop
    // invariant if they are not contained in the specified loop.
    // Instructions are never considered invariant in the function body
    // (null loop) because they are defined within the "loop".
    if (Instruction *I = dyn_cast<Instruction>(cast<SCEVUnknown>(S)->getValue()))
      return (L && !L->contains(I)) ? LoopInvariant : LoopVariant;
    return LoopInvariant;
  case scCouldNotCompute:
    llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  default: llvm_unreachable("Unknown SCEV kind!");
  }
}

bool ScalarEvolution::isLoopInvariant(const SCEV *S, const Loop *L) {
  return getLoopDisposition(S, L) == LoopInvariant;
}

bool ScalarEvolution::hasComputableLoopEvolution(const SCEV *S, const Loop *L) {
  return getLoopDisposition(S, L) == LoopComputable;
}

ScalarEvolution::BlockDisposition
ScalarEvolution::getBlockDisposition(const SCEV *S, const BasicBlock *BB) {
  std::map<const BasicBlock *, BlockDisposition> &Values = BlockDispositions[S];
  std::pair<std::map<const BasicBlock *, BlockDisposition>::iterator, bool>
    Pair = Values.insert(std::make_pair(BB, DoesNotDominateBlock));
  if (!Pair.second)
    return Pair.first->second;

  BlockDisposition D = computeBlockDisposition(S, BB);
  return BlockDispositions[S][BB] = D;
}

ScalarEvolution::BlockDisposition
ScalarEvolution::computeBlockDisposition(const SCEV *S, const BasicBlock *BB) {
  switch (S->getSCEVType()) {
  case scConstant:
    return ProperlyDominatesBlock;
  case scTruncate:
  case scZeroExtend:
  case scSignExtend:
    return getBlockDisposition(cast<SCEVCastExpr>(S)->getOperand(), BB);
  case scAddRecExpr: {
    // This uses a "dominates" query instead of "properly dominates" query
    // to test for proper dominance too, because the instruction which
    // produces the addrec's value is a PHI, and a PHI effectively properly
    // dominates its entire containing block.
    const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(S);
    if (!DT->dominates(AR->getLoop()->getHeader(), BB))
      return DoesNotDominateBlock;
  }
  // FALL THROUGH into SCEVNAryExpr handling.
  case scAddExpr:
  case scMulExpr:
  case scUMaxExpr:
  case scSMaxExpr: {
    const SCEVNAryExpr *NAry = cast<SCEVNAryExpr>(S);
    bool Proper = true;
    for (SCEVNAryExpr::op_iterator I = NAry->op_begin(), E = NAry->op_end();
         I != E; ++I) {
      BlockDisposition D = getBlockDisposition(*I, BB);
      if (D == DoesNotDominateBlock)
        return DoesNotDominateBlock;
      if (D == DominatesBlock)
        Proper = false;
    }
    return Proper ? ProperlyDominatesBlock : DominatesBlock;
  }
  case scUDivExpr: {
    const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(S);
    const SCEV *LHS = UDiv->getLHS(), *RHS = UDiv->getRHS();
    BlockDisposition LD = getBlockDisposition(LHS, BB);
    if (LD == DoesNotDominateBlock)
      return DoesNotDominateBlock;
    BlockDisposition RD = getBlockDisposition(RHS, BB);
    if (RD == DoesNotDominateBlock)
      return DoesNotDominateBlock;
    return (LD == ProperlyDominatesBlock && RD == ProperlyDominatesBlock) ?
           ProperlyDominatesBlock : DominatesBlock;
  }
  case scUnknown:
    if (Instruction *I =
          dyn_cast<Instruction>(cast<SCEVUnknown>(S)->getValue())) {
      if (I->getParent() == BB)
        return DominatesBlock;
      if (DT->properlyDominates(I->getParent(), BB))
        return ProperlyDominatesBlock;
      return DoesNotDominateBlock;
    }
    return ProperlyDominatesBlock;
  case scCouldNotCompute:
    llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  default:
    llvm_unreachable("Unknown SCEV kind!");
  }
}

bool ScalarEvolution::dominates(const SCEV *S, const BasicBlock *BB) {
  return getBlockDisposition(S, BB) >= DominatesBlock;
}

bool ScalarEvolution::properlyDominates(const SCEV *S, const BasicBlock *BB) {
  return getBlockDisposition(S, BB) == ProperlyDominatesBlock;
}

namespace {
// Search for a SCEV expression node within an expression tree.
// Implements SCEVTraversal::Visitor.
struct SCEVSearch {
  const SCEV *Node;
  bool IsFound;

  SCEVSearch(const SCEV *N): Node(N), IsFound(false) {}

  bool follow(const SCEV *S) {
    IsFound |= (S == Node);
    return !IsFound;
  }
  bool isDone() const { return IsFound; }
};
}

bool ScalarEvolution::hasOperand(const SCEV *S, const SCEV *Op) const {
  SCEVSearch Search(Op);
  visitAll(S, Search);
  return Search.IsFound;
}

void ScalarEvolution::forgetMemoizedResults(const SCEV *S) {
  ValuesAtScopes.erase(S);
  LoopDispositions.erase(S);
  BlockDispositions.erase(S);
  UnsignedRanges.erase(S);
  SignedRanges.erase(S);
}

typedef DenseMap<const Loop *, std::string> VerifyMap;

/// replaceSubString - Replaces all occurrences of From in Str with To.
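// Note (added commentary, not in the original source): Pos advances past the
// newly inserted text after each replacement, so the scan never re-examines
// characters it has just written and terminates even if To contains From.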
static void replaceSubString(std::string &Str, StringRef From, StringRef To) {
  size_t Pos = 0;
  while ((Pos = Str.find(From, Pos)) != std::string::npos) {
    Str.replace(Pos, From.size(), To.data(), To.size());
    Pos += To.size();
  }
}

/// getLoopBackedgeTakenCounts - Helper method for verifyAnalysis.
static void
getLoopBackedgeTakenCounts(Loop *L, VerifyMap &Map, ScalarEvolution &SE) {
  for (Loop::reverse_iterator I = L->rbegin(), E = L->rend(); I != E; ++I) {
    getLoopBackedgeTakenCounts(*I, Map, SE); // recurse.

    std::string &S = Map[L];
    if (S.empty()) {
      raw_string_ostream OS(S);
      SE.getBackedgeTakenCount(L)->print(OS);

      // false and 0 are semantically equivalent. This can happen in dead loops.
      replaceSubString(OS.str(), "false", "0");
      // Remove wrap flags, their use in SCEV is highly fragile.
      // FIXME: Remove this when SCEV gets smarter about them.
      replaceSubString(OS.str(), "<nw>", "");
      replaceSubString(OS.str(), "<nsw>", "");
      replaceSubString(OS.str(), "<nuw>", "");
    }
  }
}

void ScalarEvolution::verifyAnalysis() const {
  if (!VerifySCEV)
    return;

  ScalarEvolution &SE = *const_cast<ScalarEvolution *>(this);

  // Gather stringified backedge taken counts for all loops using SCEV's caches.
  // FIXME: It would be much better to store actual values instead of strings,
  //        but SCEV pointers will change if we drop the caches.
  VerifyMap BackedgeDumpsOld, BackedgeDumpsNew;
  for (LoopInfo::reverse_iterator I = LI->rbegin(), E = LI->rend(); I != E; ++I)
    getLoopBackedgeTakenCounts(*I, BackedgeDumpsOld, SE);

  // Gather stringified backedge taken counts for all loops without using
  // SCEV's caches.
  SE.releaseMemory();
  for (LoopInfo::reverse_iterator I = LI->rbegin(), E = LI->rend(); I != E; ++I)
    getLoopBackedgeTakenCounts(*I, BackedgeDumpsNew, SE);

  // Now compare whether they're the same with and without caches. This allows
  // verifying that no pass changed the cache.
  assert(BackedgeDumpsOld.size() == BackedgeDumpsNew.size() &&
         "New loops suddenly appeared!");

  for (VerifyMap::iterator OldI = BackedgeDumpsOld.begin(),
                           OldE = BackedgeDumpsOld.end(),
                           NewI = BackedgeDumpsNew.begin();
       OldI != OldE; ++OldI, ++NewI) {
    assert(OldI->first == NewI->first && "Loop order changed!");
    // Compare the stringified SCEVs. We don't care if an undef backedge-taken
    // count changes.
    // FIXME: We currently ignore SCEV changes from/to CouldNotCompute. This
    //        means that a pass is buggy or SCEV has to learn a new pattern but
    //        is usually not harmful.
    if (OldI->second != NewI->second &&
        OldI->second.find("undef") == std::string::npos &&
        NewI->second.find("undef") == std::string::npos &&
        OldI->second != "***COULDNOTCOMPUTE***" &&
        NewI->second != "***COULDNOTCOMPUTE***") {
      dbgs() << "SCEVValidator: SCEV for loop '"
             << OldI->first->getHeader()->getName()
             << "' changed from '" << OldI->second
             << "' to '" << NewI->second << "'!\n";
      std::abort();
    }
  }

  // TODO: Verify more things.
}