4309143101431114312143131431414315143161431714318143191432014321143221432314324143251432614327 |
- //===---- CGBuiltin.cpp - Emit LLVM Code for builtins ---------------------===//
- //
- // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
- // See https://llvm.org/LICENSE.txt for license information.
- // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
- //
- //===----------------------------------------------------------------------===//
- //
- // This contains code to emit Builtin calls as LLVM code.
- //
- //===----------------------------------------------------------------------===//
- #include "CGCXXABI.h"
- #include "CGObjCRuntime.h"
- #include "CGOpenCLRuntime.h"
- #include "CGRecordLayout.h"
- #include "CodeGenFunction.h"
- #include "CodeGenModule.h"
- #include "ConstantEmitter.h"
- #include "PatternInit.h"
- #include "TargetInfo.h"
- #include "clang/AST/ASTContext.h"
- #include "clang/AST/Decl.h"
- #include "clang/AST/OSLog.h"
- #include "clang/Basic/TargetBuiltins.h"
- #include "clang/Basic/TargetInfo.h"
- #include "clang/CodeGen/CGFunctionInfo.h"
- #include "llvm/ADT/SmallPtrSet.h"
- #include "llvm/ADT/StringExtras.h"
- #include "llvm/IR/DataLayout.h"
- #include "llvm/IR/InlineAsm.h"
- #include "llvm/IR/Intrinsics.h"
- #include "llvm/IR/MDBuilder.h"
- #include "llvm/Support/ConvertUTF.h"
- #include "llvm/Support/ScopedPrinter.h"
- #include "llvm/Support/TargetParser.h"
- #include <sstream>
- using namespace clang;
- using namespace CodeGen;
- using namespace llvm;
- static
- int64_t clamp(int64_t Value, int64_t Low, int64_t High) {
- return std::min(High, std::max(Low, Value));
- }
- static void initializeAlloca(CodeGenFunction &CGF, AllocaInst *AI, Value *Size, unsigned AlignmentInBytes) {
- ConstantInt *Byte;
- switch (CGF.getLangOpts().getTrivialAutoVarInit()) {
- case LangOptions::TrivialAutoVarInitKind::Uninitialized:
- // Nothing to initialize.
- return;
- case LangOptions::TrivialAutoVarInitKind::Zero:
- Byte = CGF.Builder.getInt8(0x00);
- break;
- case LangOptions::TrivialAutoVarInitKind::Pattern: {
- llvm::Type *Int8 = llvm::IntegerType::getInt8Ty(CGF.CGM.getLLVMContext());
- Byte = llvm::dyn_cast<llvm::ConstantInt>(
- initializationPatternFor(CGF.CGM, Int8));
- break;
- }
- }
- CGF.Builder.CreateMemSet(AI, Byte, Size, AlignmentInBytes);
- }
- /// getBuiltinLibFunction - Given a builtin id for a function like
- /// "__builtin_fabsf", return a Function* for "fabsf".
- llvm::Constant *CodeGenModule::getBuiltinLibFunction(const FunctionDecl *FD,
- unsigned BuiltinID) {
- assert(Context.BuiltinInfo.isLibFunction(BuiltinID));
- // Get the name, skip over the __builtin_ prefix (if necessary).
- StringRef Name;
- GlobalDecl D(FD);
- // If the builtin has been declared explicitly with an assembler label,
- // use the mangled name. This differs from the plain label on platforms
- // that prefix labels.
- if (FD->hasAttr<AsmLabelAttr>())
- Name = getMangledName(D);
- else
- Name = Context.BuiltinInfo.getName(BuiltinID) + 10;
- llvm::FunctionType *Ty =
- cast<llvm::FunctionType>(getTypes().ConvertType(FD->getType()));
- return GetOrCreateLLVMFunction(Name, Ty, D, /*ForVTable=*/false);
- }
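For reference, a minimal sketch of a builtin that can take this fallback path (the demo function name is invented; the builtin and the 10-character "__builtin_" prefix are real):

    // __builtin_fabsf is usually folded to @llvm.fabs.f32, but when CodeGen
    // falls back to the library it goes through getBuiltinLibFunction, which
    // strips the "__builtin_" prefix and emits a call to plain "fabsf".
    float fabs_demo(float x) {
      return __builtin_fabsf(x);
    }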
- /// Emit the conversions required to turn the given value into an
- /// integer of the given size.
- static Value *EmitToInt(CodeGenFunction &CGF, llvm::Value *V,
- QualType T, llvm::IntegerType *IntType) {
- V = CGF.EmitToMemory(V, T);
- if (V->getType()->isPointerTy())
- return CGF.Builder.CreatePtrToInt(V, IntType);
- assert(V->getType() == IntType);
- return V;
- }
- static Value *EmitFromInt(CodeGenFunction &CGF, llvm::Value *V,
- QualType T, llvm::Type *ResultType) {
- V = CGF.EmitFromMemory(V, T);
- if (ResultType->isPointerTy())
- return CGF.Builder.CreateIntToPtr(V, ResultType);
- assert(V->getType() == ResultType);
- return V;
- }
- /// Utility to insert an atomic instruction based on Intrinsic::ID
- /// and the expression node.
- static Value *MakeBinaryAtomicValue(
- CodeGenFunction &CGF, llvm::AtomicRMWInst::BinOp Kind, const CallExpr *E,
- AtomicOrdering Ordering = AtomicOrdering::SequentiallyConsistent) {
- QualType T = E->getType();
- assert(E->getArg(0)->getType()->isPointerType());
- assert(CGF.getContext().hasSameUnqualifiedType(T,
- E->getArg(0)->getType()->getPointeeType()));
- assert(CGF.getContext().hasSameUnqualifiedType(T, E->getArg(1)->getType()));
- llvm::Value *DestPtr = CGF.EmitScalarExpr(E->getArg(0));
- unsigned AddrSpace = DestPtr->getType()->getPointerAddressSpace();
- llvm::IntegerType *IntType =
- llvm::IntegerType::get(CGF.getLLVMContext(),
- CGF.getContext().getTypeSize(T));
- llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace);
- llvm::Value *Args[2];
- Args[0] = CGF.Builder.CreateBitCast(DestPtr, IntPtrType);
- Args[1] = CGF.EmitScalarExpr(E->getArg(1));
- llvm::Type *ValueType = Args[1]->getType();
- Args[1] = EmitToInt(CGF, Args[1], T, IntType);
- llvm::Value *Result = CGF.Builder.CreateAtomicRMW(
- Kind, Args[0], Args[1], Ordering);
- return EmitFromInt(CGF, Result, T, ValueType);
- }
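A minimal caller-side sketch, assuming the usual __sync_* lowering (the demo function name is invented):

    #include <cstdint>

    int32_t fetch_add_demo(int32_t *p, int32_t v) {
      // Funneled through MakeBinaryAtomicValue and lowered to
      //   atomicrmw add i32* %p, i32 %v seq_cst
      // returning the value *p held before the add.
      return __sync_fetch_and_add(p, v);
    }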
- static Value *EmitNontemporalStore(CodeGenFunction &CGF, const CallExpr *E) {
- Value *Val = CGF.EmitScalarExpr(E->getArg(0));
- Value *Address = CGF.EmitScalarExpr(E->getArg(1));
- // Convert the type of the pointer to a pointer to the stored type.
- Val = CGF.EmitToMemory(Val, E->getArg(0)->getType());
- Value *BC = CGF.Builder.CreateBitCast(
- Address, llvm::PointerType::getUnqual(Val->getType()), "cast");
- LValue LV = CGF.MakeNaturalAlignAddrLValue(BC, E->getArg(0)->getType());
- LV.setNontemporal(true);
- CGF.EmitStoreOfScalar(Val, LV, false);
- return nullptr;
- }
- static Value *EmitNontemporalLoad(CodeGenFunction &CGF, const CallExpr *E) {
- Value *Address = CGF.EmitScalarExpr(E->getArg(0));
- LValue LV = CGF.MakeNaturalAlignAddrLValue(Address, E->getType());
- LV.setNontemporal(true);
- return CGF.EmitLoadOfScalar(LV, E->getExprLoc());
- }
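A short sketch of the corresponding builtins (demo name invented):

    void nt_copy_demo(float *dst, float *src) {
      // Both the load and the store are emitted with !nontemporal
      // metadata, hinting that the data should bypass the cache.
      float v = __builtin_nontemporal_load(src);
      __builtin_nontemporal_store(v, dst);
    }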
- static RValue EmitBinaryAtomic(CodeGenFunction &CGF,
- llvm::AtomicRMWInst::BinOp Kind,
- const CallExpr *E) {
- return RValue::get(MakeBinaryAtomicValue(CGF, Kind, E));
- }
- /// Utility to insert an atomic instruction based on Intrinsic::ID and
- /// the expression node, where the return value is the result of the
- /// operation.
- static RValue EmitBinaryAtomicPost(CodeGenFunction &CGF,
- llvm::AtomicRMWInst::BinOp Kind,
- const CallExpr *E,
- Instruction::BinaryOps Op,
- bool Invert = false) {
- QualType T = E->getType();
- assert(E->getArg(0)->getType()->isPointerType());
- assert(CGF.getContext().hasSameUnqualifiedType(T,
- E->getArg(0)->getType()->getPointeeType()));
- assert(CGF.getContext().hasSameUnqualifiedType(T, E->getArg(1)->getType()));
- llvm::Value *DestPtr = CGF.EmitScalarExpr(E->getArg(0));
- unsigned AddrSpace = DestPtr->getType()->getPointerAddressSpace();
- llvm::IntegerType *IntType =
- llvm::IntegerType::get(CGF.getLLVMContext(),
- CGF.getContext().getTypeSize(T));
- llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace);
- llvm::Value *Args[2];
- Args[1] = CGF.EmitScalarExpr(E->getArg(1));
- llvm::Type *ValueType = Args[1]->getType();
- Args[1] = EmitToInt(CGF, Args[1], T, IntType);
- Args[0] = CGF.Builder.CreateBitCast(DestPtr, IntPtrType);
- llvm::Value *Result = CGF.Builder.CreateAtomicRMW(
- Kind, Args[0], Args[1], llvm::AtomicOrdering::SequentiallyConsistent);
- Result = CGF.Builder.CreateBinOp(Op, Result, Args[1]);
- if (Invert)
- Result = CGF.Builder.CreateBinOp(llvm::Instruction::Xor, Result,
- llvm::ConstantInt::get(IntType, -1));
- Result = EmitFromInt(CGF, Result, T, ValueType);
- return RValue::get(Result);
- }
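A sketch of the "post" flavor, including the Invert case (demo names invented):

    #include <cstdint>

    int32_t add_and_fetch_demo(int32_t *p, int32_t v) {
      // atomicrmw yields the old value; CreateBinOp re-applies the
      // operation so the builtin returns the *new* value.
      return __sync_add_and_fetch(p, v);
    }

    int32_t nand_and_fetch_demo(int32_t *p, int32_t v) {
      // For nand, the Invert flag additionally xors the result with -1,
      // giving ~(old & v).
      return __sync_nand_and_fetch(p, v);
    }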
- /// Utility to insert an atomic cmpxchg instruction.
- ///
- /// @param CGF The current codegen function.
- /// @param E Builtin call expression to convert to cmpxchg.
- /// arg0 - address to operate on
- /// arg1 - value to compare with
- /// arg2 - new value
- /// @param ReturnBool Specifies whether to return success flag of
- /// cmpxchg result or the old value.
- ///
- /// @returns result of cmpxchg, according to ReturnBool
- ///
- /// Note: to lower Microsoft's _InterlockedCompareExchange* intrinsics,
- /// invoke the function EmitAtomicCmpXchgForMSIntrin instead.
- static Value *MakeAtomicCmpXchgValue(CodeGenFunction &CGF, const CallExpr *E,
- bool ReturnBool) {
- QualType T = ReturnBool ? E->getArg(1)->getType() : E->getType();
- llvm::Value *DestPtr = CGF.EmitScalarExpr(E->getArg(0));
- unsigned AddrSpace = DestPtr->getType()->getPointerAddressSpace();
- llvm::IntegerType *IntType = llvm::IntegerType::get(
- CGF.getLLVMContext(), CGF.getContext().getTypeSize(T));
- llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace);
- Value *Args[3];
- Args[0] = CGF.Builder.CreateBitCast(DestPtr, IntPtrType);
- Args[1] = CGF.EmitScalarExpr(E->getArg(1));
- llvm::Type *ValueType = Args[1]->getType();
- Args[1] = EmitToInt(CGF, Args[1], T, IntType);
- Args[2] = EmitToInt(CGF, CGF.EmitScalarExpr(E->getArg(2)), T, IntType);
- Value *Pair = CGF.Builder.CreateAtomicCmpXchg(
- Args[0], Args[1], Args[2], llvm::AtomicOrdering::SequentiallyConsistent,
- llvm::AtomicOrdering::SequentiallyConsistent);
- if (ReturnBool)
- // Extract boolean success flag and zext it to int.
- return CGF.Builder.CreateZExt(CGF.Builder.CreateExtractValue(Pair, 1),
- CGF.ConvertType(E->getType()));
- else
- // Extract old value and emit it using the same type as compare value.
- return EmitFromInt(CGF, CGF.Builder.CreateExtractValue(Pair, 0), T,
- ValueType);
- }
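Both ReturnBool modes in one sketch (demo names invented):

    #include <cstdint>

    bool cas_flag_demo(int32_t *p, int32_t cmp, int32_t xchg) {
      // ReturnBool = true: the i1 success flag (element 1 of the
      // cmpxchg result pair) is zero-extended to the result type.
      return __sync_bool_compare_and_swap(p, cmp, xchg);
    }

    int32_t cas_value_demo(int32_t *p, int32_t cmp, int32_t xchg) {
      // ReturnBool = false: the old value (element 0) is returned.
      return __sync_val_compare_and_swap(p, cmp, xchg);
    }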
- /// This function should be invoked to emit atomic cmpxchg for Microsoft's
- /// _InterlockedCompareExchange* intrinsics which have the following signature:
- /// T _InterlockedCompareExchange(T volatile *Destination,
- /// T Exchange,
- /// T Comparand);
- ///
- /// Whereas the llvm 'cmpxchg' instruction has the following syntax:
- /// cmpxchg *Destination, Comparand, Exchange.
- /// So we need to swap Comparand and Exchange when invoking
- /// CreateAtomicCmpXchg. That is the reason we could not use the above utility
- /// function MakeAtomicCmpXchgValue since it expects the arguments to be
- /// already swapped.
- static
- Value *EmitAtomicCmpXchgForMSIntrin(CodeGenFunction &CGF, const CallExpr *E,
- AtomicOrdering SuccessOrdering = AtomicOrdering::SequentiallyConsistent) {
- assert(E->getArg(0)->getType()->isPointerType());
- assert(CGF.getContext().hasSameUnqualifiedType(
- E->getType(), E->getArg(0)->getType()->getPointeeType()));
- assert(CGF.getContext().hasSameUnqualifiedType(E->getType(),
- E->getArg(1)->getType()));
- assert(CGF.getContext().hasSameUnqualifiedType(E->getType(),
- E->getArg(2)->getType()));
- auto *Destination = CGF.EmitScalarExpr(E->getArg(0));
- auto *Comparand = CGF.EmitScalarExpr(E->getArg(2));
- auto *Exchange = CGF.EmitScalarExpr(E->getArg(1));
- // For Release ordering, the failure ordering should be Monotonic.
- auto FailureOrdering = SuccessOrdering == AtomicOrdering::Release ?
- AtomicOrdering::Monotonic :
- SuccessOrdering;
- auto *Result = CGF.Builder.CreateAtomicCmpXchg(
- Destination, Comparand, Exchange,
- SuccessOrdering, FailureOrdering);
- Result->setVolatile(true);
- return CGF.Builder.CreateExtractValue(Result, 0);
- }
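A sketch assuming an MSVC-compatible target where <intrin.h> provides the intrinsic (demo name invented):

    #include <intrin.h>

    long ms_cas_demo(long volatile *dest, long exchange, long comparand) {
      // MSVC argument order is (dest, exchange, comparand), while LLVM's
      // cmpxchg takes (ptr, comparand, exchange) -- hence the swap above.
      // The previous value of *dest is returned.
      return _InterlockedCompareExchange(dest, exchange, comparand);
    }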
- static Value *EmitAtomicIncrementValue(CodeGenFunction &CGF, const CallExpr *E,
- AtomicOrdering Ordering = AtomicOrdering::SequentiallyConsistent) {
- assert(E->getArg(0)->getType()->isPointerType());
- auto *IntTy = CGF.ConvertType(E->getType());
- auto *Result = CGF.Builder.CreateAtomicRMW(
- AtomicRMWInst::Add,
- CGF.EmitScalarExpr(E->getArg(0)),
- ConstantInt::get(IntTy, 1),
- Ordering);
- return CGF.Builder.CreateAdd(Result, ConstantInt::get(IntTy, 1));
- }
- static Value *EmitAtomicDecrementValue(CodeGenFunction &CGF, const CallExpr *E,
- AtomicOrdering Ordering = AtomicOrdering::SequentiallyConsistent) {
- assert(E->getArg(0)->getType()->isPointerType());
- auto *IntTy = CGF.ConvertType(E->getType());
- auto *Result = CGF.Builder.CreateAtomicRMW(
- AtomicRMWInst::Sub,
- CGF.EmitScalarExpr(E->getArg(0)),
- ConstantInt::get(IntTy, 1),
- Ordering);
- return CGF.Builder.CreateSub(Result, ConstantInt::get(IntTy, 1));
- }
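A sketch of the increment/decrement pair, again assuming an MSVC-compatible target (demo name invented):

    #include <intrin.h>

    long inc_dec_demo(long volatile *p) {
      // atomicrmw add/sub returns the old value; the trailing
      // CreateAdd/CreateSub makes these builtins return the *updated*
      // value, matching MSVC semantics.
      long up = _InterlockedIncrement(p);
      long down = _InterlockedDecrement(p);
      return up + down;
    }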
- // Build a plain volatile load.
- static Value *EmitISOVolatileLoad(CodeGenFunction &CGF, const CallExpr *E) {
- Value *Ptr = CGF.EmitScalarExpr(E->getArg(0));
- QualType ElTy = E->getArg(0)->getType()->getPointeeType();
- CharUnits LoadSize = CGF.getContext().getTypeSizeInChars(ElTy);
- llvm::Type *ITy =
- llvm::IntegerType::get(CGF.getLLVMContext(), LoadSize.getQuantity() * 8);
- Ptr = CGF.Builder.CreateBitCast(Ptr, ITy->getPointerTo());
- llvm::LoadInst *Load = CGF.Builder.CreateAlignedLoad(Ptr, LoadSize);
- Load->setVolatile(true);
- return Load;
- }
- // Build a plain volatile store.
- static Value *EmitISOVolatileStore(CodeGenFunction &CGF, const CallExpr *E) {
- Value *Ptr = CGF.EmitScalarExpr(E->getArg(0));
- Value *Value = CGF.EmitScalarExpr(E->getArg(1));
- QualType ElTy = E->getArg(0)->getType()->getPointeeType();
- CharUnits StoreSize = CGF.getContext().getTypeSizeInChars(ElTy);
- llvm::Type *ITy =
- llvm::IntegerType::get(CGF.getLLVMContext(), StoreSize.getQuantity() * 8);
- Ptr = CGF.Builder.CreateBitCast(Ptr, ITy->getPointerTo());
- llvm::StoreInst *Store =
- CGF.Builder.CreateAlignedStore(Value, Ptr, StoreSize);
- Store->setVolatile(true);
- return Store;
- }
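A sketch of the __iso_volatile_* builtins these helpers implement, assuming an MSVC-compatible target (demo name invented):

    #include <intrin.h>

    int iso_demo(volatile int *p) {
      // Emitted as a plain volatile i32 store/load regardless of the
      // /volatile:ms setting -- hence "ISO" volatile.
      __iso_volatile_store32(p, 42);
      return __iso_volatile_load32(p);
    }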
- // Emit a simple mangled intrinsic that has 1 argument and a return type
- // matching the argument type.
- static Value *emitUnaryBuiltin(CodeGenFunction &CGF,
- const CallExpr *E,
- unsigned IntrinsicID) {
- llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
- Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
- return CGF.Builder.CreateCall(F, Src0);
- }
- // Emit an intrinsic that has 2 operands of the same type as its result.
- static Value *emitBinaryBuiltin(CodeGenFunction &CGF,
- const CallExpr *E,
- unsigned IntrinsicID) {
- llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
- llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));
- Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
- return CGF.Builder.CreateCall(F, { Src0, Src1 });
- }
- // Emit an intrinsic that has 3 operands of the same type as its result.
- static Value *emitTernaryBuiltin(CodeGenFunction &CGF,
- const CallExpr *E,
- unsigned IntrinsicID) {
- llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
- llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));
- llvm::Value *Src2 = CGF.EmitScalarExpr(E->getArg(2));
- Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
- return CGF.Builder.CreateCall(F, { Src0, Src1, Src2 });
- }
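For instance (demo name invented), __builtin_fma fits this shape:

    double fma_demo(double a, double b, double c) {
      // Three operands and a result of the same type: handled by
      // emitTernaryBuiltin and lowered to @llvm.fma.f64.
      return __builtin_fma(a, b, c);
    }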
- // Emit an intrinsic that has 1 float or double operand, and 1 integer.
- static Value *emitFPIntBuiltin(CodeGenFunction &CGF,
- const CallExpr *E,
- unsigned IntrinsicID) {
- llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
- llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));
- Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
- return CGF.Builder.CreateCall(F, {Src0, Src1});
- }
- // Emit an intrinsic that has overloaded integer result and fp operand.
- static Value *emitFPToIntRoundBuiltin(CodeGenFunction &CGF,
- const CallExpr *E,
- unsigned IntrinsicID) {
- llvm::Type *ResultType = CGF.ConvertType(E->getType());
- llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
- Function *F = CGF.CGM.getIntrinsic(IntrinsicID,
- {ResultType, Src0->getType()});
- return CGF.Builder.CreateCall(F, Src0);
- }
- /// EmitFAbs - Emit a call to @llvm.fabs().
- static Value *EmitFAbs(CodeGenFunction &CGF, Value *V) {
- Function *F = CGF.CGM.getIntrinsic(Intrinsic::fabs, V->getType());
- llvm::CallInst *Call = CGF.Builder.CreateCall(F, V);
- Call->setDoesNotAccessMemory();
- return Call;
- }
- /// Emit the computation of the sign bit for a floating point value. Returns
- /// the i1 sign bit value.
- static Value *EmitSignBit(CodeGenFunction &CGF, Value *V) {
- LLVMContext &C = CGF.CGM.getLLVMContext();
- llvm::Type *Ty = V->getType();
- int Width = Ty->getPrimitiveSizeInBits();
- llvm::Type *IntTy = llvm::IntegerType::get(C, Width);
- V = CGF.Builder.CreateBitCast(V, IntTy);
- if (Ty->isPPC_FP128Ty()) {
- // We want the sign bit of the higher-order double. The bitcast we just
- // did works as if the double-double was stored to memory and then
- // read as an i128. The "store" will put the higher-order double in the
- // lower address in both little- and big-Endian modes, but the "load"
- // will treat those bits as a different part of the i128: the low bits in
- // little-Endian, the high bits in big-Endian. Therefore, on big-Endian
- // we need to shift the high bits down to the low before truncating.
- Width >>= 1;
- if (CGF.getTarget().isBigEndian()) {
- Value *ShiftCst = llvm::ConstantInt::get(IntTy, Width);
- V = CGF.Builder.CreateLShr(V, ShiftCst);
- }
- // We are truncating the value in order to extract the higher-order
- // double, from which we will extract the sign.
- IntTy = llvm::IntegerType::get(C, Width);
- V = CGF.Builder.CreateTrunc(V, IntTy);
- }
- Value *Zero = llvm::Constant::getNullValue(IntTy);
- return CGF.Builder.CreateICmpSLT(V, Zero);
- }
- static RValue emitLibraryCall(CodeGenFunction &CGF, const FunctionDecl *FD,
- const CallExpr *E, llvm::Constant *calleeValue) {
- CGCallee callee = CGCallee::forDirect(calleeValue, GlobalDecl(FD));
- return CGF.EmitCall(E->getCallee()->getType(), callee, E, ReturnValueSlot());
- }
- /// Emit a call to llvm.{sadd,uadd,ssub,usub,smul,umul}.with.overflow.*
- /// depending on IntrinsicID.
- ///
- /// \arg CGF The current codegen function.
- /// \arg IntrinsicID The ID for the Intrinsic we wish to generate.
- /// \arg X The first argument to the llvm.*.with.overflow.*.
- /// \arg Y The second argument to the llvm.*.with.overflow.*.
- /// \arg Carry The carry returned by the llvm.*.with.overflow.*.
- /// \returns The result (i.e. sum/product) returned by the intrinsic.
- static llvm::Value *EmitOverflowIntrinsic(CodeGenFunction &CGF,
- const llvm::Intrinsic::ID IntrinsicID,
- llvm::Value *X, llvm::Value *Y,
- llvm::Value *&Carry) {
- // Make sure we have integers of the same width.
- assert(X->getType() == Y->getType() &&
- "Arguments must be the same type. (Did you forget to make sure both "
- "arguments have the same integer width?)");
- Function *Callee = CGF.CGM.getIntrinsic(IntrinsicID, X->getType());
- llvm::Value *Tmp = CGF.Builder.CreateCall(Callee, {X, Y});
- Carry = CGF.Builder.CreateExtractValue(Tmp, 1);
- return CGF.Builder.CreateExtractValue(Tmp, 0);
- }
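A sketch of a builtin that reaches this helper (demo name invented):

    #include <cstdint>

    bool add_overflow_demo(int32_t x, int32_t y, int32_t *sum) {
      // Lowered via @llvm.sadd.with.overflow.i32: element 0 of the
      // result pair is the sum, element 1 is the overflow bit (Carry).
      return __builtin_add_overflow(x, y, sum);
    }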
- static Value *emitRangedBuiltin(CodeGenFunction &CGF,
- unsigned IntrinsicID,
- int low, int high) {
- llvm::MDBuilder MDHelper(CGF.getLLVMContext());
- llvm::MDNode *RNode = MDHelper.createRange(APInt(32, low), APInt(32, high));
- Function *F = CGF.CGM.getIntrinsic(IntrinsicID, {});
- llvm::Instruction *Call = CGF.Builder.CreateCall(F);
- Call->setMetadata(llvm::LLVMContext::MD_range, RNode);
- return Call;
- }
- namespace {
- struct WidthAndSignedness {
- unsigned Width;
- bool Signed;
- };
- }
- static WidthAndSignedness
- getIntegerWidthAndSignedness(const clang::ASTContext &context,
- const clang::QualType Type) {
- assert(Type->isIntegerType() && "Given type is not an integer.");
- unsigned Width = Type->isBooleanType() ? 1 : context.getTypeInfo(Type).Width;
- bool Signed = Type->isSignedIntegerType();
- return {Width, Signed};
- }
- // Given one or more integer types, this function produces an integer type that
- // encompasses them: any value in one of the given types could be expressed in
- // the encompassing type.
- static struct WidthAndSignedness
- EncompassingIntegerType(ArrayRef<struct WidthAndSignedness> Types) {
- assert(Types.size() > 0 && "Empty list of types.");
- // If any of the given types is signed, we must return a signed type.
- bool Signed = false;
- for (const auto &Type : Types) {
- Signed |= Type.Signed;
- }
- // The encompassing type must have a width greater than or equal to the width
- // of the specified types. Additionally, if the encompassing type is signed,
- // its width must be strictly greater than the width of any unsigned types
- // given.
- unsigned Width = 0;
- for (const auto &Type : Types) {
- unsigned MinWidth = Type.Width + (Signed && !Type.Signed);
- if (Width < MinWidth) {
- Width = MinWidth;
- }
- }
- return {Width, Signed};
- }
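A worked example of the width rule above:

    // EncompassingIntegerType({u32, s16}):
    //   Signed = true              (s16 is signed)
    //   u32 needs 32 + 1 = 33 bits once the result is signed
    //   s16 needs 16 bits
    //   => result is {Width: 33, Signed: true}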
- Value *CodeGenFunction::EmitVAStartEnd(Value *ArgValue, bool IsStart) {
- llvm::Type *DestType = Int8PtrTy;
- if (ArgValue->getType() != DestType)
- ArgValue =
- Builder.CreateBitCast(ArgValue, DestType, ArgValue->getName().data());
- Intrinsic::ID inst = IsStart ? Intrinsic::vastart : Intrinsic::vaend;
- return Builder.CreateCall(CGM.getIntrinsic(inst), ArgValue);
- }
- /// Checks if using the result of __builtin_object_size(p, @p From) in place of
- /// __builtin_object_size(p, @p To) is correct
- static bool areBOSTypesCompatible(int From, int To) {
- // Note: Our __builtin_object_size implementation currently treats Type=0 and
- // Type=2 identically. Encoding this implementation detail here may make
- // improving __builtin_object_size difficult in the future, so it's omitted.
- return From == To || (From == 0 && To == 1) || (From == 3 && To == 2);
- }
- static llvm::Value *
- getDefaultBuiltinObjectSizeResult(unsigned Type, llvm::IntegerType *ResType) {
- return ConstantInt::get(ResType, (Type & 2) ? 0 : -1, /*isSigned=*/true);
- }
- llvm::Value *
- CodeGenFunction::evaluateOrEmitBuiltinObjectSize(const Expr *E, unsigned Type,
- llvm::IntegerType *ResType,
- llvm::Value *EmittedE,
- bool IsDynamic) {
- uint64_t ObjectSize;
- if (!E->tryEvaluateObjectSize(ObjectSize, getContext(), Type))
- return emitBuiltinObjectSize(E, Type, ResType, EmittedE, IsDynamic);
- return ConstantInt::get(ResType, ObjectSize, /*isSigned=*/true);
- }
- /// Returns a Value corresponding to the size of the given expression.
- /// This Value may be either of the following:
- /// - A llvm::Argument (if E is a param with the pass_object_size attribute on
- /// it)
- /// - A call to the @llvm.objectsize intrinsic
- ///
- /// EmittedE is the result of emitting `E` as a scalar expr. If it's non-null
- /// and we wouldn't otherwise try to reference a pass_object_size parameter,
- /// we'll call @llvm.objectsize on EmittedE, rather than emitting E.
- llvm::Value *
- CodeGenFunction::emitBuiltinObjectSize(const Expr *E, unsigned Type,
- llvm::IntegerType *ResType,
- llvm::Value *EmittedE, bool IsDynamic) {
- // We need to reference an argument if the pointer is a parameter with the
- // pass_object_size attribute.
- if (auto *D = dyn_cast<DeclRefExpr>(E->IgnoreParenImpCasts())) {
- auto *Param = dyn_cast<ParmVarDecl>(D->getDecl());
- auto *PS = D->getDecl()->getAttr<PassObjectSizeAttr>();
- if (Param != nullptr && PS != nullptr &&
- areBOSTypesCompatible(PS->getType(), Type)) {
- auto Iter = SizeArguments.find(Param);
- assert(Iter != SizeArguments.end());
- const ImplicitParamDecl *D = Iter->second;
- auto DIter = LocalDeclMap.find(D);
- assert(DIter != LocalDeclMap.end());
- return EmitLoadOfScalar(DIter->second, /*Volatile=*/false,
- getContext().getSizeType(), E->getBeginLoc());
- }
- }
- // LLVM can't handle Type=3 appropriately, and __builtin_object_size shouldn't
- // evaluate E for side-effects. In either case, we shouldn't lower to
- // @llvm.objectsize.
- if (Type == 3 || (!EmittedE && E->HasSideEffects(getContext())))
- return getDefaultBuiltinObjectSizeResult(Type, ResType);
- Value *Ptr = EmittedE ? EmittedE : EmitScalarExpr(E);
- assert(Ptr->getType()->isPointerTy() &&
- "Non-pointer passed to __builtin_object_size?");
- Function *F =
- CGM.getIntrinsic(Intrinsic::objectsize, {ResType, Ptr->getType()});
- // LLVM only supports 0 and 2, so make sure we pass that along as a boolean.
- Value *Min = Builder.getInt1((Type & 2) != 0);
- // For GCC compatibility, __builtin_object_size treats NULL as unknown size.
- Value *NullIsUnknown = Builder.getTrue();
- Value *Dynamic = Builder.getInt1(IsDynamic);
- return Builder.CreateCall(F, {Ptr, Min, NullIsUnknown, Dynamic});
- }
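A sketch of the non-pass_object_size path (demo name invented):

    #include <cstddef>

    size_t bos_demo(char *p) {
      // Type 0 lowers to
      //   @llvm.objectsize.i64.p0i8(%p, i1 false /*min*/,
      //                             i1 true  /*null unknown*/,
      //                             i1 false /*dynamic*/)
      return __builtin_object_size(p, 0);
    }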
- namespace {
- /// A struct to generically describe a bit test intrinsic.
- struct BitTest {
- enum ActionKind : uint8_t { TestOnly, Complement, Reset, Set };
- enum InterlockingKind : uint8_t {
- Unlocked,
- Sequential,
- Acquire,
- Release,
- NoFence
- };
- ActionKind Action;
- InterlockingKind Interlocking;
- bool Is64Bit;
- static BitTest decodeBitTestBuiltin(unsigned BuiltinID);
- };
- } // namespace
- BitTest BitTest::decodeBitTestBuiltin(unsigned BuiltinID) {
- switch (BuiltinID) {
- // Main portable variants.
- case Builtin::BI_bittest:
- return {TestOnly, Unlocked, false};
- case Builtin::BI_bittestandcomplement:
- return {Complement, Unlocked, false};
- case Builtin::BI_bittestandreset:
- return {Reset, Unlocked, false};
- case Builtin::BI_bittestandset:
- return {Set, Unlocked, false};
- case Builtin::BI_interlockedbittestandreset:
- return {Reset, Sequential, false};
- case Builtin::BI_interlockedbittestandset:
- return {Set, Sequential, false};
- // X86-specific 64-bit variants.
- case Builtin::BI_bittest64:
- return {TestOnly, Unlocked, true};
- case Builtin::BI_bittestandcomplement64:
- return {Complement, Unlocked, true};
- case Builtin::BI_bittestandreset64:
- return {Reset, Unlocked, true};
- case Builtin::BI_bittestandset64:
- return {Set, Unlocked, true};
- case Builtin::BI_interlockedbittestandreset64:
- return {Reset, Sequential, true};
- case Builtin::BI_interlockedbittestandset64:
- return {Set, Sequential, true};
- // ARM/AArch64-specific ordering variants.
- case Builtin::BI_interlockedbittestandset_acq:
- return {Set, Acquire, false};
- case Builtin::BI_interlockedbittestandset_rel:
- return {Set, Release, false};
- case Builtin::BI_interlockedbittestandset_nf:
- return {Set, NoFence, false};
- case Builtin::BI_interlockedbittestandreset_acq:
- return {Reset, Acquire, false};
- case Builtin::BI_interlockedbittestandreset_rel:
- return {Reset, Release, false};
- case Builtin::BI_interlockedbittestandreset_nf:
- return {Reset, NoFence, false};
- }
- llvm_unreachable("expected only bittest intrinsics");
- }
- static char bitActionToX86BTCode(BitTest::ActionKind A) {
- switch (A) {
- case BitTest::TestOnly: return '\0';
- case BitTest::Complement: return 'c';
- case BitTest::Reset: return 'r';
- case BitTest::Set: return 's';
- }
- llvm_unreachable("invalid action");
- }
- static llvm::Value *EmitX86BitTestIntrinsic(CodeGenFunction &CGF,
- BitTest BT,
- const CallExpr *E, Value *BitBase,
- Value *BitPos) {
- char Action = bitActionToX86BTCode(BT.Action);
- char SizeSuffix = BT.Is64Bit ? 'q' : 'l';
- // Build the assembly.
- SmallString<64> Asm;
- raw_svector_ostream AsmOS(Asm);
- if (BT.Interlocking != BitTest::Unlocked)
- AsmOS << "lock ";
- AsmOS << "bt";
- if (Action)
- AsmOS << Action;
- AsmOS << SizeSuffix << " $2, ($1)\n\tsetc ${0:b}";
- // Build the constraints. FIXME: We should support immediates when possible.
- std::string Constraints = "=r,r,r,~{cc},~{flags},~{fpsr}";
- llvm::IntegerType *IntType = llvm::IntegerType::get(
- CGF.getLLVMContext(),
- CGF.getContext().getTypeSize(E->getArg(1)->getType()));
- llvm::Type *IntPtrType = IntType->getPointerTo();
- llvm::FunctionType *FTy =
- llvm::FunctionType::get(CGF.Int8Ty, {IntPtrType, IntType}, false);
- llvm::InlineAsm *IA =
- llvm::InlineAsm::get(FTy, Asm, Constraints, /*hasSideEffects=*/true);
- return CGF.Builder.CreateCall(IA, {BitBase, BitPos});
- }
- static llvm::AtomicOrdering
- getBitTestAtomicOrdering(BitTest::InterlockingKind I) {
- switch (I) {
- case BitTest::Unlocked: return llvm::AtomicOrdering::NotAtomic;
- case BitTest::Sequential: return llvm::AtomicOrdering::SequentiallyConsistent;
- case BitTest::Acquire: return llvm::AtomicOrdering::Acquire;
- case BitTest::Release: return llvm::AtomicOrdering::Release;
- case BitTest::NoFence: return llvm::AtomicOrdering::Monotonic;
- }
- llvm_unreachable("invalid interlocking");
- }
- /// Emit a _bittest* intrinsic. These intrinsics take a pointer to an array of
- /// bits and a bit position and read and optionally modify the bit at that
- /// position. The position index can be arbitrarily large, i.e. it can be larger
- /// than 31 or 63, so we need an indexed load in the general case.
- static llvm::Value *EmitBitTestIntrinsic(CodeGenFunction &CGF,
- unsigned BuiltinID,
- const CallExpr *E) {
- Value *BitBase = CGF.EmitScalarExpr(E->getArg(0));
- Value *BitPos = CGF.EmitScalarExpr(E->getArg(1));
- BitTest BT = BitTest::decodeBitTestBuiltin(BuiltinID);
- // X86 has special BT, BTC, BTR, and BTS instructions that handle the array
- // indexing operation internally. Use them if possible.
- llvm::Triple::ArchType Arch = CGF.getTarget().getTriple().getArch();
- if (Arch == llvm::Triple::x86 || Arch == llvm::Triple::x86_64)
- return EmitX86BitTestIntrinsic(CGF, BT, E, BitBase, BitPos);
- // Otherwise, use generic code to load one byte and test the bit. Use all but
- // the bottom three bits as the array index, and the bottom three bits to form
- // a mask.
- // Bit = BitBaseI8[BitPos >> 3] & (1 << (BitPos & 0x7)) != 0;
- Value *ByteIndex = CGF.Builder.CreateAShr(
- BitPos, llvm::ConstantInt::get(BitPos->getType(), 3), "bittest.byteidx");
- Value *BitBaseI8 = CGF.Builder.CreatePointerCast(BitBase, CGF.Int8PtrTy);
- Address ByteAddr(CGF.Builder.CreateInBoundsGEP(CGF.Int8Ty, BitBaseI8,
- ByteIndex, "bittest.byteaddr"),
- CharUnits::One());
- Value *PosLow =
- CGF.Builder.CreateAnd(CGF.Builder.CreateTrunc(BitPos, CGF.Int8Ty),
- llvm::ConstantInt::get(CGF.Int8Ty, 0x7));
- // The updating instructions will need a mask.
- Value *Mask = nullptr;
- if (BT.Action != BitTest::TestOnly) {
- Mask = CGF.Builder.CreateShl(llvm::ConstantInt::get(CGF.Int8Ty, 1), PosLow,
- "bittest.mask");
- }
- // Check the action and ordering of the interlocked intrinsics.
- llvm::AtomicOrdering Ordering = getBitTestAtomicOrdering(BT.Interlocking);
- Value *OldByte = nullptr;
- if (Ordering != llvm::AtomicOrdering::NotAtomic) {
- // Emit a combined atomicrmw load/store operation for the interlocked
- // intrinsics.
- llvm::AtomicRMWInst::BinOp RMWOp = llvm::AtomicRMWInst::Or;
- if (BT.Action == BitTest::Reset) {
- Mask = CGF.Builder.CreateNot(Mask);
- RMWOp = llvm::AtomicRMWInst::And;
- }
- OldByte = CGF.Builder.CreateAtomicRMW(RMWOp, ByteAddr.getPointer(), Mask,
- Ordering);
- } else {
- // Emit a plain load for the non-interlocked intrinsics.
- OldByte = CGF.Builder.CreateLoad(ByteAddr, "bittest.byte");
- Value *NewByte = nullptr;
- switch (BT.Action) {
- case BitTest::TestOnly:
- // Don't store anything.
- break;
- case BitTest::Complement:
- NewByte = CGF.Builder.CreateXor(OldByte, Mask);
- break;
- case BitTest::Reset:
- NewByte = CGF.Builder.CreateAnd(OldByte, CGF.Builder.CreateNot(Mask));
- break;
- case BitTest::Set:
- NewByte = CGF.Builder.CreateOr(OldByte, Mask);
- break;
- }
- if (NewByte)
- CGF.Builder.CreateStore(NewByte, ByteAddr);
- }
- // However we loaded the old byte, either by plain load or atomicrmw, shift
- // the bit into the low position and mask it to 0 or 1.
- Value *ShiftedByte = CGF.Builder.CreateLShr(OldByte, PosLow, "bittest.shr");
- return CGF.Builder.CreateAnd(
- ShiftedByte, llvm::ConstantInt::get(CGF.Int8Ty, 1), "bittest.res");
- }
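A sketch of a bittest builtin, assuming an MSVC-compatible target (demo name invented):

    #include <intrin.h>

    unsigned char bts_demo(long *bits, long idx) {
      // On x86/x86-64 this becomes the inline asm built above
      // ("btsl $2, ($1); setc ..."); on other targets it uses the
      // generic byte-indexed load/mask sequence. idx may exceed 31.
      return _bittestandset(bits, idx);
    }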
- namespace {
- enum class MSVCSetJmpKind {
- _setjmpex,
- _setjmp3,
- _setjmp
- };
- }
- /// MSVC handles setjmp a bit differently on different platforms. On every
- /// architecture except 32-bit x86, the frame address is passed. On x86, extra
- /// parameters can be passed as variadic arguments, but we always pass none.
- static RValue EmitMSVCRTSetJmp(CodeGenFunction &CGF, MSVCSetJmpKind SJKind,
- const CallExpr *E) {
- llvm::Value *Arg1 = nullptr;
- llvm::Type *Arg1Ty = nullptr;
- StringRef Name;
- bool IsVarArg = false;
- if (SJKind == MSVCSetJmpKind::_setjmp3) {
- Name = "_setjmp3";
- Arg1Ty = CGF.Int32Ty;
- Arg1 = llvm::ConstantInt::get(CGF.IntTy, 0);
- IsVarArg = true;
- } else {
- Name = SJKind == MSVCSetJmpKind::_setjmp ? "_setjmp" : "_setjmpex";
- Arg1Ty = CGF.Int8PtrTy;
- if (CGF.getTarget().getTriple().getArch() == llvm::Triple::aarch64) {
- Arg1 = CGF.Builder.CreateCall(CGF.CGM.getIntrinsic(Intrinsic::sponentry));
- } else
- Arg1 = CGF.Builder.CreateCall(CGF.CGM.getIntrinsic(Intrinsic::frameaddress),
- llvm::ConstantInt::get(CGF.Int32Ty, 0));
- }
- // Mark the call site and declaration with ReturnsTwice.
- llvm::Type *ArgTypes[2] = {CGF.Int8PtrTy, Arg1Ty};
- llvm::AttributeList ReturnsTwiceAttr = llvm::AttributeList::get(
- CGF.getLLVMContext(), llvm::AttributeList::FunctionIndex,
- llvm::Attribute::ReturnsTwice);
- llvm::FunctionCallee SetJmpFn = CGF.CGM.CreateRuntimeFunction(
- llvm::FunctionType::get(CGF.IntTy, ArgTypes, IsVarArg), Name,
- ReturnsTwiceAttr, /*Local=*/true);
- llvm::Value *Buf = CGF.Builder.CreateBitOrPointerCast(
- CGF.EmitScalarExpr(E->getArg(0)), CGF.Int8PtrTy);
- llvm::Value *Args[] = {Buf, Arg1};
- llvm::CallBase *CB = CGF.EmitRuntimeCallOrInvoke(SetJmpFn, Args);
- CB->setAttributes(ReturnsTwiceAttr);
- return RValue::get(CB);
- }
- // Many MSVC builtins are available on x64, ARM, and AArch64; to avoid repeating code,
- // we handle them here.
- enum class CodeGenFunction::MSVCIntrin {
- _BitScanForward,
- _BitScanReverse,
- _InterlockedAnd,
- _InterlockedDecrement,
- _InterlockedExchange,
- _InterlockedExchangeAdd,
- _InterlockedExchangeSub,
- _InterlockedIncrement,
- _InterlockedOr,
- _InterlockedXor,
- _InterlockedExchangeAdd_acq,
- _InterlockedExchangeAdd_rel,
- _InterlockedExchangeAdd_nf,
- _InterlockedExchange_acq,
- _InterlockedExchange_rel,
- _InterlockedExchange_nf,
- _InterlockedCompareExchange_acq,
- _InterlockedCompareExchange_rel,
- _InterlockedCompareExchange_nf,
- _InterlockedOr_acq,
- _InterlockedOr_rel,
- _InterlockedOr_nf,
- _InterlockedXor_acq,
- _InterlockedXor_rel,
- _InterlockedXor_nf,
- _InterlockedAnd_acq,
- _InterlockedAnd_rel,
- _InterlockedAnd_nf,
- _InterlockedIncrement_acq,
- _InterlockedIncrement_rel,
- _InterlockedIncrement_nf,
- _InterlockedDecrement_acq,
- _InterlockedDecrement_rel,
- _InterlockedDecrement_nf,
- __fastfail,
- };
- Value *CodeGenFunction::EmitMSVCBuiltinExpr(MSVCIntrin BuiltinID,
- const CallExpr *E) {
- switch (BuiltinID) {
- case MSVCIntrin::_BitScanForward:
- case MSVCIntrin::_BitScanReverse: {
- Value *ArgValue = EmitScalarExpr(E->getArg(1));
- llvm::Type *ArgType = ArgValue->getType();
- llvm::Type *IndexType =
- EmitScalarExpr(E->getArg(0))->getType()->getPointerElementType();
- llvm::Type *ResultType = ConvertType(E->getType());
- Value *ArgZero = llvm::Constant::getNullValue(ArgType);
- Value *ResZero = llvm::Constant::getNullValue(ResultType);
- Value *ResOne = llvm::ConstantInt::get(ResultType, 1);
- BasicBlock *Begin = Builder.GetInsertBlock();
- BasicBlock *End = createBasicBlock("bitscan_end", this->CurFn);
- Builder.SetInsertPoint(End);
- PHINode *Result = Builder.CreatePHI(ResultType, 2, "bitscan_result");
- Builder.SetInsertPoint(Begin);
- Value *IsZero = Builder.CreateICmpEQ(ArgValue, ArgZero);
- BasicBlock *NotZero = createBasicBlock("bitscan_not_zero", this->CurFn);
- Builder.CreateCondBr(IsZero, End, NotZero);
- Result->addIncoming(ResZero, Begin);
- Builder.SetInsertPoint(NotZero);
- Address IndexAddress = EmitPointerWithAlignment(E->getArg(0));
- if (BuiltinID == MSVCIntrin::_BitScanForward) {
- Function *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType);
- Value *ZeroCount = Builder.CreateCall(F, {ArgValue, Builder.getTrue()});
- ZeroCount = Builder.CreateIntCast(ZeroCount, IndexType, false);
- Builder.CreateStore(ZeroCount, IndexAddress, false);
- } else {
- unsigned ArgWidth = cast<llvm::IntegerType>(ArgType)->getBitWidth();
- Value *ArgTypeLastIndex = llvm::ConstantInt::get(IndexType, ArgWidth - 1);
- Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType);
- Value *ZeroCount = Builder.CreateCall(F, {ArgValue, Builder.getTrue()});
- ZeroCount = Builder.CreateIntCast(ZeroCount, IndexType, false);
- Value *Index = Builder.CreateNSWSub(ArgTypeLastIndex, ZeroCount);
- Builder.CreateStore(Index, IndexAddress, false);
- }
- Builder.CreateBr(End);
- Result->addIncoming(ResOne, NotZero);
- Builder.SetInsertPoint(End);
- return Result;
- }
- case MSVCIntrin::_InterlockedAnd:
- return MakeBinaryAtomicValue(*this, AtomicRMWInst::And, E);
- case MSVCIntrin::_InterlockedExchange:
- return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xchg, E);
- case MSVCIntrin::_InterlockedExchangeAdd:
- return MakeBinaryAtomicValue(*this, AtomicRMWInst::Add, E);
- case MSVCIntrin::_InterlockedExchangeSub:
- return MakeBinaryAtomicValue(*this, AtomicRMWInst::Sub, E);
- case MSVCIntrin::_InterlockedOr:
- return MakeBinaryAtomicValue(*this, AtomicRMWInst::Or, E);
- case MSVCIntrin::_InterlockedXor:
- return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xor, E);
- case MSVCIntrin::_InterlockedExchangeAdd_acq:
- return MakeBinaryAtomicValue(*this, AtomicRMWInst::Add, E,
- AtomicOrdering::Acquire);
- case MSVCIntrin::_InterlockedExchangeAdd_rel:
- return MakeBinaryAtomicValue(*this, AtomicRMWInst::Add, E,
- AtomicOrdering::Release);
- case MSVCIntrin::_InterlockedExchangeAdd_nf:
- return MakeBinaryAtomicValue(*this, AtomicRMWInst::Add, E,
- AtomicOrdering::Monotonic);
- case MSVCIntrin::_InterlockedExchange_acq:
- return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xchg, E,
- AtomicOrdering::Acquire);
- case MSVCIntrin::_InterlockedExchange_rel:
- return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xchg, E,
- AtomicOrdering::Release);
- case MSVCIntrin::_InterlockedExchange_nf:
- return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xchg, E,
- AtomicOrdering::Monotonic);
- case MSVCIntrin::_InterlockedCompareExchange_acq:
- return EmitAtomicCmpXchgForMSIntrin(*this, E, AtomicOrdering::Acquire);
- case MSVCIntrin::_InterlockedCompareExchange_rel:
- return EmitAtomicCmpXchgForMSIntrin(*this, E, AtomicOrdering::Release);
- case MSVCIntrin::_InterlockedCompareExchange_nf:
- return EmitAtomicCmpXchgForMSIntrin(*this, E, AtomicOrdering::Monotonic);
- case MSVCIntrin::_InterlockedOr_acq:
- return MakeBinaryAtomicValue(*this, AtomicRMWInst::Or, E,
- AtomicOrdering::Acquire);
- case MSVCIntrin::_InterlockedOr_rel:
- return MakeBinaryAtomicValue(*this, AtomicRMWInst::Or, E,
- AtomicOrdering::Release);
- case MSVCIntrin::_InterlockedOr_nf:
- return MakeBinaryAtomicValue(*this, AtomicRMWInst::Or, E,
- AtomicOrdering::Monotonic);
- case MSVCIntrin::_InterlockedXor_acq:
- return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xor, E,
- AtomicOrdering::Acquire);
- case MSVCIntrin::_InterlockedXor_rel:
- return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xor, E,
- AtomicOrdering::Release);
- case MSVCIntrin::_InterlockedXor_nf:
- return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xor, E,
- AtomicOrdering::Monotonic);
- case MSVCIntrin::_InterlockedAnd_acq:
- return MakeBinaryAtomicValue(*this, AtomicRMWInst::And, E,
- AtomicOrdering::Acquire);
- case MSVCIntrin::_InterlockedAnd_rel:
- return MakeBinaryAtomicValue(*this, AtomicRMWInst::And, E,
- AtomicOrdering::Release);
- case MSVCIntrin::_InterlockedAnd_nf:
- return MakeBinaryAtomicValue(*this, AtomicRMWInst::And, E,
- AtomicOrdering::Monotonic);
- case MSVCIntrin::_InterlockedIncrement_acq:
- return EmitAtomicIncrementValue(*this, E, AtomicOrdering::Acquire);
- case MSVCIntrin::_InterlockedIncrement_rel:
- return EmitAtomicIncrementValue(*this, E, AtomicOrdering::Release);
- case MSVCIntrin::_InterlockedIncrement_nf:
- return EmitAtomicIncrementValue(*this, E, AtomicOrdering::Monotonic);
- case MSVCIntrin::_InterlockedDecrement_acq:
- return EmitAtomicDecrementValue(*this, E, AtomicOrdering::Acquire);
- case MSVCIntrin::_InterlockedDecrement_rel:
- return EmitAtomicDecrementValue(*this, E, AtomicOrdering::Release);
- case MSVCIntrin::_InterlockedDecrement_nf:
- return EmitAtomicDecrementValue(*this, E, AtomicOrdering::Monotonic);
- case MSVCIntrin::_InterlockedDecrement:
- return EmitAtomicDecrementValue(*this, E);
- case MSVCIntrin::_InterlockedIncrement:
- return EmitAtomicIncrementValue(*this, E);
- case MSVCIntrin::__fastfail: {
- // Request immediate process termination from the kernel. The instruction
- // sequences to do this are documented on MSDN:
- // https://msdn.microsoft.com/en-us/library/dn774154.aspx
- llvm::Triple::ArchType ISA = getTarget().getTriple().getArch();
- StringRef Asm, Constraints;
- switch (ISA) {
- default:
- ErrorUnsupported(E, "__fastfail call for this architecture");
- break;
- case llvm::Triple::x86:
- case llvm::Triple::x86_64:
- Asm = "int $$0x29";
- Constraints = "{cx}";
- break;
- case llvm::Triple::thumb:
- Asm = "udf #251";
- Constraints = "{r0}";
- break;
- case llvm::Triple::aarch64:
- Asm = "brk #0xF003";
- Constraints = "{w0}";
- }
- llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, {Int32Ty}, false);
- llvm::InlineAsm *IA =
- llvm::InlineAsm::get(FTy, Asm, Constraints, /*hasSideEffects=*/true);
- llvm::AttributeList NoReturnAttr = llvm::AttributeList::get(
- getLLVMContext(), llvm::AttributeList::FunctionIndex,
- llvm::Attribute::NoReturn);
- llvm::CallInst *CI = Builder.CreateCall(IA, EmitScalarExpr(E->getArg(0)));
- CI->setAttributes(NoReturnAttr);
- return CI;
- }
- }
- llvm_unreachable("Incorrect MSVC intrinsic!");
- }
- namespace {
- // ARC cleanup for __builtin_os_log_format
- struct CallObjCArcUse final : EHScopeStack::Cleanup {
- CallObjCArcUse(llvm::Value *object) : object(object) {}
- llvm::Value *object;
- void Emit(CodeGenFunction &CGF, Flags flags) override {
- CGF.EmitARCIntrinsicUse(object);
- }
- };
- }
- Value *CodeGenFunction::EmitCheckedArgForBuiltin(const Expr *E,
- BuiltinCheckKind Kind) {
- assert((Kind == BCK_CLZPassedZero || Kind == BCK_CTZPassedZero)
- && "Unsupported builtin check kind");
- Value *ArgValue = EmitScalarExpr(E);
- if (!SanOpts.has(SanitizerKind::Builtin) || !getTarget().isCLZForZeroUndef())
- return ArgValue;
- SanitizerScope SanScope(this);
- Value *Cond = Builder.CreateICmpNE(
- ArgValue, llvm::Constant::getNullValue(ArgValue->getType()));
- EmitCheck(std::make_pair(Cond, SanitizerKind::Builtin),
- SanitizerHandler::InvalidBuiltin,
- {EmitCheckSourceLocation(E->getExprLoc()),
- llvm::ConstantInt::get(Builder.getInt8Ty(), Kind)},
- None);
- return ArgValue;
- }
- /// Get the argument type for arguments to os_log_helper.
- static CanQualType getOSLogArgType(ASTContext &C, int Size) {
- QualType UnsignedTy = C.getIntTypeForBitwidth(Size * 8, /*Signed=*/false);
- return C.getCanonicalType(UnsignedTy);
- }
- llvm::Function *CodeGenFunction::generateBuiltinOSLogHelperFunction(
- const analyze_os_log::OSLogBufferLayout &Layout,
- CharUnits BufferAlignment) {
- ASTContext &Ctx = getContext();
- llvm::SmallString<64> Name;
- {
- raw_svector_ostream OS(Name);
- OS << "__os_log_helper";
- OS << "_" << BufferAlignment.getQuantity();
- OS << "_" << int(Layout.getSummaryByte());
- OS << "_" << int(Layout.getNumArgsByte());
- for (const auto &Item : Layout.Items)
- OS << "_" << int(Item.getSizeByte()) << "_"
- << int(Item.getDescriptorByte());
- }
- if (llvm::Function *F = CGM.getModule().getFunction(Name))
- return F;
- llvm::SmallVector<QualType, 4> ArgTys;
- FunctionArgList Args;
- Args.push_back(ImplicitParamDecl::Create(
- Ctx, nullptr, SourceLocation(), &Ctx.Idents.get("buffer"), Ctx.VoidPtrTy,
- ImplicitParamDecl::Other));
- ArgTys.emplace_back(Ctx.VoidPtrTy);
- for (unsigned int I = 0, E = Layout.Items.size(); I < E; ++I) {
- char Size = Layout.Items[I].getSizeByte();
- if (!Size)
- continue;
- QualType ArgTy = getOSLogArgType(Ctx, Size);
- Args.push_back(ImplicitParamDecl::Create(
- Ctx, nullptr, SourceLocation(),
- &Ctx.Idents.get(std::string("arg") + llvm::to_string(I)), ArgTy,
- ImplicitParamDecl::Other));
- ArgTys.emplace_back(ArgTy);
- }
- QualType ReturnTy = Ctx.VoidTy;
- QualType FunctionTy = Ctx.getFunctionType(ReturnTy, ArgTys, {});
- // The helper function has linkonce_odr linkage to enable the linker to merge
- // identical functions. To ensure the merging always happens, 'noinline' is
- // attached to the function when compiling with -Oz.
- const CGFunctionInfo &FI =
- CGM.getTypes().arrangeBuiltinFunctionDeclaration(ReturnTy, Args);
- llvm::FunctionType *FuncTy = CGM.getTypes().GetFunctionType(FI);
- llvm::Function *Fn = llvm::Function::Create(
- FuncTy, llvm::GlobalValue::LinkOnceODRLinkage, Name, &CGM.getModule());
- Fn->setVisibility(llvm::GlobalValue::HiddenVisibility);
- CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI, Fn);
- CGM.SetLLVMFunctionAttributesForDefinition(nullptr, Fn);
- Fn->setDoesNotThrow();
- // Attach 'noinline' at -Oz.
- if (CGM.getCodeGenOpts().OptimizeSize == 2)
- Fn->addFnAttr(llvm::Attribute::NoInline);
- auto NL = ApplyDebugLocation::CreateEmpty(*this);
- IdentifierInfo *II = &Ctx.Idents.get(Name);
- FunctionDecl *FD = FunctionDecl::Create(
- Ctx, Ctx.getTranslationUnitDecl(), SourceLocation(), SourceLocation(), II,
- FunctionTy, nullptr, SC_PrivateExtern, false, false);
- StartFunction(FD, ReturnTy, Fn, FI, Args);
- // Create a scope with an artificial location for the body of this function.
- auto AL = ApplyDebugLocation::CreateArtificial(*this);
- CharUnits Offset;
- Address BufAddr(Builder.CreateLoad(GetAddrOfLocalVar(Args[0]), "buf"),
- BufferAlignment);
- Builder.CreateStore(Builder.getInt8(Layout.getSummaryByte()),
- Builder.CreateConstByteGEP(BufAddr, Offset++, "summary"));
- Builder.CreateStore(Builder.getInt8(Layout.getNumArgsByte()),
- Builder.CreateConstByteGEP(BufAddr, Offset++, "numArgs"));
- unsigned I = 1;
- for (const auto &Item : Layout.Items) {
- Builder.CreateStore(
- Builder.getInt8(Item.getDescriptorByte()),
- Builder.CreateConstByteGEP(BufAddr, Offset++, "argDescriptor"));
- Builder.CreateStore(
- Builder.getInt8(Item.getSizeByte()),
- Builder.CreateConstByteGEP(BufAddr, Offset++, "argSize"));
- CharUnits Size = Item.size();
- if (!Size.getQuantity())
- continue;
- Address Arg = GetAddrOfLocalVar(Args[I]);
- Address Addr = Builder.CreateConstByteGEP(BufAddr, Offset, "argData");
- Addr = Builder.CreateBitCast(Addr, Arg.getPointer()->getType(),
- "argDataCast");
- Builder.CreateStore(Builder.CreateLoad(Arg), Addr);
- Offset += Size;
- ++I;
- }
- FinishFunction();
- return Fn;
- }
- RValue CodeGenFunction::emitBuiltinOSLogFormat(const CallExpr &E) {
- assert(E.getNumArgs() >= 2 &&
- "__builtin_os_log_format takes at least 2 arguments");
- ASTContext &Ctx = getContext();
- analyze_os_log::OSLogBufferLayout Layout;
- analyze_os_log::computeOSLogBufferLayout(Ctx, &E, Layout);
- Address BufAddr = EmitPointerWithAlignment(E.getArg(0));
- llvm::SmallVector<llvm::Value *, 4> RetainableOperands;
- // Ignore argument 1, the format string. It is not currently used.
- CallArgList Args;
- Args.add(RValue::get(BufAddr.getPointer()), Ctx.VoidPtrTy);
- for (const auto &Item : Layout.Items) {
- int Size = Item.getSizeByte();
- if (!Size)
- continue;
- llvm::Value *ArgVal;
- if (Item.getKind() == analyze_os_log::OSLogBufferItem::MaskKind) {
- uint64_t Val = 0;
- for (unsigned I = 0, E = Item.getMaskType().size(); I < E; ++I)
- Val |= ((uint64_t)Item.getMaskType()[I]) << I * 8;
- ArgVal = llvm::Constant::getIntegerValue(Int64Ty, llvm::APInt(64, Val));
- } else if (const Expr *TheExpr = Item.getExpr()) {
- ArgVal = EmitScalarExpr(TheExpr, /*Ignore*/ false);
- // Check if this is a retainable type.
- if (TheExpr->getType()->isObjCRetainableType()) {
- assert(getEvaluationKind(TheExpr->getType()) == TEK_Scalar &&
- "Only scalar can be a ObjC retainable type");
- // Check if the object is constant, if not, save it in
- // RetainableOperands.
- if (!isa<Constant>(ArgVal))
- RetainableOperands.push_back(ArgVal);
- }
- } else {
- ArgVal = Builder.getInt32(Item.getConstValue().getQuantity());
- }
- unsigned ArgValSize =
- CGM.getDataLayout().getTypeSizeInBits(ArgVal->getType());
- llvm::IntegerType *IntTy = llvm::Type::getIntNTy(getLLVMContext(),
- ArgValSize);
- ArgVal = Builder.CreateBitOrPointerCast(ArgVal, IntTy);
- CanQualType ArgTy = getOSLogArgType(Ctx, Size);
- // If ArgVal has type x86_fp80, zero-extend ArgVal.
- ArgVal = Builder.CreateZExtOrBitCast(ArgVal, ConvertType(ArgTy));
- Args.add(RValue::get(ArgVal), ArgTy);
- }
- const CGFunctionInfo &FI =
- CGM.getTypes().arrangeBuiltinFunctionCall(Ctx.VoidTy, Args);
- llvm::Function *F = CodeGenFunction(CGM).generateBuiltinOSLogHelperFunction(
- Layout, BufAddr.getAlignment());
- EmitCall(FI, CGCallee::forDirect(F), ReturnValueSlot(), Args);
- // Push a clang.arc.use cleanup for each object in RetainableOperands. The
- // cleanup will cause the use to appear after the final log call, keeping
- // the object valid while it’s held in the log buffer. Note that if there’s
- // a release cleanup on the object, it will already be active; since
- // cleanups are emitted in reverse order, the use will occur before the
- // object is released.
- if (!RetainableOperands.empty() && getLangOpts().ObjCAutoRefCount &&
- CGM.getCodeGenOpts().OptimizationLevel != 0)
- for (llvm::Value *Object : RetainableOperands)
- pushFullExprCleanup<CallObjCArcUse>(getARCCleanupKind(), Object);
- return RValue::get(BufAddr.getPointer());
- }
- /// Determine if a binop is a checked mixed-sign multiply we can specialize.
- static bool isSpecialMixedSignMultiply(unsigned BuiltinID,
- WidthAndSignedness Op1Info,
- WidthAndSignedness Op2Info,
- WidthAndSignedness ResultInfo) {
- return BuiltinID == Builtin::BI__builtin_mul_overflow &&
- std::max(Op1Info.Width, Op2Info.Width) >= ResultInfo.Width &&
- Op1Info.Signed != Op2Info.Signed;
- }
- /// Emit a checked mixed-sign multiply. This is a cheaper specialization of
- /// the generic checked-binop irgen.
- static RValue
- EmitCheckedMixedSignMultiply(CodeGenFunction &CGF, const clang::Expr *Op1,
- WidthAndSignedness Op1Info, const clang::Expr *Op2,
- WidthAndSignedness Op2Info,
- const clang::Expr *ResultArg, QualType ResultQTy,
- WidthAndSignedness ResultInfo) {
- assert(isSpecialMixedSignMultiply(Builtin::BI__builtin_mul_overflow, Op1Info,
- Op2Info, ResultInfo) &&
- "Not a mixed-sign multipliction we can specialize");
- // Emit the signed and unsigned operands.
- const clang::Expr *SignedOp = Op1Info.Signed ? Op1 : Op2;
- const clang::Expr *UnsignedOp = Op1Info.Signed ? Op2 : Op1;
- llvm::Value *Signed = CGF.EmitScalarExpr(SignedOp);
- llvm::Value *Unsigned = CGF.EmitScalarExpr(UnsignedOp);
- unsigned SignedOpWidth = Op1Info.Signed ? Op1Info.Width : Op2Info.Width;
- unsigned UnsignedOpWidth = Op1Info.Signed ? Op2Info.Width : Op1Info.Width;
- // One of the operands may be smaller than the other. If so, [s|z]ext it.
- if (SignedOpWidth < UnsignedOpWidth)
- Signed = CGF.Builder.CreateSExt(Signed, Unsigned->getType(), "op.sext");
- if (UnsignedOpWidth < SignedOpWidth)
- Unsigned = CGF.Builder.CreateZExt(Unsigned, Signed->getType(), "op.zext");
- llvm::Type *OpTy = Signed->getType();
- llvm::Value *Zero = llvm::Constant::getNullValue(OpTy);
- Address ResultPtr = CGF.EmitPointerWithAlignment(ResultArg);
- llvm::Type *ResTy = ResultPtr.getElementType();
- unsigned OpWidth = std::max(Op1Info.Width, Op2Info.Width);
- // Take the absolute value of the signed operand.
- llvm::Value *IsNegative = CGF.Builder.CreateICmpSLT(Signed, Zero);
- llvm::Value *AbsOfNegative = CGF.Builder.CreateSub(Zero, Signed);
- llvm::Value *AbsSigned =
- CGF.Builder.CreateSelect(IsNegative, AbsOfNegative, Signed);
- // Perform a checked unsigned multiplication.
- llvm::Value *UnsignedOverflow;
- llvm::Value *UnsignedResult =
- EmitOverflowIntrinsic(CGF, llvm::Intrinsic::umul_with_overflow, AbsSigned,
- Unsigned, UnsignedOverflow);
- llvm::Value *Overflow, *Result;
- if (ResultInfo.Signed) {
- // Signed overflow occurs if the result is greater than INT_MAX or less
- // than INT_MIN, i.e. when |Result| > (INT_MAX + IsNegative).
- auto IntMax =
- llvm::APInt::getSignedMaxValue(ResultInfo.Width).zextOrSelf(OpWidth);
- llvm::Value *MaxResult =
- CGF.Builder.CreateAdd(llvm::ConstantInt::get(OpTy, IntMax),
- CGF.Builder.CreateZExt(IsNegative, OpTy));
- llvm::Value *SignedOverflow =
- CGF.Builder.CreateICmpUGT(UnsignedResult, MaxResult);
- Overflow = CGF.Builder.CreateOr(UnsignedOverflow, SignedOverflow);
- // Prepare the signed result (possibly by negating it).
- llvm::Value *NegativeResult = CGF.Builder.CreateNeg(UnsignedResult);
- llvm::Value *SignedResult =
- CGF.Builder.CreateSelect(IsNegative, NegativeResult, UnsignedResult);
- Result = CGF.Builder.CreateTrunc(SignedResult, ResTy);
- } else {
- // Unsigned overflow occurs if the result is < 0 or greater than UINT_MAX.
- llvm::Value *Underflow = CGF.Builder.CreateAnd(
- IsNegative, CGF.Builder.CreateIsNotNull(UnsignedResult));
- Overflow = CGF.Builder.CreateOr(UnsignedOverflow, Underflow);
- if (ResultInfo.Width < OpWidth) {
- auto IntMax =
- llvm::APInt::getMaxValue(ResultInfo.Width).zext(OpWidth);
- llvm::Value *TruncOverflow = CGF.Builder.CreateICmpUGT(
- UnsignedResult, llvm::ConstantInt::get(OpTy, IntMax));
- Overflow = CGF.Builder.CreateOr(Overflow, TruncOverflow);
- }
- // Negate the product if it would be negative in infinite precision.
- Result = CGF.Builder.CreateSelect(
- IsNegative, CGF.Builder.CreateNeg(UnsignedResult), UnsignedResult);
- Result = CGF.Builder.CreateTrunc(Result, ResTy);
- }
- assert(Overflow && Result && "Missing overflow or result");
- bool isVolatile =
- ResultArg->getType()->getPointeeType().isVolatileQualified();
- CGF.Builder.CreateStore(CGF.EmitToMemory(Result, ResultQTy), ResultPtr,
- isVolatile);
- return RValue::get(Overflow);
- }
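- // Illustrative walk-through (not part of the source): for a call such as
- //   int r; bool ovf = __builtin_mul_overflow(-3, 5u, &r);
- // the lowering above takes |-3| == 3, emits
- //   %res = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 3, i32 5)
- // and then, because the signed operand was negative, checks the product
- // against INT_MAX + 1 and negates it before storing the result to r.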
- static llvm::Value *dumpRecord(CodeGenFunction &CGF, QualType RType,
- Value *&RecordPtr, CharUnits Align,
- llvm::FunctionCallee Func, int Lvl) {
- const auto *RT = RType->getAs<RecordType>();
- ASTContext &Context = CGF.getContext();
- RecordDecl *RD = RT->getDecl()->getDefinition();
- std::string Pad = std::string(Lvl * 4, ' ');
- Value *GString =
- CGF.Builder.CreateGlobalStringPtr(RType.getAsString() + " {\n");
- Value *Res = CGF.Builder.CreateCall(Func, {GString});
- static llvm::DenseMap<QualType, const char *> Types;
- if (Types.empty()) {
- Types[Context.CharTy] = "%c";
- Types[Context.BoolTy] = "%d";
- Types[Context.SignedCharTy] = "%hhd";
- Types[Context.UnsignedCharTy] = "%hhu";
- Types[Context.IntTy] = "%d";
- Types[Context.UnsignedIntTy] = "%u";
- Types[Context.LongTy] = "%ld";
- Types[Context.UnsignedLongTy] = "%lu";
- Types[Context.LongLongTy] = "%lld";
- Types[Context.UnsignedLongLongTy] = "%llu";
- Types[Context.ShortTy] = "%hd";
- Types[Context.UnsignedShortTy] = "%hu";
- Types[Context.VoidPtrTy] = "%p";
- Types[Context.FloatTy] = "%f";
- Types[Context.DoubleTy] = "%f";
- Types[Context.LongDoubleTy] = "%Lf";
- Types[Context.getPointerType(Context.CharTy)] = "%s";
- Types[Context.getPointerType(Context.getConstType(Context.CharTy))] = "%s";
- }
- for (const auto *FD : RD->fields()) {
- Value *FieldPtr = RecordPtr;
- if (RD->isUnion())
- FieldPtr = CGF.Builder.CreatePointerCast(
- FieldPtr, CGF.ConvertType(Context.getPointerType(FD->getType())));
- else
- FieldPtr = CGF.Builder.CreateStructGEP(CGF.ConvertType(RType), FieldPtr,
- FD->getFieldIndex());
- GString = CGF.Builder.CreateGlobalStringPtr(
- llvm::Twine(Pad)
- .concat(FD->getType().getAsString())
- .concat(llvm::Twine(' '))
- .concat(FD->getNameAsString())
- .concat(" : ")
- .str());
- Value *TmpRes = CGF.Builder.CreateCall(Func, {GString});
- Res = CGF.Builder.CreateAdd(Res, TmpRes);
- QualType CanonicalType =
- FD->getType().getUnqualifiedType().getCanonicalType();
- // If the field is itself a record type, dump it recursively.
- if (CanonicalType->isRecordType()) {
- Value *TmpRes =
- dumpRecord(CGF, CanonicalType, FieldPtr, Align, Func, Lvl + 1);
- Res = CGF.Builder.CreateAdd(TmpRes, Res);
- continue;
- }
- // Determine the best printf format for the current field, falling back to "%p".
- llvm::Twine Format = Types.find(CanonicalType) == Types.end()
- ? Types[Context.VoidPtrTy]
- : Types[CanonicalType];
- Address FieldAddress = Address(FieldPtr, Align);
- FieldPtr = CGF.Builder.CreateLoad(FieldAddress);
- // FIXME Need to handle bitfield here
- GString = CGF.Builder.CreateGlobalStringPtr(
- Format.concat(llvm::Twine('\n')).str());
- TmpRes = CGF.Builder.CreateCall(Func, {GString, FieldPtr});
- Res = CGF.Builder.CreateAdd(Res, TmpRes);
- }
- GString = CGF.Builder.CreateGlobalStringPtr(Pad + "}\n");
- Value *TmpRes = CGF.Builder.CreateCall(Func, {GString});
- Res = CGF.Builder.CreateAdd(Res, TmpRes);
- return Res;
- }
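- // Output sketch (illustrative; exact text depends on the record and its
- // values): for `struct S { int a; }` with a == 1, the emitted calls print
- // roughly:
- //   struct S {
- //   int a : 1
- //   }
- // Nested records recurse with Lvl + 1, padding each level by four spaces.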
- static bool
- TypeRequiresBuiltinLaunderImp(const ASTContext &Ctx, QualType Ty,
- llvm::SmallPtrSetImpl<const Decl *> &Seen) {
- if (const auto *Arr = Ctx.getAsArrayType(Ty))
- Ty = Ctx.getBaseElementType(Arr);
- const auto *Record = Ty->getAsCXXRecordDecl();
- if (!Record)
- return false;
- // We've already checked this type, or are in the process of checking it.
- if (!Seen.insert(Record).second)
- return false;
- assert(Record->hasDefinition() &&
- "Incomplete types should already be diagnosed");
- if (Record->isDynamicClass())
- return true;
- for (FieldDecl *F : Record->fields()) {
- if (TypeRequiresBuiltinLaunderImp(Ctx, F->getType(), Seen))
- return true;
- }
- return false;
- }
- /// Determine if the specified type requires laundering by checking if it is a
- /// dynamic class type or contains a subobject which is a dynamic class type.
- static bool TypeRequiresBuiltinLaunder(CodeGenModule &CGM, QualType Ty) {
- if (!CGM.getCodeGenOpts().StrictVTablePointers)
- return false;
- llvm::SmallPtrSet<const Decl *, 16> Seen;
- return TypeRequiresBuiltinLaunderImp(CGM.getContext(), Ty, Seen);
- }
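- // Example (illustrative): under -fstrict-vtable-pointers, given
- //   struct B { virtual void f(); };
- //   struct D { B b; };
- // both B and D require laundering -- B because it is a dynamic class and D
- // because it contains one as a subobject -- so __builtin_launder on a D*
- // lowers to llvm.launder.invariant.group instead of a no-op.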
- RValue CodeGenFunction::emitRotate(const CallExpr *E, bool IsRotateRight) {
- llvm::Value *Src = EmitScalarExpr(E->getArg(0));
- llvm::Value *ShiftAmt = EmitScalarExpr(E->getArg(1));
- // The builtin's shift arg may have a different type than the source arg and
- // result, but the LLVM intrinsic uses the same type for all values.
- llvm::Type *Ty = Src->getType();
- ShiftAmt = Builder.CreateIntCast(ShiftAmt, Ty, false);
- // Rotate is a special case of LLVM funnel shift - 1st 2 args are the same.
- unsigned IID = IsRotateRight ? Intrinsic::fshr : Intrinsic::fshl;
- Function *F = CGM.getIntrinsic(IID, Ty);
- return RValue::get(Builder.CreateCall(F, { Src, Src, ShiftAmt }));
- }
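- // Example (illustrative): __builtin_rotateleft32(x, n) becomes
- //   %r = call i32 @llvm.fshl.i32(i32 %x, i32 %x, i32 %n)
- // i.e. a funnel shift whose two data operands are the same value.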
- RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
- const CallExpr *E,
- ReturnValueSlot ReturnValue) {
- const FunctionDecl *FD = GD.getDecl()->getAsFunction();
- // See if we can constant fold this builtin. If so, don't emit it at all.
- Expr::EvalResult Result;
- if (E->EvaluateAsRValue(Result, CGM.getContext()) &&
- !Result.hasSideEffects()) {
- if (Result.Val.isInt())
- return RValue::get(llvm::ConstantInt::get(getLLVMContext(),
- Result.Val.getInt()));
- if (Result.Val.isFloat())
- return RValue::get(llvm::ConstantFP::get(getLLVMContext(),
- Result.Val.getFloat()));
- }
- // There are LLVM math intrinsics/instructions corresponding to math library
- // functions, except that the LLVM op will never set errno while the math
- // library might. Also, math builtins have the same semantics as their math
- // library twins. Thus, we can transform math library and builtin calls to
- // their LLVM counterparts if the call is marked 'const' (known to never set
- // errno).
- if (FD->hasAttr<ConstAttr>()) {
- switch (BuiltinID) {
- case Builtin::BIceil:
- case Builtin::BIceilf:
- case Builtin::BIceill:
- case Builtin::BI__builtin_ceil:
- case Builtin::BI__builtin_ceilf:
- case Builtin::BI__builtin_ceill:
- return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::ceil));
- case Builtin::BIcopysign:
- case Builtin::BIcopysignf:
- case Builtin::BIcopysignl:
- case Builtin::BI__builtin_copysign:
- case Builtin::BI__builtin_copysignf:
- case Builtin::BI__builtin_copysignl:
- case Builtin::BI__builtin_copysignf128:
- return RValue::get(emitBinaryBuiltin(*this, E, Intrinsic::copysign));
- case Builtin::BIcos:
- case Builtin::BIcosf:
- case Builtin::BIcosl:
- case Builtin::BI__builtin_cos:
- case Builtin::BI__builtin_cosf:
- case Builtin::BI__builtin_cosl:
- return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::cos));
- case Builtin::BIexp:
- case Builtin::BIexpf:
- case Builtin::BIexpl:
- case Builtin::BI__builtin_exp:
- case Builtin::BI__builtin_expf:
- case Builtin::BI__builtin_expl:
- return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::exp));
- case Builtin::BIexp2:
- case Builtin::BIexp2f:
- case Builtin::BIexp2l:
- case Builtin::BI__builtin_exp2:
- case Builtin::BI__builtin_exp2f:
- case Builtin::BI__builtin_exp2l:
- return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::exp2));
- case Builtin::BIfabs:
- case Builtin::BIfabsf:
- case Builtin::BIfabsl:
- case Builtin::BI__builtin_fabs:
- case Builtin::BI__builtin_fabsf:
- case Builtin::BI__builtin_fabsl:
- case Builtin::BI__builtin_fabsf128:
- return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::fabs));
- case Builtin::BIfloor:
- case Builtin::BIfloorf:
- case Builtin::BIfloorl:
- case Builtin::BI__builtin_floor:
- case Builtin::BI__builtin_floorf:
- case Builtin::BI__builtin_floorl:
- return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::floor));
- case Builtin::BIfma:
- case Builtin::BIfmaf:
- case Builtin::BIfmal:
- case Builtin::BI__builtin_fma:
- case Builtin::BI__builtin_fmaf:
- case Builtin::BI__builtin_fmal:
- return RValue::get(emitTernaryBuiltin(*this, E, Intrinsic::fma));
- case Builtin::BIfmax:
- case Builtin::BIfmaxf:
- case Builtin::BIfmaxl:
- case Builtin::BI__builtin_fmax:
- case Builtin::BI__builtin_fmaxf:
- case Builtin::BI__builtin_fmaxl:
- return RValue::get(emitBinaryBuiltin(*this, E, Intrinsic::maxnum));
- case Builtin::BIfmin:
- case Builtin::BIfminf:
- case Builtin::BIfminl:
- case Builtin::BI__builtin_fmin:
- case Builtin::BI__builtin_fminf:
- case Builtin::BI__builtin_fminl:
- return RValue::get(emitBinaryBuiltin(*this, E, Intrinsic::minnum));
- // fmod() is a special case: it maps to the frem instruction rather than an
- // LLVM intrinsic.
- case Builtin::BIfmod:
- case Builtin::BIfmodf:
- case Builtin::BIfmodl:
- case Builtin::BI__builtin_fmod:
- case Builtin::BI__builtin_fmodf:
- case Builtin::BI__builtin_fmodl: {
- Value *Arg1 = EmitScalarExpr(E->getArg(0));
- Value *Arg2 = EmitScalarExpr(E->getArg(1));
- return RValue::get(Builder.CreateFRem(Arg1, Arg2, "fmod"));
- }
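- // Illustrative result: __builtin_fmod(a, b) on doubles lowers to
- //   %fmod = frem double %a, %b
- // which, unlike the libm call, never sets errno.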
- case Builtin::BIlog:
- case Builtin::BIlogf:
- case Builtin::BIlogl:
- case Builtin::BI__builtin_log:
- case Builtin::BI__builtin_logf:
- case Builtin::BI__builtin_logl:
- return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::log));
- case Builtin::BIlog10:
- case Builtin::BIlog10f:
- case Builtin::BIlog10l:
- case Builtin::BI__builtin_log10:
- case Builtin::BI__builtin_log10f:
- case Builtin::BI__builtin_log10l:
- return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::log10));
- case Builtin::BIlog2:
- case Builtin::BIlog2f:
- case Builtin::BIlog2l:
- case Builtin::BI__builtin_log2:
- case Builtin::BI__builtin_log2f:
- case Builtin::BI__builtin_log2l:
- return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::log2));
- case Builtin::BInearbyint:
- case Builtin::BInearbyintf:
- case Builtin::BInearbyintl:
- case Builtin::BI__builtin_nearbyint:
- case Builtin::BI__builtin_nearbyintf:
- case Builtin::BI__builtin_nearbyintl:
- return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::nearbyint));
- case Builtin::BIpow:
- case Builtin::BIpowf:
- case Builtin::BIpowl:
- case Builtin::BI__builtin_pow:
- case Builtin::BI__builtin_powf:
- case Builtin::BI__builtin_powl:
- return RValue::get(emitBinaryBuiltin(*this, E, Intrinsic::pow));
- case Builtin::BIrint:
- case Builtin::BIrintf:
- case Builtin::BIrintl:
- case Builtin::BI__builtin_rint:
- case Builtin::BI__builtin_rintf:
- case Builtin::BI__builtin_rintl:
- return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::rint));
- case Builtin::BIround:
- case Builtin::BIroundf:
- case Builtin::BIroundl:
- case Builtin::BI__builtin_round:
- case Builtin::BI__builtin_roundf:
- case Builtin::BI__builtin_roundl:
- return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::round));
- case Builtin::BIsin:
- case Builtin::BIsinf:
- case Builtin::BIsinl:
- case Builtin::BI__builtin_sin:
- case Builtin::BI__builtin_sinf:
- case Builtin::BI__builtin_sinl:
- return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::sin));
- case Builtin::BIsqrt:
- case Builtin::BIsqrtf:
- case Builtin::BIsqrtl:
- case Builtin::BI__builtin_sqrt:
- case Builtin::BI__builtin_sqrtf:
- case Builtin::BI__builtin_sqrtl:
- return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::sqrt));
- case Builtin::BItrunc:
- case Builtin::BItruncf:
- case Builtin::BItruncl:
- case Builtin::BI__builtin_trunc:
- case Builtin::BI__builtin_truncf:
- case Builtin::BI__builtin_truncl:
- return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::trunc));
- case Builtin::BIlround:
- case Builtin::BIlroundf:
- case Builtin::BIlroundl:
- case Builtin::BI__builtin_lround:
- case Builtin::BI__builtin_lroundf:
- case Builtin::BI__builtin_lroundl:
- return RValue::get(emitFPToIntRoundBuiltin(*this, E, Intrinsic::lround));
- case Builtin::BIllround:
- case Builtin::BIllroundf:
- case Builtin::BIllroundl:
- case Builtin::BI__builtin_llround:
- case Builtin::BI__builtin_llroundf:
- case Builtin::BI__builtin_llroundl:
- return RValue::get(emitFPToIntRoundBuiltin(*this, E, Intrinsic::llround));
- case Builtin::BIlrint:
- case Builtin::BIlrintf:
- case Builtin::BIlrintl:
- case Builtin::BI__builtin_lrint:
- case Builtin::BI__builtin_lrintf:
- case Builtin::BI__builtin_lrintl:
- return RValue::get(emitFPToIntRoundBuiltin(*this, E, Intrinsic::lrint));
- case Builtin::BIllrint:
- case Builtin::BIllrintf:
- case Builtin::BIllrintl:
- case Builtin::BI__builtin_llrint:
- case Builtin::BI__builtin_llrintf:
- case Builtin::BI__builtin_llrintl:
- return RValue::get(emitFPToIntRoundBuiltin(*this, E, Intrinsic::llrint));
- default:
- break;
- }
- }
- switch (BuiltinID) {
- default: break;
- case Builtin::BI__builtin___CFStringMakeConstantString:
- case Builtin::BI__builtin___NSStringMakeConstantString:
- return RValue::get(ConstantEmitter(*this).emitAbstract(E, E->getType()));
- case Builtin::BI__builtin_stdarg_start:
- case Builtin::BI__builtin_va_start:
- case Builtin::BI__va_start:
- case Builtin::BI__builtin_va_end:
- return RValue::get(
- EmitVAStartEnd(BuiltinID == Builtin::BI__va_start
- ? EmitScalarExpr(E->getArg(0))
- : EmitVAListRef(E->getArg(0)).getPointer(),
- BuiltinID != Builtin::BI__builtin_va_end));
- case Builtin::BI__builtin_va_copy: {
- Value *DstPtr = EmitVAListRef(E->getArg(0)).getPointer();
- Value *SrcPtr = EmitVAListRef(E->getArg(1)).getPointer();
- llvm::Type *Type = Int8PtrTy;
- DstPtr = Builder.CreateBitCast(DstPtr, Type);
- SrcPtr = Builder.CreateBitCast(SrcPtr, Type);
- return RValue::get(Builder.CreateCall(CGM.getIntrinsic(Intrinsic::vacopy),
- {DstPtr, SrcPtr}));
- }
- case Builtin::BI__builtin_abs:
- case Builtin::BI__builtin_labs:
- case Builtin::BI__builtin_llabs: {
- // X < 0 ? -X : X
- // The negation has 'nsw' because abs of INT_MIN is undefined.
- Value *ArgValue = EmitScalarExpr(E->getArg(0));
- Value *NegOp = Builder.CreateNSWNeg(ArgValue, "neg");
- Constant *Zero = llvm::Constant::getNullValue(ArgValue->getType());
- Value *CmpResult = Builder.CreateICmpSLT(ArgValue, Zero, "abscond");
- Value *Result = Builder.CreateSelect(CmpResult, NegOp, ArgValue, "abs");
- return RValue::get(Result);
- }
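- // Example (illustrative): for a 32-bit argument this emits
- //   %neg = sub nsw i32 0, %x
- //   %abscond = icmp slt i32 %x, 0
- //   %abs = select i1 %abscond, i32 %neg, i32 %x
- // where 'nsw' encodes that abs(INT_MIN) is undefined behavior.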
- case Builtin::BI__builtin_conj:
- case Builtin::BI__builtin_conjf:
- case Builtin::BI__builtin_conjl: {
- ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0));
- Value *Real = ComplexVal.first;
- Value *Imag = ComplexVal.second;
- Value *Zero =
- Imag->getType()->isFPOrFPVectorTy()
- ? llvm::ConstantFP::getZeroValueForNegation(Imag->getType())
- : llvm::Constant::getNullValue(Imag->getType());
- Imag = Builder.CreateFSub(Zero, Imag, "sub");
- return RValue::getComplex(std::make_pair(Real, Imag));
- }
- case Builtin::BI__builtin_creal:
- case Builtin::BI__builtin_crealf:
- case Builtin::BI__builtin_creall:
- case Builtin::BIcreal:
- case Builtin::BIcrealf:
- case Builtin::BIcreall: {
- ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0));
- return RValue::get(ComplexVal.first);
- }
- case Builtin::BI__builtin_dump_struct: {
- llvm::Type *LLVMIntTy = getTypes().ConvertType(getContext().IntTy);
- llvm::FunctionType *LLVMFuncType = llvm::FunctionType::get(
- LLVMIntTy, {llvm::Type::getInt8PtrTy(getLLVMContext())}, true);
- Value *Func = EmitScalarExpr(E->getArg(1)->IgnoreImpCasts());
- CharUnits Arg0Align = EmitPointerWithAlignment(E->getArg(0)).getAlignment();
- const Expr *Arg0 = E->getArg(0)->IgnoreImpCasts();
- QualType Arg0Type = Arg0->getType()->getPointeeType();
- Value *RecordPtr = EmitScalarExpr(Arg0);
- Value *Res = dumpRecord(*this, Arg0Type, RecordPtr, Arg0Align,
- {LLVMFuncType, Func}, 0);
- return RValue::get(Res);
- }
- case Builtin::BI__builtin_preserve_access_index: {
- // Only enable the preserved access index region when debug info is
- // available, since debug info is needed to preserve the user-level
- // access pattern.
- if (!getDebugInfo()) {
- CGM.Error(E->getExprLoc(), "using builtin_preserve_access_index() without -g");
- return RValue::get(EmitScalarExpr(E->getArg(0)));
- }
- // Nested builtin_preserve_access_index() not supported
- if (IsInPreservedAIRegion) {
- CGM.Error(E->getExprLoc(), "nested builtin_preserve_access_index() not supported");
- return RValue::get(EmitScalarExpr(E->getArg(0)));
- }
- IsInPreservedAIRegion = true;
- Value *Res = EmitScalarExpr(E->getArg(0));
- IsInPreservedAIRegion = false;
- return RValue::get(Res);
- }
- case Builtin::BI__builtin_cimag:
- case Builtin::BI__builtin_cimagf:
- case Builtin::BI__builtin_cimagl:
- case Builtin::BIcimag:
- case Builtin::BIcimagf:
- case Builtin::BIcimagl: {
- ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0));
- return RValue::get(ComplexVal.second);
- }
- case Builtin::BI__builtin_clrsb:
- case Builtin::BI__builtin_clrsbl:
- case Builtin::BI__builtin_clrsbll: {
- // clrsb(x) -> clz(x < 0 ? ~x : x) - 1
- Value *ArgValue = EmitScalarExpr(E->getArg(0));
- llvm::Type *ArgType = ArgValue->getType();
- Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType);
- llvm::Type *ResultType = ConvertType(E->getType());
- Value *Zero = llvm::Constant::getNullValue(ArgType);
- Value *IsNeg = Builder.CreateICmpSLT(ArgValue, Zero, "isneg");
- Value *Inverse = Builder.CreateNot(ArgValue, "not");
- Value *Tmp = Builder.CreateSelect(IsNeg, Inverse, ArgValue);
- Value *Ctlz = Builder.CreateCall(F, {Tmp, Builder.getFalse()});
- Value *Result = Builder.CreateSub(Ctlz, llvm::ConstantInt::get(ArgType, 1));
- Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
- "cast");
- return RValue::get(Result);
- }
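- // Worked example (illustrative): for 32-bit x == -1, ~x == 0, so
- // ctlz(0, /*ZeroIsUndef=*/false) == 32 and clrsb(-1) == 31, matching GCC's
- // "number of redundant sign bits" definition.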
- case Builtin::BI__builtin_ctzs:
- case Builtin::BI__builtin_ctz:
- case Builtin::BI__builtin_ctzl:
- case Builtin::BI__builtin_ctzll: {
- Value *ArgValue = EmitCheckedArgForBuiltin(E->getArg(0), BCK_CTZPassedZero);
- llvm::Type *ArgType = ArgValue->getType();
- Function *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType);
- llvm::Type *ResultType = ConvertType(E->getType());
- Value *ZeroUndef = Builder.getInt1(getTarget().isCLZForZeroUndef());
- Value *Result = Builder.CreateCall(F, {ArgValue, ZeroUndef});
- if (Result->getType() != ResultType)
- Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
- "cast");
- return RValue::get(Result);
- }
- case Builtin::BI__builtin_clzs:
- case Builtin::BI__builtin_clz:
- case Builtin::BI__builtin_clzl:
- case Builtin::BI__builtin_clzll: {
- Value *ArgValue = EmitCheckedArgForBuiltin(E->getArg(0), BCK_CLZPassedZero);
- llvm::Type *ArgType = ArgValue->getType();
- Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType);
- llvm::Type *ResultType = ConvertType(E->getType());
- Value *ZeroUndef = Builder.getInt1(getTarget().isCLZForZeroUndef());
- Value *Result = Builder.CreateCall(F, {ArgValue, ZeroUndef});
- if (Result->getType() != ResultType)
- Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
- "cast");
- return RValue::get(Result);
- }
- case Builtin::BI__builtin_ffs:
- case Builtin::BI__builtin_ffsl:
- case Builtin::BI__builtin_ffsll: {
- // ffs(x) -> x ? cttz(x) + 1 : 0
- Value *ArgValue = EmitScalarExpr(E->getArg(0));
- llvm::Type *ArgType = ArgValue->getType();
- Function *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType);
- llvm::Type *ResultType = ConvertType(E->getType());
- Value *Tmp =
- Builder.CreateAdd(Builder.CreateCall(F, {ArgValue, Builder.getTrue()}),
- llvm::ConstantInt::get(ArgType, 1));
- Value *Zero = llvm::Constant::getNullValue(ArgType);
- Value *IsZero = Builder.CreateICmpEQ(ArgValue, Zero, "iszero");
- Value *Result = Builder.CreateSelect(IsZero, Zero, Tmp, "ffs");
- if (Result->getType() != ResultType)
- Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
- "cast");
- return RValue::get(Result);
- }
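- // Worked example (illustrative): ffs(8) == cttz(8) + 1 == 4, while the
- // select above forces ffs(0) == 0, which cttz alone cannot express.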
- case Builtin::BI__builtin_parity:
- case Builtin::BI__builtin_parityl:
- case Builtin::BI__builtin_parityll: {
- // parity(x) -> ctpop(x) & 1
- Value *ArgValue = EmitScalarExpr(E->getArg(0));
- llvm::Type *ArgType = ArgValue->getType();
- Function *F = CGM.getIntrinsic(Intrinsic::ctpop, ArgType);
- llvm::Type *ResultType = ConvertType(E->getType());
- Value *Tmp = Builder.CreateCall(F, ArgValue);
- Value *Result = Builder.CreateAnd(Tmp, llvm::ConstantInt::get(ArgType, 1));
- if (Result->getType() != ResultType)
- Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
- "cast");
- return RValue::get(Result);
- }
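- // Worked example (illustrative): parity(0b1011) == ctpop(0b1011) & 1
- // == 3 & 1 == 1, i.e. an odd number of set bits.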
- case Builtin::BI__lzcnt16:
- case Builtin::BI__lzcnt:
- case Builtin::BI__lzcnt64: {
- Value *ArgValue = EmitScalarExpr(E->getArg(0));
- llvm::Type *ArgType = ArgValue->getType();
- Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType);
- llvm::Type *ResultType = ConvertType(E->getType());
- Value *Result = Builder.CreateCall(F, {ArgValue, Builder.getFalse()});
- if (Result->getType() != ResultType)
- Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
- "cast");
- return RValue::get(Result);
- }
- case Builtin::BI__popcnt16:
- case Builtin::BI__popcnt:
- case Builtin::BI__popcnt64:
- case Builtin::BI__builtin_popcount:
- case Builtin::BI__builtin_popcountl:
- case Builtin::BI__builtin_popcountll: {
- Value *ArgValue = EmitScalarExpr(E->getArg(0));
- llvm::Type *ArgType = ArgValue->getType();
- Function *F = CGM.getIntrinsic(Intrinsic::ctpop, ArgType);
- llvm::Type *ResultType = ConvertType(E->getType());
- Value *Result = Builder.CreateCall(F, ArgValue);
- if (Result->getType() != ResultType)
- Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
- "cast");
- return RValue::get(Result);
- }
- case Builtin::BI__builtin_unpredictable: {
- // Always return the argument of __builtin_unpredictable. LLVM does not
- // handle this builtin. Metadata for this builtin should be added directly
- // to instructions such as branches or switches that use it.
- return RValue::get(EmitScalarExpr(E->getArg(0)));
- }
- case Builtin::BI__builtin_expect: {
- Value *ArgValue = EmitScalarExpr(E->getArg(0));
- llvm::Type *ArgType = ArgValue->getType();
- Value *ExpectedValue = EmitScalarExpr(E->getArg(1));
- // Don't generate llvm.expect on -O0 as the backend won't use it for
- // anything.
- // Note, we still IRGen ExpectedValue because it could have side-effects.
- if (CGM.getCodeGenOpts().OptimizationLevel == 0)
- return RValue::get(ArgValue);
- Function *FnExpect = CGM.getIntrinsic(Intrinsic::expect, ArgType);
- Value *Result =
- Builder.CreateCall(FnExpect, {ArgValue, ExpectedValue}, "expval");
- return RValue::get(Result);
- }
- case Builtin::BI__builtin_assume_aligned: {
- const Expr *Ptr = E->getArg(0);
- Value *PtrValue = EmitScalarExpr(Ptr);
- Value *OffsetValue =
- (E->getNumArgs() > 2) ? EmitScalarExpr(E->getArg(2)) : nullptr;
- Value *AlignmentValue = EmitScalarExpr(E->getArg(1));
- ConstantInt *AlignmentCI = cast<ConstantInt>(AlignmentValue);
- unsigned Alignment = (unsigned)AlignmentCI->getZExtValue();
- EmitAlignmentAssumption(PtrValue, Ptr,
- /*The expr loc is sufficient.*/ SourceLocation(),
- Alignment, OffsetValue);
- return RValue::get(PtrValue);
- }
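- // Usage sketch (illustrative):
- //   void *q = __builtin_assume_aligned(p, 64);
- // emits an llvm.assume-based alignment assumption on p and returns p
- // unchanged; the optional third argument supplies a misalignment offset.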
- case Builtin::BI__assume:
- case Builtin::BI__builtin_assume: {
- if (E->getArg(0)->HasSideEffects(getContext()))
- return RValue::get(nullptr);
- Value *ArgValue = EmitScalarExpr(E->getArg(0));
- Function *FnAssume = CGM.getIntrinsic(Intrinsic::assume);
- return RValue::get(Builder.CreateCall(FnAssume, ArgValue));
- }
- case Builtin::BI__builtin_bswap16:
- case Builtin::BI__builtin_bswap32:
- case Builtin::BI__builtin_bswap64: {
- return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::bswap));
- }
- case Builtin::BI__builtin_bitreverse8:
- case Builtin::BI__builtin_bitreverse16:
- case Builtin::BI__builtin_bitreverse32:
- case Builtin::BI__builtin_bitreverse64: {
- return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::bitreverse));
- }
- case Builtin::BI__builtin_rotateleft8:
- case Builtin::BI__builtin_rotateleft16:
- case Builtin::BI__builtin_rotateleft32:
- case Builtin::BI__builtin_rotateleft64:
- case Builtin::BI_rotl8: // Microsoft variants of rotate left
- case Builtin::BI_rotl16:
- case Builtin::BI_rotl:
- case Builtin::BI_lrotl:
- case Builtin::BI_rotl64:
- return emitRotate(E, false);
- case Builtin::BI__builtin_rotateright8:
- case Builtin::BI__builtin_rotateright16:
- case Builtin::BI__builtin_rotateright32:
- case Builtin::BI__builtin_rotateright64:
- case Builtin::BI_rotr8: // Microsoft variants of rotate right
- case Builtin::BI_rotr16:
- case Builtin::BI_rotr:
- case Builtin::BI_lrotr:
- case Builtin::BI_rotr64:
- return emitRotate(E, true);
- case Builtin::BI__builtin_constant_p: {
- llvm::Type *ResultType = ConvertType(E->getType());
- if (CGM.getCodeGenOpts().OptimizationLevel == 0)
- // At -O0, we don't perform inlining, so we don't need to delay the
- // processing.
- return RValue::get(ConstantInt::get(ResultType, 0));
- const Expr *Arg = E->getArg(0);
- QualType ArgType = Arg->getType();
- // FIXME: The allowance for Obj-C pointers and block pointers is historical
- // and likely a mistake.
- if (!ArgType->isIntegralOrEnumerationType() && !ArgType->isFloatingType() &&
- !ArgType->isObjCObjectPointerType() && !ArgType->isBlockPointerType())
- // Per the GCC documentation, only numeric constants are recognized after
- // inlining.
- return RValue::get(ConstantInt::get(ResultType, 0));
- if (Arg->HasSideEffects(getContext()))
- // The argument is unevaluated, so be conservative if it might have
- // side-effects.
- return RValue::get(ConstantInt::get(ResultType, 0));
- Value *ArgValue = EmitScalarExpr(Arg);
- if (ArgType->isObjCObjectPointerType()) {
- // Convert Objective-C objects to id because we cannot distinguish between
- // LLVM types for Obj-C classes as they are opaque.
- ArgType = CGM.getContext().getObjCIdType();
- ArgValue = Builder.CreateBitCast(ArgValue, ConvertType(ArgType));
- }
- Function *F =
- CGM.getIntrinsic(Intrinsic::is_constant, ConvertType(ArgType));
- Value *Result = Builder.CreateCall(F, ArgValue);
- if (Result->getType() != ResultType)
- Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/false);
- return RValue::get(Result);
- }
- case Builtin::BI__builtin_dynamic_object_size:
- case Builtin::BI__builtin_object_size: {
- unsigned Type =
- E->getArg(1)->EvaluateKnownConstInt(getContext()).getZExtValue();
- auto *ResType = cast<llvm::IntegerType>(ConvertType(E->getType()));
- // We pass this builtin onto the optimizer so that it can figure out the
- // object size in more complex cases.
- bool IsDynamic = BuiltinID == Builtin::BI__builtin_dynamic_object_size;
- return RValue::get(emitBuiltinObjectSize(E->getArg(0), Type, ResType,
- /*EmittedE=*/nullptr, IsDynamic));
- }
- case Builtin::BI__builtin_prefetch: {
- Value *Locality, *RW, *Address = EmitScalarExpr(E->getArg(0));
- // FIXME: Technically these constants should be of type 'int', yes?
- RW = (E->getNumArgs() > 1) ? EmitScalarExpr(E->getArg(1)) :
- llvm::ConstantInt::get(Int32Ty, 0);
- Locality = (E->getNumArgs() > 2) ? EmitScalarExpr(E->getArg(2)) :
- llvm::ConstantInt::get(Int32Ty, 3);
- Value *Data = llvm::ConstantInt::get(Int32Ty, 1);
- Function *F = CGM.getIntrinsic(Intrinsic::prefetch);
- return RValue::get(Builder.CreateCall(F, {Address, RW, Locality, Data}));
- }
- case Builtin::BI__builtin_readcyclecounter: {
- Function *F = CGM.getIntrinsic(Intrinsic::readcyclecounter);
- return RValue::get(Builder.CreateCall(F));
- }
- case Builtin::BI__builtin___clear_cache: {
- Value *Begin = EmitScalarExpr(E->getArg(0));
- Value *End = EmitScalarExpr(E->getArg(1));
- Function *F = CGM.getIntrinsic(Intrinsic::clear_cache);
- return RValue::get(Builder.CreateCall(F, {Begin, End}));
- }
- case Builtin::BI__builtin_trap:
- return RValue::get(EmitTrapCall(Intrinsic::trap));
- case Builtin::BI__debugbreak:
- return RValue::get(EmitTrapCall(Intrinsic::debugtrap));
- case Builtin::BI__builtin_unreachable: {
- EmitUnreachable(E->getExprLoc());
- // We do need to preserve an insertion point.
- EmitBlock(createBasicBlock("unreachable.cont"));
- return RValue::get(nullptr);
- }
- case Builtin::BI__builtin_powi:
- case Builtin::BI__builtin_powif:
- case Builtin::BI__builtin_powil: {
- Value *Base = EmitScalarExpr(E->getArg(0));
- Value *Exponent = EmitScalarExpr(E->getArg(1));
- llvm::Type *ArgType = Base->getType();
- Function *F = CGM.getIntrinsic(Intrinsic::powi, ArgType);
- return RValue::get(Builder.CreateCall(F, {Base, Exponent}));
- }
- case Builtin::BI__builtin_isgreater:
- case Builtin::BI__builtin_isgreaterequal:
- case Builtin::BI__builtin_isless:
- case Builtin::BI__builtin_islessequal:
- case Builtin::BI__builtin_islessgreater:
- case Builtin::BI__builtin_isunordered: {
- // Ordered comparisons: we know the arguments to these are matching scalar
- // floating point values.
- Value *LHS = EmitScalarExpr(E->getArg(0));
- Value *RHS = EmitScalarExpr(E->getArg(1));
- switch (BuiltinID) {
- default: llvm_unreachable("Unknown ordered comparison");
- case Builtin::BI__builtin_isgreater:
- LHS = Builder.CreateFCmpOGT(LHS, RHS, "cmp");
- break;
- case Builtin::BI__builtin_isgreaterequal:
- LHS = Builder.CreateFCmpOGE(LHS, RHS, "cmp");
- break;
- case Builtin::BI__builtin_isless:
- LHS = Builder.CreateFCmpOLT(LHS, RHS, "cmp");
- break;
- case Builtin::BI__builtin_islessequal:
- LHS = Builder.CreateFCmpOLE(LHS, RHS, "cmp");
- break;
- case Builtin::BI__builtin_islessgreater:
- LHS = Builder.CreateFCmpONE(LHS, RHS, "cmp");
- break;
- case Builtin::BI__builtin_isunordered:
- LHS = Builder.CreateFCmpUNO(LHS, RHS, "cmp");
- break;
- }
- // ZExt bool to int type.
- return RValue::get(Builder.CreateZExt(LHS, ConvertType(E->getType())));
- }
- case Builtin::BI__builtin_isnan: {
- Value *V = EmitScalarExpr(E->getArg(0));
- V = Builder.CreateFCmpUNO(V, V, "cmp");
- return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType())));
- }
- case Builtin::BIfinite:
- case Builtin::BI__finite:
- case Builtin::BIfinitef:
- case Builtin::BI__finitef:
- case Builtin::BIfinitel:
- case Builtin::BI__finitel:
- case Builtin::BI__builtin_isinf:
- case Builtin::BI__builtin_isfinite: {
- // isinf(x) --> fabs(x) == infinity
- // isfinite(x) --> fabs(x) != infinity
- // x != NaN via the ordered compare in either case.
- Value *V = EmitScalarExpr(E->getArg(0));
- Value *Fabs = EmitFAbs(*this, V);
- Constant *Infinity = ConstantFP::getInfinity(V->getType());
- CmpInst::Predicate Pred = (BuiltinID == Builtin::BI__builtin_isinf)
- ? CmpInst::FCMP_OEQ
- : CmpInst::FCMP_ONE;
- Value *FCmp = Builder.CreateFCmp(Pred, Fabs, Infinity, "cmpinf");
- return RValue::get(Builder.CreateZExt(FCmp, ConvertType(E->getType())));
- }
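- // Example (illustrative): __builtin_isinf(x) on a double becomes
- //   %fabs = call double @llvm.fabs.f64(double %x)
- //   %cmpinf = fcmp oeq double %fabs, 0x7FF0000000000000
- // The ordered compare also rejects NaN, so no separate NaN check is needed.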
- case Builtin::BI__builtin_isinf_sign: {
- // isinf_sign(x) -> fabs(x) == infinity ? (signbit(x) ? -1 : 1) : 0
- Value *Arg = EmitScalarExpr(E->getArg(0));
- Value *AbsArg = EmitFAbs(*this, Arg);
- Value *IsInf = Builder.CreateFCmpOEQ(
- AbsArg, ConstantFP::getInfinity(Arg->getType()), "isinf");
- Value *IsNeg = EmitSignBit(*this, Arg);
- llvm::Type *IntTy = ConvertType(E->getType());
- Value *Zero = Constant::getNullValue(IntTy);
- Value *One = ConstantInt::get(IntTy, 1);
- Value *NegativeOne = ConstantInt::get(IntTy, -1);
- Value *SignResult = Builder.CreateSelect(IsNeg, NegativeOne, One);
- Value *Result = Builder.CreateSelect(IsInf, SignResult, Zero);
- return RValue::get(Result);
- }
- case Builtin::BI__builtin_isnormal: {
- // isnormal(x) --> x == x && fabsf(x) < infinity && fabsf(x) >= float_min
- Value *V = EmitScalarExpr(E->getArg(0));
- Value *Eq = Builder.CreateFCmpOEQ(V, V, "iseq");
- Value *Abs = EmitFAbs(*this, V);
- Value *IsLessThanInf =
- Builder.CreateFCmpULT(Abs, ConstantFP::getInfinity(V->getType()),"isinf");
- APFloat Smallest = APFloat::getSmallestNormalized(
- getContext().getFloatTypeSemantics(E->getArg(0)->getType()));
- Value *IsNormal =
- Builder.CreateFCmpUGE(Abs, ConstantFP::get(V->getContext(), Smallest),
- "isnormal");
- V = Builder.CreateAnd(Eq, IsLessThanInf, "and");
- V = Builder.CreateAnd(V, IsNormal, "and");
- return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType())));
- }
- case Builtin::BI__builtin_flt_rounds: {
- Function *F = CGM.getIntrinsic(Intrinsic::flt_rounds);
- llvm::Type *ResultType = ConvertType(E->getType());
- Value *Result = Builder.CreateCall(F);
- if (Result->getType() != ResultType)
- Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
- "cast");
- return RValue::get(Result);
- }
- case Builtin::BI__builtin_fpclassify: {
- Value *V = EmitScalarExpr(E->getArg(5));
- llvm::Type *Ty = ConvertType(E->getArg(5)->getType());
- // Create Result
- BasicBlock *Begin = Builder.GetInsertBlock();
- BasicBlock *End = createBasicBlock("fpclassify_end", this->CurFn);
- Builder.SetInsertPoint(End);
- PHINode *Result =
- Builder.CreatePHI(ConvertType(E->getArg(0)->getType()), 4,
- "fpclassify_result");
- // if (V==0) return FP_ZERO
- Builder.SetInsertPoint(Begin);
- Value *IsZero = Builder.CreateFCmpOEQ(V, Constant::getNullValue(Ty),
- "iszero");
- Value *ZeroLiteral = EmitScalarExpr(E->getArg(4));
- BasicBlock *NotZero = createBasicBlock("fpclassify_not_zero", this->CurFn);
- Builder.CreateCondBr(IsZero, End, NotZero);
- Result->addIncoming(ZeroLiteral, Begin);
- // if (V != V) return FP_NAN
- Builder.SetInsertPoint(NotZero);
- Value *IsNan = Builder.CreateFCmpUNO(V, V, "cmp");
- Value *NanLiteral = EmitScalarExpr(E->getArg(0));
- BasicBlock *NotNan = createBasicBlock("fpclassify_not_nan", this->CurFn);
- Builder.CreateCondBr(IsNan, End, NotNan);
- Result->addIncoming(NanLiteral, NotZero);
- // if (fabs(V) == infinity) return FP_INFINITY
- Builder.SetInsertPoint(NotNan);
- Value *VAbs = EmitFAbs(*this, V);
- Value *IsInf =
- Builder.CreateFCmpOEQ(VAbs, ConstantFP::getInfinity(V->getType()),
- "isinf");
- Value *InfLiteral = EmitScalarExpr(E->getArg(1));
- BasicBlock *NotInf = createBasicBlock("fpclassify_not_inf", this->CurFn);
- Builder.CreateCondBr(IsInf, End, NotInf);
- Result->addIncoming(InfLiteral, NotNan);
- // if (fabs(V) >= MIN_NORMAL) return FP_NORMAL else FP_SUBNORMAL
- Builder.SetInsertPoint(NotInf);
- APFloat Smallest = APFloat::getSmallestNormalized(
- getContext().getFloatTypeSemantics(E->getArg(5)->getType()));
- Value *IsNormal =
- Builder.CreateFCmpUGE(VAbs, ConstantFP::get(V->getContext(), Smallest),
- "isnormal");
- Value *NormalResult =
- Builder.CreateSelect(IsNormal, EmitScalarExpr(E->getArg(2)),
- EmitScalarExpr(E->getArg(3)));
- Builder.CreateBr(End);
- Result->addIncoming(NormalResult, NotInf);
- // return Result
- Builder.SetInsertPoint(End);
- return RValue::get(Result);
- }
- case Builtin::BIalloca:
- case Builtin::BI_alloca:
- case Builtin::BI__builtin_alloca: {
- Value *Size = EmitScalarExpr(E->getArg(0));
- const TargetInfo &TI = getContext().getTargetInfo();
- // The alignment of the alloca should correspond to __BIGGEST_ALIGNMENT__.
- unsigned SuitableAlignmentInBytes =
- CGM.getContext()
- .toCharUnitsFromBits(TI.getSuitableAlign())
- .getQuantity();
- AllocaInst *AI = Builder.CreateAlloca(Builder.getInt8Ty(), Size);
- AI->setAlignment(SuitableAlignmentInBytes);
- initializeAlloca(*this, AI, Size, SuitableAlignmentInBytes);
- return RValue::get(AI);
- }
- case Builtin::BI__builtin_alloca_with_align: {
- Value *Size = EmitScalarExpr(E->getArg(0));
- Value *AlignmentInBitsValue = EmitScalarExpr(E->getArg(1));
- auto *AlignmentInBitsCI = cast<ConstantInt>(AlignmentInBitsValue);
- unsigned AlignmentInBits = AlignmentInBitsCI->getZExtValue();
- unsigned AlignmentInBytes =
- CGM.getContext().toCharUnitsFromBits(AlignmentInBits).getQuantity();
- AllocaInst *AI = Builder.CreateAlloca(Builder.getInt8Ty(), Size);
- AI->setAlignment(AlignmentInBytes);
- initializeAlloca(*this, AI, Size, AlignmentInBytes);
- return RValue::get(AI);
- }
- case Builtin::BIbzero:
- case Builtin::BI__builtin_bzero: {
- Address Dest = EmitPointerWithAlignment(E->getArg(0));
- Value *SizeVal = EmitScalarExpr(E->getArg(1));
- EmitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(0)->getType(),
- E->getArg(0)->getExprLoc(), FD, 0);
- Builder.CreateMemSet(Dest, Builder.getInt8(0), SizeVal, false);
- return RValue::get(nullptr);
- }
- case Builtin::BImemcpy:
- case Builtin::BI__builtin_memcpy: {
- Address Dest = EmitPointerWithAlignment(E->getArg(0));
- Address Src = EmitPointerWithAlignment(E->getArg(1));
- Value *SizeVal = EmitScalarExpr(E->getArg(2));
- EmitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(0)->getType(),
- E->getArg(0)->getExprLoc(), FD, 0);
- EmitNonNullArgCheck(RValue::get(Src.getPointer()), E->getArg(1)->getType(),
- E->getArg(1)->getExprLoc(), FD, 1);
- Builder.CreateMemCpy(Dest, Src, SizeVal, false);
- return RValue::get(Dest.getPointer());
- }
- case Builtin::BI__builtin_char_memchr:
- BuiltinID = Builtin::BI__builtin_memchr;
- break;
- case Builtin::BI__builtin___memcpy_chk: {
- // fold __builtin_memcpy_chk(x, y, cst1, cst2) to memcpy iff cst1<=cst2.
- Expr::EvalResult SizeResult, DstSizeResult;
- if (!E->getArg(2)->EvaluateAsInt(SizeResult, CGM.getContext()) ||
- !E->getArg(3)->EvaluateAsInt(DstSizeResult, CGM.getContext()))
- break;
- llvm::APSInt Size = SizeResult.Val.getInt();
- llvm::APSInt DstSize = DstSizeResult.Val.getInt();
- if (Size.ugt(DstSize))
- break;
- Address Dest = EmitPointerWithAlignment(E->getArg(0));
- Address Src = EmitPointerWithAlignment(E->getArg(1));
- Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
- Builder.CreateMemCpy(Dest, Src, SizeVal, false);
- return RValue::get(Dest.getPointer());
- }
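- // Illustrative fold: __builtin___memcpy_chk(d, s, 8, 16) has a constant
- // size no larger than the constant destination size, so it is emitted as a
- // plain memcpy; otherwise we break out of the switch and emit the checked
- // library call like any other builtin.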
- case Builtin::BI__builtin_objc_memmove_collectable: {
- Address DestAddr = EmitPointerWithAlignment(E->getArg(0));
- Address SrcAddr = EmitPointerWithAlignment(E->getArg(1));
- Value *SizeVal = EmitScalarExpr(E->getArg(2));
- CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this,
- DestAddr, SrcAddr, SizeVal);
- return RValue::get(DestAddr.getPointer());
- }
- case Builtin::BI__builtin___memmove_chk: {
- // fold __builtin_memmove_chk(x, y, cst1, cst2) to memmove iff cst1<=cst2.
- Expr::EvalResult SizeResult, DstSizeResult;
- if (!E->getArg(2)->EvaluateAsInt(SizeResult, CGM.getContext()) ||
- !E->getArg(3)->EvaluateAsInt(DstSizeResult, CGM.getContext()))
- break;
- llvm::APSInt Size = SizeResult.Val.getInt();
- llvm::APSInt DstSize = DstSizeResult.Val.getInt();
- if (Size.ugt(DstSize))
- break;
- Address Dest = EmitPointerWithAlignment(E->getArg(0));
- Address Src = EmitPointerWithAlignment(E->getArg(1));
- Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
- Builder.CreateMemMove(Dest, Src, SizeVal, false);
- return RValue::get(Dest.getPointer());
- }
- case Builtin::BImemmove:
- case Builtin::BI__builtin_memmove: {
- Address Dest = EmitPointerWithAlignment(E->getArg(0));
- Address Src = EmitPointerWithAlignment(E->getArg(1));
- Value *SizeVal = EmitScalarExpr(E->getArg(2));
- EmitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(0)->getType(),
- E->getArg(0)->getExprLoc(), FD, 0);
- EmitNonNullArgCheck(RValue::get(Src.getPointer()), E->getArg(1)->getType(),
- E->getArg(1)->getExprLoc(), FD, 1);
- Builder.CreateMemMove(Dest, Src, SizeVal, false);
- return RValue::get(Dest.getPointer());
- }
- case Builtin::BImemset:
- case Builtin::BI__builtin_memset: {
- Address Dest = EmitPointerWithAlignment(E->getArg(0));
- Value *ByteVal = Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)),
- Builder.getInt8Ty());
- Value *SizeVal = EmitScalarExpr(E->getArg(2));
- EmitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(0)->getType(),
- E->getArg(0)->getExprLoc(), FD, 0);
- Builder.CreateMemSet(Dest, ByteVal, SizeVal, false);
- return RValue::get(Dest.getPointer());
- }
- case Builtin::BI__builtin___memset_chk: {
- // fold __builtin_memset_chk(x, y, cst1, cst2) to memset iff cst1<=cst2.
- Expr::EvalResult SizeResult, DstSizeResult;
- if (!E->getArg(2)->EvaluateAsInt(SizeResult, CGM.getContext()) ||
- !E->getArg(3)->EvaluateAsInt(DstSizeResult, CGM.getContext()))
- break;
- llvm::APSInt Size = SizeResult.Val.getInt();
- llvm::APSInt DstSize = DstSizeResult.Val.getInt();
- if (Size.ugt(DstSize))
- break;
- Address Dest = EmitPointerWithAlignment(E->getArg(0));
- Value *ByteVal = Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)),
- Builder.getInt8Ty());
- Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
- Builder.CreateMemSet(Dest, ByteVal, SizeVal, false);
- return RValue::get(Dest.getPointer());
- }
- case Builtin::BI__builtin_wmemcmp: {
- // The MSVC runtime library does not provide a definition of wmemcmp, so we
- // need an inline implementation.
- if (!getTarget().getTriple().isOSMSVCRT())
- break;
- llvm::Type *WCharTy = ConvertType(getContext().WCharTy);
- Value *Dst = EmitScalarExpr(E->getArg(0));
- Value *Src = EmitScalarExpr(E->getArg(1));
- Value *Size = EmitScalarExpr(E->getArg(2));
- BasicBlock *Entry = Builder.GetInsertBlock();
- BasicBlock *CmpGT = createBasicBlock("wmemcmp.gt");
- BasicBlock *CmpLT = createBasicBlock("wmemcmp.lt");
- BasicBlock *Next = createBasicBlock("wmemcmp.next");
- BasicBlock *Exit = createBasicBlock("wmemcmp.exit");
- Value *SizeEq0 = Builder.CreateICmpEQ(Size, ConstantInt::get(SizeTy, 0));
- Builder.CreateCondBr(SizeEq0, Exit, CmpGT);
- EmitBlock(CmpGT);
- PHINode *DstPhi = Builder.CreatePHI(Dst->getType(), 2);
- DstPhi->addIncoming(Dst, Entry);
- PHINode *SrcPhi = Builder.CreatePHI(Src->getType(), 2);
- SrcPhi->addIncoming(Src, Entry);
- PHINode *SizePhi = Builder.CreatePHI(SizeTy, 2);
- SizePhi->addIncoming(Size, Entry);
- CharUnits WCharAlign =
- getContext().getTypeAlignInChars(getContext().WCharTy);
- Value *DstCh = Builder.CreateAlignedLoad(WCharTy, DstPhi, WCharAlign);
- Value *SrcCh = Builder.CreateAlignedLoad(WCharTy, SrcPhi, WCharAlign);
- Value *DstGtSrc = Builder.CreateICmpUGT(DstCh, SrcCh);
- Builder.CreateCondBr(DstGtSrc, Exit, CmpLT);
- EmitBlock(CmpLT);
- Value *DstLtSrc = Builder.CreateICmpULT(DstCh, SrcCh);
- Builder.CreateCondBr(DstLtSrc, Exit, Next);
- EmitBlock(Next);
- Value *NextDst = Builder.CreateConstInBoundsGEP1_32(WCharTy, DstPhi, 1);
- Value *NextSrc = Builder.CreateConstInBoundsGEP1_32(WCharTy, SrcPhi, 1);
- Value *NextSize = Builder.CreateSub(SizePhi, ConstantInt::get(SizeTy, 1));
- Value *NextSizeEq0 =
- Builder.CreateICmpEQ(NextSize, ConstantInt::get(SizeTy, 0));
- Builder.CreateCondBr(NextSizeEq0, Exit, CmpGT);
- DstPhi->addIncoming(NextDst, Next);
- SrcPhi->addIncoming(NextSrc, Next);
- SizePhi->addIncoming(NextSize, Next);
- EmitBlock(Exit);
- PHINode *Ret = Builder.CreatePHI(IntTy, 4);
- Ret->addIncoming(ConstantInt::get(IntTy, 0), Entry);
- Ret->addIncoming(ConstantInt::get(IntTy, 1), CmpGT);
- Ret->addIncoming(ConstantInt::get(IntTy, -1), CmpLT);
- Ret->addIncoming(ConstantInt::get(IntTy, 0), Next);
- return RValue::get(Ret);
- }
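- // Semantics sketch (illustrative): the loop above compares one wchar_t per
- // iteration as an unsigned value, yielding 1 at the first Dst > Src
- // element, -1 at the first Dst < Src element, and 0 once Size is exhausted
- // (or was zero to begin with).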
- case Builtin::BI__builtin_dwarf_cfa: {
- // The offset in bytes from the first argument to the CFA.
- //
- // Why on earth is this in the frontend? Is there any reason at
- // all that the backend can't reasonably determine this while
- // lowering llvm.eh.dwarf.cfa()?
- //
- // TODO: If there's a satisfactory reason, add a target hook for
- // this instead of hard-coding 0, which is correct for most targets.
- int32_t Offset = 0;
- Function *F = CGM.getIntrinsic(Intrinsic::eh_dwarf_cfa);
- return RValue::get(Builder.CreateCall(F,
- llvm::ConstantInt::get(Int32Ty, Offset)));
- }
- case Builtin::BI__builtin_return_address: {
- Value *Depth = ConstantEmitter(*this).emitAbstract(E->getArg(0),
- getContext().UnsignedIntTy);
- Function *F = CGM.getIntrinsic(Intrinsic::returnaddress);
- return RValue::get(Builder.CreateCall(F, Depth));
- }
- case Builtin::BI_ReturnAddress: {
- Function *F = CGM.getIntrinsic(Intrinsic::returnaddress);
- return RValue::get(Builder.CreateCall(F, Builder.getInt32(0)));
- }
- case Builtin::BI__builtin_frame_address: {
- Value *Depth = ConstantEmitter(*this).emitAbstract(E->getArg(0),
- getContext().UnsignedIntTy);
- Function *F = CGM.getIntrinsic(Intrinsic::frameaddress);
- return RValue::get(Builder.CreateCall(F, Depth));
- }
- case Builtin::BI__builtin_extract_return_addr: {
- Value *Address = EmitScalarExpr(E->getArg(0));
- Value *Result = getTargetHooks().decodeReturnAddress(*this, Address);
- return RValue::get(Result);
- }
- case Builtin::BI__builtin_frob_return_addr: {
- Value *Address = EmitScalarExpr(E->getArg(0));
- Value *Result = getTargetHooks().encodeReturnAddress(*this, Address);
- return RValue::get(Result);
- }
- case Builtin::BI__builtin_dwarf_sp_column: {
- llvm::IntegerType *Ty
- = cast<llvm::IntegerType>(ConvertType(E->getType()));
- int Column = getTargetHooks().getDwarfEHStackPointer(CGM);
- if (Column == -1) {
- CGM.ErrorUnsupported(E, "__builtin_dwarf_sp_column");
- return RValue::get(llvm::UndefValue::get(Ty));
- }
- return RValue::get(llvm::ConstantInt::get(Ty, Column, true));
- }
- case Builtin::BI__builtin_init_dwarf_reg_size_table: {
- Value *Address = EmitScalarExpr(E->getArg(0));
- if (getTargetHooks().initDwarfEHRegSizeTable(*this, Address))
- CGM.ErrorUnsupported(E, "__builtin_init_dwarf_reg_size_table");
- return RValue::get(llvm::UndefValue::get(ConvertType(E->getType())));
- }
- case Builtin::BI__builtin_eh_return: {
- Value *Int = EmitScalarExpr(E->getArg(0));
- Value *Ptr = EmitScalarExpr(E->getArg(1));
- llvm::IntegerType *IntTy = cast<llvm::IntegerType>(Int->getType());
- assert((IntTy->getBitWidth() == 32 || IntTy->getBitWidth() == 64) &&
- "LLVM's __builtin_eh_return only supports 32- and 64-bit variants");
- Function *F =
- CGM.getIntrinsic(IntTy->getBitWidth() == 32 ? Intrinsic::eh_return_i32
- : Intrinsic::eh_return_i64);
- Builder.CreateCall(F, {Int, Ptr});
- Builder.CreateUnreachable();
- // We do need to preserve an insertion point.
- EmitBlock(createBasicBlock("builtin_eh_return.cont"));
- return RValue::get(nullptr);
- }
- case Builtin::BI__builtin_unwind_init: {
- Function *F = CGM.getIntrinsic(Intrinsic::eh_unwind_init);
- return RValue::get(Builder.CreateCall(F));
- }
- case Builtin::BI__builtin_extend_pointer: {
- // Extends a pointer to the size of an _Unwind_Word, which is
- // uint64_t on all platforms. Generally this gets poked into a
- // register and eventually used as an address, so if the
- // addressing registers are wider than pointers and the platform
- // doesn't implicitly ignore high-order bits when doing
- // addressing, we need to make sure we zext / sext based on
- // the platform's expectations.
- //
- // See: http://gcc.gnu.org/ml/gcc-bugs/2002-02/msg00237.html
- // Cast the pointer to intptr_t.
- Value *Ptr = EmitScalarExpr(E->getArg(0));
- Value *Result = Builder.CreatePtrToInt(Ptr, IntPtrTy, "extend.cast");
- // If that's 64 bits, we're done.
- if (IntPtrTy->getBitWidth() == 64)
- return RValue::get(Result);
- // Otherwise, ask the codegen data what to do.
- if (getTargetHooks().extendPointerWithSExt())
- return RValue::get(Builder.CreateSExt(Result, Int64Ty, "extend.sext"));
- else
- return RValue::get(Builder.CreateZExt(Result, Int64Ty, "extend.zext"));
- }
- case Builtin::BI__builtin_setjmp: {
- // Buffer is a void**.
- Address Buf = EmitPointerWithAlignment(E->getArg(0));
- // Store the frame pointer to the setjmp buffer.
- Value *FrameAddr =
- Builder.CreateCall(CGM.getIntrinsic(Intrinsic::frameaddress),
- ConstantInt::get(Int32Ty, 0));
- Builder.CreateStore(FrameAddr, Buf);
- // Store the stack pointer to the setjmp buffer.
- Value *StackAddr =
- Builder.CreateCall(CGM.getIntrinsic(Intrinsic::stacksave));
- Address StackSaveSlot = Builder.CreateConstInBoundsGEP(Buf, 2);
- Builder.CreateStore(StackAddr, StackSaveSlot);
- // Call LLVM's EH setjmp, which is lightweight.
- Function *F = CGM.getIntrinsic(Intrinsic::eh_sjlj_setjmp);
- Buf = Builder.CreateBitCast(Buf, Int8PtrTy);
- return RValue::get(Builder.CreateCall(F, Buf.getPointer()));
- }
- case Builtin::BI__builtin_longjmp: {
- Value *Buf = EmitScalarExpr(E->getArg(0));
- Buf = Builder.CreateBitCast(Buf, Int8PtrTy);
- // Call LLVM's EH longjmp, which is lightweight.
- Builder.CreateCall(CGM.getIntrinsic(Intrinsic::eh_sjlj_longjmp), Buf);
- // longjmp doesn't return; mark this as unreachable.
- Builder.CreateUnreachable();
- // We do need to preserve an insertion point.
- EmitBlock(createBasicBlock("longjmp.cont"));
- return RValue::get(nullptr);
- }
- case Builtin::BI__builtin_launder: {
- const Expr *Arg = E->getArg(0);
- QualType ArgTy = Arg->getType()->getPointeeType();
- Value *Ptr = EmitScalarExpr(Arg);
- if (TypeRequiresBuiltinLaunder(CGM, ArgTy))
- Ptr = Builder.CreateLaunderInvariantGroup(Ptr);
- return RValue::get(Ptr);
- }
- case Builtin::BI__sync_fetch_and_add:
- case Builtin::BI__sync_fetch_and_sub:
- case Builtin::BI__sync_fetch_and_or:
- case Builtin::BI__sync_fetch_and_and:
- case Builtin::BI__sync_fetch_and_xor:
- case Builtin::BI__sync_fetch_and_nand:
- case Builtin::BI__sync_add_and_fetch:
- case Builtin::BI__sync_sub_and_fetch:
- case Builtin::BI__sync_and_and_fetch:
- case Builtin::BI__sync_or_and_fetch:
- case Builtin::BI__sync_xor_and_fetch:
- case Builtin::BI__sync_nand_and_fetch:
- case Builtin::BI__sync_val_compare_and_swap:
- case Builtin::BI__sync_bool_compare_and_swap:
- case Builtin::BI__sync_lock_test_and_set:
- case Builtin::BI__sync_lock_release:
- case Builtin::BI__sync_swap:
- llvm_unreachable("Shouldn't make it through sema");
- case Builtin::BI__sync_fetch_and_add_1:
- case Builtin::BI__sync_fetch_and_add_2:
- case Builtin::BI__sync_fetch_and_add_4:
- case Builtin::BI__sync_fetch_and_add_8:
- case Builtin::BI__sync_fetch_and_add_16:
- return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Add, E);
- case Builtin::BI__sync_fetch_and_sub_1:
- case Builtin::BI__sync_fetch_and_sub_2:
- case Builtin::BI__sync_fetch_and_sub_4:
- case Builtin::BI__sync_fetch_and_sub_8:
- case Builtin::BI__sync_fetch_and_sub_16:
- return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Sub, E);
- case Builtin::BI__sync_fetch_and_or_1:
- case Builtin::BI__sync_fetch_and_or_2:
- case Builtin::BI__sync_fetch_and_or_4:
- case Builtin::BI__sync_fetch_and_or_8:
- case Builtin::BI__sync_fetch_and_or_16:
- return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Or, E);
- case Builtin::BI__sync_fetch_and_and_1:
- case Builtin::BI__sync_fetch_and_and_2:
- case Builtin::BI__sync_fetch_and_and_4:
- case Builtin::BI__sync_fetch_and_and_8:
- case Builtin::BI__sync_fetch_and_and_16:
- return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::And, E);
- case Builtin::BI__sync_fetch_and_xor_1:
- case Builtin::BI__sync_fetch_and_xor_2:
- case Builtin::BI__sync_fetch_and_xor_4:
- case Builtin::BI__sync_fetch_and_xor_8:
- case Builtin::BI__sync_fetch_and_xor_16:
- return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xor, E);
- case Builtin::BI__sync_fetch_and_nand_1:
- case Builtin::BI__sync_fetch_and_nand_2:
- case Builtin::BI__sync_fetch_and_nand_4:
- case Builtin::BI__sync_fetch_and_nand_8:
- case Builtin::BI__sync_fetch_and_nand_16:
- return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Nand, E);
- // Clang extensions: not overloaded yet.
- case Builtin::BI__sync_fetch_and_min:
- return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Min, E);
- case Builtin::BI__sync_fetch_and_max:
- return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Max, E);
- case Builtin::BI__sync_fetch_and_umin:
- return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::UMin, E);
- case Builtin::BI__sync_fetch_and_umax:
- return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::UMax, E);
- case Builtin::BI__sync_add_and_fetch_1:
- case Builtin::BI__sync_add_and_fetch_2:
- case Builtin::BI__sync_add_and_fetch_4:
- case Builtin::BI__sync_add_and_fetch_8:
- case Builtin::BI__sync_add_and_fetch_16:
- return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Add, E,
- llvm::Instruction::Add);
- case Builtin::BI__sync_sub_and_fetch_1:
- case Builtin::BI__sync_sub_and_fetch_2:
- case Builtin::BI__sync_sub_and_fetch_4:
- case Builtin::BI__sync_sub_and_fetch_8:
- case Builtin::BI__sync_sub_and_fetch_16:
- return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Sub, E,
- llvm::Instruction::Sub);
- case Builtin::BI__sync_and_and_fetch_1:
- case Builtin::BI__sync_and_and_fetch_2:
- case Builtin::BI__sync_and_and_fetch_4:
- case Builtin::BI__sync_and_and_fetch_8:
- case Builtin::BI__sync_and_and_fetch_16:
- return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::And, E,
- llvm::Instruction::And);
- case Builtin::BI__sync_or_and_fetch_1:
- case Builtin::BI__sync_or_and_fetch_2:
- case Builtin::BI__sync_or_and_fetch_4:
- case Builtin::BI__sync_or_and_fetch_8:
- case Builtin::BI__sync_or_and_fetch_16:
- return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Or, E,
- llvm::Instruction::Or);
- case Builtin::BI__sync_xor_and_fetch_1:
- case Builtin::BI__sync_xor_and_fetch_2:
- case Builtin::BI__sync_xor_and_fetch_4:
- case Builtin::BI__sync_xor_and_fetch_8:
- case Builtin::BI__sync_xor_and_fetch_16:
- return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Xor, E,
- llvm::Instruction::Xor);
- case Builtin::BI__sync_nand_and_fetch_1:
- case Builtin::BI__sync_nand_and_fetch_2:
- case Builtin::BI__sync_nand_and_fetch_4:
- case Builtin::BI__sync_nand_and_fetch_8:
- case Builtin::BI__sync_nand_and_fetch_16:
- return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Nand, E,
- llvm::Instruction::And, true);
- case Builtin::BI__sync_val_compare_and_swap_1:
- case Builtin::BI__sync_val_compare_and_swap_2:
- case Builtin::BI__sync_val_compare_and_swap_4:
- case Builtin::BI__sync_val_compare_and_swap_8:
- case Builtin::BI__sync_val_compare_and_swap_16:
- return RValue::get(MakeAtomicCmpXchgValue(*this, E, false));
- case Builtin::BI__sync_bool_compare_and_swap_1:
- case Builtin::BI__sync_bool_compare_and_swap_2:
- case Builtin::BI__sync_bool_compare_and_swap_4:
- case Builtin::BI__sync_bool_compare_and_swap_8:
- case Builtin::BI__sync_bool_compare_and_swap_16:
- return RValue::get(MakeAtomicCmpXchgValue(*this, E, true));
- case Builtin::BI__sync_swap_1:
- case Builtin::BI__sync_swap_2:
- case Builtin::BI__sync_swap_4:
- case Builtin::BI__sync_swap_8:
- case Builtin::BI__sync_swap_16:
- return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xchg, E);
- case Builtin::BI__sync_lock_test_and_set_1:
- case Builtin::BI__sync_lock_test_and_set_2:
- case Builtin::BI__sync_lock_test_and_set_4:
- case Builtin::BI__sync_lock_test_and_set_8:
- case Builtin::BI__sync_lock_test_and_set_16:
- return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xchg, E);
- case Builtin::BI__sync_lock_release_1:
- case Builtin::BI__sync_lock_release_2:
- case Builtin::BI__sync_lock_release_4:
- case Builtin::BI__sync_lock_release_8:
- case Builtin::BI__sync_lock_release_16: {
- Value *Ptr = EmitScalarExpr(E->getArg(0));
- QualType ElTy = E->getArg(0)->getType()->getPointeeType();
- CharUnits StoreSize = getContext().getTypeSizeInChars(ElTy);
- llvm::Type *ITy = llvm::IntegerType::get(getLLVMContext(),
- StoreSize.getQuantity() * 8);
- Ptr = Builder.CreateBitCast(Ptr, ITy->getPointerTo());
- llvm::StoreInst *Store =
- Builder.CreateAlignedStore(llvm::Constant::getNullValue(ITy), Ptr,
- StoreSize);
- Store->setAtomic(llvm::AtomicOrdering::Release);
- return RValue::get(nullptr);
- }
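- // Example (illustrative): __sync_lock_release(&flag) on an int becomes
- //   store atomic i32 0, i32* %flag release, align 4
- // i.e. a release-ordered store of zero, not a full barrier.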
- case Builtin::BI__sync_synchronize: {
- // We assume this is supposed to correspond to a C++0x-style
- // sequentially-consistent fence (i.e. this is only usable for
- // synchronization, not device I/O or anything like that). This intrinsic
- // is really badly designed in the sense that in theory, there isn't
- // any way to safely use it... but in practice, it mostly works
- // to use it with non-atomic loads and stores to get acquire/release
- // semantics.
- Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent);
- return RValue::get(nullptr);
- }
- case Builtin::BI__builtin_nontemporal_load:
- return RValue::get(EmitNontemporalLoad(*this, E));
- case Builtin::BI__builtin_nontemporal_store:
- return RValue::get(EmitNontemporalStore(*this, E));
- case Builtin::BI__c11_atomic_is_lock_free:
- case Builtin::BI__atomic_is_lock_free: {
- // Call "bool __atomic_is_lock_free(size_t size, void *ptr)". For the
- // __c11 builtin, ptr is 0 (indicating a properly-aligned object), since
- // _Atomic(T) is always properly-aligned.
- const char *LibCallName = "__atomic_is_lock_free";
- CallArgList Args;
- Args.add(RValue::get(EmitScalarExpr(E->getArg(0))),
- getContext().getSizeType());
- if (BuiltinID == Builtin::BI__atomic_is_lock_free)
- Args.add(RValue::get(EmitScalarExpr(E->getArg(1))),
- getContext().VoidPtrTy);
- else
- Args.add(RValue::get(llvm::Constant::getNullValue(VoidPtrTy)),
- getContext().VoidPtrTy);
- const CGFunctionInfo &FuncInfo =
- CGM.getTypes().arrangeBuiltinFunctionCall(E->getType(), Args);
- llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FuncInfo);
- llvm::FunctionCallee Func = CGM.CreateRuntimeFunction(FTy, LibCallName);
- return EmitCall(FuncInfo, CGCallee::forDirect(Func),
- ReturnValueSlot(), Args);
- }
- case Builtin::BI__atomic_test_and_set: {
- // Look at the argument type to determine whether this is a volatile
- // operation. The parameter type is always volatile.
- QualType PtrTy = E->getArg(0)->IgnoreImpCasts()->getType();
- bool Volatile =
- PtrTy->castAs<PointerType>()->getPointeeType().isVolatileQualified();
- Value *Ptr = EmitScalarExpr(E->getArg(0));
- unsigned AddrSpace = Ptr->getType()->getPointerAddressSpace();
- Ptr = Builder.CreateBitCast(Ptr, Int8Ty->getPointerTo(AddrSpace));
- Value *NewVal = Builder.getInt8(1);
- Value *Order = EmitScalarExpr(E->getArg(1));
- if (isa<llvm::ConstantInt>(Order)) {
- int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
- AtomicRMWInst *Result = nullptr;
- switch (ord) {
- case 0: // memory_order_relaxed
- default: // invalid order
- Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg, Ptr, NewVal,
- llvm::AtomicOrdering::Monotonic);
- break;
- case 1: // memory_order_consume
- case 2: // memory_order_acquire
- Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg, Ptr, NewVal,
- llvm::AtomicOrdering::Acquire);
- break;
- case 3: // memory_order_release
- Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg, Ptr, NewVal,
- llvm::AtomicOrdering::Release);
- break;
- case 4: // memory_order_acq_rel
- Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg, Ptr, NewVal,
- llvm::AtomicOrdering::AcquireRelease);
- break;
- case 5: // memory_order_seq_cst
- Result = Builder.CreateAtomicRMW(
- llvm::AtomicRMWInst::Xchg, Ptr, NewVal,
- llvm::AtomicOrdering::SequentiallyConsistent);
- break;
- }
- Result->setVolatile(Volatile);
- return RValue::get(Builder.CreateIsNotNull(Result, "tobool"));
- }
- llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);
- llvm::BasicBlock *BBs[5] = {
- createBasicBlock("monotonic", CurFn),
- createBasicBlock("acquire", CurFn),
- createBasicBlock("release", CurFn),
- createBasicBlock("acqrel", CurFn),
- createBasicBlock("seqcst", CurFn)
- };
- llvm::AtomicOrdering Orders[5] = {
- llvm::AtomicOrdering::Monotonic, llvm::AtomicOrdering::Acquire,
- llvm::AtomicOrdering::Release, llvm::AtomicOrdering::AcquireRelease,
- llvm::AtomicOrdering::SequentiallyConsistent};
- Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
- llvm::SwitchInst *SI = Builder.CreateSwitch(Order, BBs[0]);
- Builder.SetInsertPoint(ContBB);
- PHINode *Result = Builder.CreatePHI(Int8Ty, 5, "was_set");
- for (unsigned i = 0; i < 5; ++i) {
- Builder.SetInsertPoint(BBs[i]);
- AtomicRMWInst *RMW = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg,
- Ptr, NewVal, Orders[i]);
- RMW->setVolatile(Volatile);
- Result->addIncoming(RMW, BBs[i]);
- Builder.CreateBr(ContBB);
- }
- SI->addCase(Builder.getInt32(0), BBs[0]);
- SI->addCase(Builder.getInt32(1), BBs[1]);
- SI->addCase(Builder.getInt32(2), BBs[1]);
- SI->addCase(Builder.getInt32(3), BBs[2]);
- SI->addCase(Builder.getInt32(4), BBs[3]);
- SI->addCase(Builder.getInt32(5), BBs[4]);
- Builder.SetInsertPoint(ContBB);
- return RValue::get(Builder.CreateIsNotNull(Result, "tobool"));
- }
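- // __atomic_clear atomically stores 0 to the i8 at the given address. Only
- // relaxed, release, and seq_cst are legal orderings for a store, so the
- // non-constant case switches over just those three.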
- case Builtin::BI__atomic_clear: {
- QualType PtrTy = E->getArg(0)->IgnoreImpCasts()->getType();
- bool Volatile =
- PtrTy->castAs<PointerType>()->getPointeeType().isVolatileQualified();
- Address Ptr = EmitPointerWithAlignment(E->getArg(0));
- unsigned AddrSpace = Ptr.getPointer()->getType()->getPointerAddressSpace();
- Ptr = Builder.CreateBitCast(Ptr, Int8Ty->getPointerTo(AddrSpace));
- Value *NewVal = Builder.getInt8(0);
- Value *Order = EmitScalarExpr(E->getArg(1));
- if (isa<llvm::ConstantInt>(Order)) {
- int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
- StoreInst *Store = Builder.CreateStore(NewVal, Ptr, Volatile);
- switch (ord) {
- case 0: // memory_order_relaxed
- default: // invalid order
- Store->setOrdering(llvm::AtomicOrdering::Monotonic);
- break;
- case 3: // memory_order_release
- Store->setOrdering(llvm::AtomicOrdering::Release);
- break;
- case 5: // memory_order_seq_cst
- Store->setOrdering(llvm::AtomicOrdering::SequentiallyConsistent);
- break;
- }
- return RValue::get(nullptr);
- }
- llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);
- llvm::BasicBlock *BBs[3] = {
- createBasicBlock("monotonic", CurFn),
- createBasicBlock("release", CurFn),
- createBasicBlock("seqcst", CurFn)
- };
- llvm::AtomicOrdering Orders[3] = {
- llvm::AtomicOrdering::Monotonic, llvm::AtomicOrdering::Release,
- llvm::AtomicOrdering::SequentiallyConsistent};
- Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
- llvm::SwitchInst *SI = Builder.CreateSwitch(Order, BBs[0]);
- for (unsigned i = 0; i < 3; ++i) {
- Builder.SetInsertPoint(BBs[i]);
- StoreInst *Store = Builder.CreateStore(NewVal, Ptr, Volatile);
- Store->setOrdering(Orders[i]);
- Builder.CreateBr(ContBB);
- }
- SI->addCase(Builder.getInt32(0), BBs[0]);
- SI->addCase(Builder.getInt32(3), BBs[1]);
- SI->addCase(Builder.getInt32(5), BBs[2]);
- Builder.SetInsertPoint(ContBB);
- return RValue::get(nullptr);
- }
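- // Thread fences synchronize with all threads (System scope); signal
- // fences only order with respect to signal handlers on the same thread
- // (SingleThread scope). Relaxed or invalid orderings emit no fence.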
- case Builtin::BI__atomic_thread_fence:
- case Builtin::BI__atomic_signal_fence:
- case Builtin::BI__c11_atomic_thread_fence:
- case Builtin::BI__c11_atomic_signal_fence: {
- llvm::SyncScope::ID SSID;
- if (BuiltinID == Builtin::BI__atomic_signal_fence ||
- BuiltinID == Builtin::BI__c11_atomic_signal_fence)
- SSID = llvm::SyncScope::SingleThread;
- else
- SSID = llvm::SyncScope::System;
- Value *Order = EmitScalarExpr(E->getArg(0));
- if (isa<llvm::ConstantInt>(Order)) {
- int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
- switch (ord) {
- case 0: // memory_order_relaxed
- default: // invalid order
- break;
- case 1: // memory_order_consume
- case 2: // memory_order_acquire
- Builder.CreateFence(llvm::AtomicOrdering::Acquire, SSID);
- break;
- case 3: // memory_order_release
- Builder.CreateFence(llvm::AtomicOrdering::Release, SSID);
- break;
- case 4: // memory_order_acq_rel
- Builder.CreateFence(llvm::AtomicOrdering::AcquireRelease, SSID);
- break;
- case 5: // memory_order_seq_cst
- Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent, SSID);
- break;
- }
- return RValue::get(nullptr);
- }
- llvm::BasicBlock *AcquireBB, *ReleaseBB, *AcqRelBB, *SeqCstBB;
- AcquireBB = createBasicBlock("acquire", CurFn);
- ReleaseBB = createBasicBlock("release", CurFn);
- AcqRelBB = createBasicBlock("acqrel", CurFn);
- SeqCstBB = createBasicBlock("seqcst", CurFn);
- llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);
- Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
- llvm::SwitchInst *SI = Builder.CreateSwitch(Order, ContBB);
- Builder.SetInsertPoint(AcquireBB);
- Builder.CreateFence(llvm::AtomicOrdering::Acquire, SSID);
- Builder.CreateBr(ContBB);
- SI->addCase(Builder.getInt32(1), AcquireBB);
- SI->addCase(Builder.getInt32(2), AcquireBB);
- Builder.SetInsertPoint(ReleaseBB);
- Builder.CreateFence(llvm::AtomicOrdering::Release, SSID);
- Builder.CreateBr(ContBB);
- SI->addCase(Builder.getInt32(3), ReleaseBB);
- Builder.SetInsertPoint(AcqRelBB);
- Builder.CreateFence(llvm::AtomicOrdering::AcquireRelease, SSID);
- Builder.CreateBr(ContBB);
- SI->addCase(Builder.getInt32(4), AcqRelBB);
- Builder.SetInsertPoint(SeqCstBB);
- Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent, SSID);
- Builder.CreateBr(ContBB);
- SI->addCase(Builder.getInt32(5), SeqCstBB);
- Builder.SetInsertPoint(ContBB);
- return RValue::get(nullptr);
- }
- case Builtin::BI__builtin_signbit:
- case Builtin::BI__builtin_signbitf:
- case Builtin::BI__builtin_signbitl: {
- return RValue::get(
- Builder.CreateZExt(EmitSignBit(*this, EmitScalarExpr(E->getArg(0))),
- ConvertType(E->getType())));
- }
- case Builtin::BI__annotation: {
- // Re-encode each wide string to UTF8 and make an MDString.
- SmallVector<Metadata *, 1> Strings;
- for (const Expr *Arg : E->arguments()) {
- const auto *Str = cast<StringLiteral>(Arg->IgnoreParenCasts());
- assert(Str->getCharByteWidth() == 2);
- StringRef WideBytes = Str->getBytes();
- std::string StrUtf8;
- if (!convertUTF16ToUTF8String(
- makeArrayRef(WideBytes.data(), WideBytes.size()), StrUtf8)) {
- CGM.ErrorUnsupported(E, "non-UTF16 __annotation argument");
- continue;
- }
- Strings.push_back(llvm::MDString::get(getLLVMContext(), StrUtf8));
- }
- // Build an MDTuple of MDStrings and emit the intrinsic call.
- llvm::Function *F =
- CGM.getIntrinsic(llvm::Intrinsic::codeview_annotation, {});
- MDTuple *StrTuple = MDTuple::get(getLLVMContext(), Strings);
- Builder.CreateCall(F, MetadataAsValue::get(getLLVMContext(), StrTuple));
- return RValue::getIgnored();
- }
- case Builtin::BI__builtin_annotation: {
- llvm::Value *AnnVal = EmitScalarExpr(E->getArg(0));
- llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::annotation,
- AnnVal->getType());
- // Get the annotation string, looking through casts. Sema requires this to
- // be a non-wide string literal, potentially cast, so the cast<> is safe.
- const Expr *AnnotationStrExpr = E->getArg(1)->IgnoreParenCasts();
- StringRef Str = cast<StringLiteral>(AnnotationStrExpr)->getString();
- return RValue::get(EmitAnnotationCall(F, AnnVal, Str, E->getExprLoc()));
- }
- case Builtin::BI__builtin_addcb:
- case Builtin::BI__builtin_addcs:
- case Builtin::BI__builtin_addc:
- case Builtin::BI__builtin_addcl:
- case Builtin::BI__builtin_addcll:
- case Builtin::BI__builtin_subcb:
- case Builtin::BI__builtin_subcs:
- case Builtin::BI__builtin_subc:
- case Builtin::BI__builtin_subcl:
- case Builtin::BI__builtin_subcll: {
- // We translate all of these builtins from expressions of the form:
- // int x = ..., y = ..., carryin = ..., carryout, result;
- // result = __builtin_addc(x, y, carryin, &carryout);
- //
- // to LLVM IR of the form:
- //
- // %tmp1 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %x, i32 %y)
- // %tmpsum1 = extractvalue {i32, i1} %tmp1, 0
- // %carry1 = extractvalue {i32, i1} %tmp1, 1
- // %tmp2 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %tmpsum1,
- // i32 %carryin)
- // %result = extractvalue {i32, i1} %tmp2, 0
- // %carry2 = extractvalue {i32, i1} %tmp2, 1
- // %tmp3 = or i1 %carry1, %carry2
- // %tmp4 = zext i1 %tmp3 to i32
- // store i32 %tmp4, i32* %carryout
- // Scalarize our inputs.
- llvm::Value *X = EmitScalarExpr(E->getArg(0));
- llvm::Value *Y = EmitScalarExpr(E->getArg(1));
- llvm::Value *Carryin = EmitScalarExpr(E->getArg(2));
- Address CarryOutPtr = EmitPointerWithAlignment(E->getArg(3));
- // Decide if we are lowering to a uadd.with.overflow or usub.with.overflow.
- llvm::Intrinsic::ID IntrinsicId;
- switch (BuiltinID) {
- default: llvm_unreachable("Unknown multiprecision builtin id.");
- case Builtin::BI__builtin_addcb:
- case Builtin::BI__builtin_addcs:
- case Builtin::BI__builtin_addc:
- case Builtin::BI__builtin_addcl:
- case Builtin::BI__builtin_addcll:
- IntrinsicId = llvm::Intrinsic::uadd_with_overflow;
- break;
- case Builtin::BI__builtin_subcb:
- case Builtin::BI__builtin_subcs:
- case Builtin::BI__builtin_subc:
- case Builtin::BI__builtin_subcl:
- case Builtin::BI__builtin_subcll:
- IntrinsicId = llvm::Intrinsic::usub_with_overflow;
- break;
- }
- // Construct our resulting LLVM IR expression.
- llvm::Value *Carry1;
- llvm::Value *Sum1 = EmitOverflowIntrinsic(*this, IntrinsicId,
- X, Y, Carry1);
- llvm::Value *Carry2;
- llvm::Value *Sum2 = EmitOverflowIntrinsic(*this, IntrinsicId,
- Sum1, Carryin, Carry2);
- llvm::Value *CarryOut = Builder.CreateZExt(Builder.CreateOr(Carry1, Carry2),
- X->getType());
- Builder.CreateStore(CarryOut, CarryOutPtr);
- return RValue::get(Sum2);
- }
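- // The generic __builtin_{add,sub,mul}_overflow builtins accept operands
- // and a result pointer of possibly different integer types. Perform the
- // operation in a type wide enough to encompass all three, then report
- // overflow if either the intrinsic overflowed or truncating to the result
- // type changed the value.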
- case Builtin::BI__builtin_add_overflow:
- case Builtin::BI__builtin_sub_overflow:
- case Builtin::BI__builtin_mul_overflow: {
- const clang::Expr *LeftArg = E->getArg(0);
- const clang::Expr *RightArg = E->getArg(1);
- const clang::Expr *ResultArg = E->getArg(2);
- clang::QualType ResultQTy =
- ResultArg->getType()->castAs<PointerType>()->getPointeeType();
- WidthAndSignedness LeftInfo =
- getIntegerWidthAndSignedness(CGM.getContext(), LeftArg->getType());
- WidthAndSignedness RightInfo =
- getIntegerWidthAndSignedness(CGM.getContext(), RightArg->getType());
- WidthAndSignedness ResultInfo =
- getIntegerWidthAndSignedness(CGM.getContext(), ResultQTy);
- // Handle mixed-sign multiplication as a special case, because adding
- // runtime or backend support for our generic irgen would be too expensive.
- if (isSpecialMixedSignMultiply(BuiltinID, LeftInfo, RightInfo, ResultInfo))
- return EmitCheckedMixedSignMultiply(*this, LeftArg, LeftInfo, RightArg,
- RightInfo, ResultArg, ResultQTy,
- ResultInfo);
- WidthAndSignedness EncompassingInfo =
- EncompassingIntegerType({LeftInfo, RightInfo, ResultInfo});
- llvm::Type *EncompassingLLVMTy =
- llvm::IntegerType::get(CGM.getLLVMContext(), EncompassingInfo.Width);
- llvm::Type *ResultLLVMTy = CGM.getTypes().ConvertType(ResultQTy);
- llvm::Intrinsic::ID IntrinsicId;
- switch (BuiltinID) {
- default:
- llvm_unreachable("Unknown overflow builtin id.");
- case Builtin::BI__builtin_add_overflow:
- IntrinsicId = EncompassingInfo.Signed
- ? llvm::Intrinsic::sadd_with_overflow
- : llvm::Intrinsic::uadd_with_overflow;
- break;
- case Builtin::BI__builtin_sub_overflow:
- IntrinsicId = EncompassingInfo.Signed
- ? llvm::Intrinsic::ssub_with_overflow
- : llvm::Intrinsic::usub_with_overflow;
- break;
- case Builtin::BI__builtin_mul_overflow:
- IntrinsicId = EncompassingInfo.Signed
- ? llvm::Intrinsic::smul_with_overflow
- : llvm::Intrinsic::umul_with_overflow;
- break;
- }
- llvm::Value *Left = EmitScalarExpr(LeftArg);
- llvm::Value *Right = EmitScalarExpr(RightArg);
- Address ResultPtr = EmitPointerWithAlignment(ResultArg);
- // Extend each operand to the encompassing type.
- Left = Builder.CreateIntCast(Left, EncompassingLLVMTy, LeftInfo.Signed);
- Right = Builder.CreateIntCast(Right, EncompassingLLVMTy, RightInfo.Signed);
- // Perform the operation on the extended values.
- llvm::Value *Overflow, *Result;
- Result = EmitOverflowIntrinsic(*this, IntrinsicId, Left, Right, Overflow);
- if (EncompassingInfo.Width > ResultInfo.Width) {
- // The encompassing type is wider than the result type, so we need to
- // truncate it.
- llvm::Value *ResultTrunc = Builder.CreateTrunc(Result, ResultLLVMTy);
- // To see if the truncation caused an overflow, we will extend
- // the result and then compare it to the original result.
- llvm::Value *ResultTruncExt = Builder.CreateIntCast(
- ResultTrunc, EncompassingLLVMTy, ResultInfo.Signed);
- llvm::Value *TruncationOverflow =
- Builder.CreateICmpNE(Result, ResultTruncExt);
- Overflow = Builder.CreateOr(Overflow, TruncationOverflow);
- Result = ResultTrunc;
- }
- // Finally, store the result using the pointer.
- bool isVolatile =
- ResultArg->getType()->getPointeeType().isVolatileQualified();
- Builder.CreateStore(EmitToMemory(Result, ResultQTy), ResultPtr, isVolatile);
- return RValue::get(Overflow);
- }
- case Builtin::BI__builtin_uadd_overflow:
- case Builtin::BI__builtin_uaddl_overflow:
- case Builtin::BI__builtin_uaddll_overflow:
- case Builtin::BI__builtin_usub_overflow:
- case Builtin::BI__builtin_usubl_overflow:
- case Builtin::BI__builtin_usubll_overflow:
- case Builtin::BI__builtin_umul_overflow:
- case Builtin::BI__builtin_umull_overflow:
- case Builtin::BI__builtin_umulll_overflow:
- case Builtin::BI__builtin_sadd_overflow:
- case Builtin::BI__builtin_saddl_overflow:
- case Builtin::BI__builtin_saddll_overflow:
- case Builtin::BI__builtin_ssub_overflow:
- case Builtin::BI__builtin_ssubl_overflow:
- case Builtin::BI__builtin_ssubll_overflow:
- case Builtin::BI__builtin_smul_overflow:
- case Builtin::BI__builtin_smull_overflow:
- case Builtin::BI__builtin_smulll_overflow: {
- // We translate all of these builtins directly to the relevant llvm IR node.
- // Scalarize our inputs.
- llvm::Value *X = EmitScalarExpr(E->getArg(0));
- llvm::Value *Y = EmitScalarExpr(E->getArg(1));
- Address SumOutPtr = EmitPointerWithAlignment(E->getArg(2));
- // Decide which of the overflow intrinsics we are lowering to:
- llvm::Intrinsic::ID IntrinsicId;
- switch (BuiltinID) {
- default: llvm_unreachable("Unknown overflow builtin id.");
- case Builtin::BI__builtin_uadd_overflow:
- case Builtin::BI__builtin_uaddl_overflow:
- case Builtin::BI__builtin_uaddll_overflow:
- IntrinsicId = llvm::Intrinsic::uadd_with_overflow;
- break;
- case Builtin::BI__builtin_usub_overflow:
- case Builtin::BI__builtin_usubl_overflow:
- case Builtin::BI__builtin_usubll_overflow:
- IntrinsicId = llvm::Intrinsic::usub_with_overflow;
- break;
- case Builtin::BI__builtin_umul_overflow:
- case Builtin::BI__builtin_umull_overflow:
- case Builtin::BI__builtin_umulll_overflow:
- IntrinsicId = llvm::Intrinsic::umul_with_overflow;
- break;
- case Builtin::BI__builtin_sadd_overflow:
- case Builtin::BI__builtin_saddl_overflow:
- case Builtin::BI__builtin_saddll_overflow:
- IntrinsicId = llvm::Intrinsic::sadd_with_overflow;
- break;
- case Builtin::BI__builtin_ssub_overflow:
- case Builtin::BI__builtin_ssubl_overflow:
- case Builtin::BI__builtin_ssubll_overflow:
- IntrinsicId = llvm::Intrinsic::ssub_with_overflow;
- break;
- case Builtin::BI__builtin_smul_overflow:
- case Builtin::BI__builtin_smull_overflow:
- case Builtin::BI__builtin_smulll_overflow:
- IntrinsicId = llvm::Intrinsic::smul_with_overflow;
- break;
- }
- llvm::Value *Carry;
- llvm::Value *Sum = EmitOverflowIntrinsic(*this, IntrinsicId, X, Y, Carry);
- Builder.CreateStore(Sum, SumOutPtr);
- return RValue::get(Carry);
- }
- case Builtin::BI__builtin_addressof:
- return RValue::get(EmitLValue(E->getArg(0)).getPointer());
- case Builtin::BI__builtin_operator_new:
- return EmitBuiltinNewDeleteCall(
- E->getCallee()->getType()->castAs<FunctionProtoType>(), E, false);
- case Builtin::BI__builtin_operator_delete:
- return EmitBuiltinNewDeleteCall(
- E->getCallee()->getType()->castAs<FunctionProtoType>(), E, true);
- case Builtin::BI__noop:
- // __noop always evaluates to an integer literal zero.
- return RValue::get(ConstantInt::get(IntTy, 0));
- case Builtin::BI__builtin_call_with_static_chain: {
- const CallExpr *Call = cast<CallExpr>(E->getArg(0));
- const Expr *Chain = E->getArg(1);
- return EmitCall(Call->getCallee()->getType(),
- EmitCallee(Call->getCallee()), Call, ReturnValue,
- EmitScalarExpr(Chain));
- }
- case Builtin::BI_InterlockedExchange8:
- case Builtin::BI_InterlockedExchange16:
- case Builtin::BI_InterlockedExchange:
- case Builtin::BI_InterlockedExchangePointer:
- return RValue::get(
- EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchange, E));
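- // _InterlockedCompareExchangePointer is lowered as a volatile cmpxchg on
- // a pointer-sized integer; the _nf ("no fence") variant uses monotonic
- // ordering instead of seq_cst.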
- case Builtin::BI_InterlockedCompareExchangePointer:
- case Builtin::BI_InterlockedCompareExchangePointer_nf: {
- llvm::Type *RTy;
- llvm::IntegerType *IntType =
- IntegerType::get(getLLVMContext(),
- getContext().getTypeSize(E->getType()));
- llvm::Type *IntPtrType = IntType->getPointerTo();
- llvm::Value *Destination =
- Builder.CreateBitCast(EmitScalarExpr(E->getArg(0)), IntPtrType);
- llvm::Value *Exchange = EmitScalarExpr(E->getArg(1));
- RTy = Exchange->getType();
- Exchange = Builder.CreatePtrToInt(Exchange, IntType);
- llvm::Value *Comparand =
- Builder.CreatePtrToInt(EmitScalarExpr(E->getArg(2)), IntType);
- auto Ordering =
- BuiltinID == Builtin::BI_InterlockedCompareExchangePointer_nf ?
- AtomicOrdering::Monotonic : AtomicOrdering::SequentiallyConsistent;
- auto Result = Builder.CreateAtomicCmpXchg(Destination, Comparand, Exchange,
- Ordering, Ordering);
- Result->setVolatile(true);
- return RValue::get(Builder.CreateIntToPtr(Builder.CreateExtractValue(Result,
- 0),
- RTy));
- }
- case Builtin::BI_InterlockedCompareExchange8:
- case Builtin::BI_InterlockedCompareExchange16:
- case Builtin::BI_InterlockedCompareExchange:
- case Builtin::BI_InterlockedCompareExchange64:
- return RValue::get(EmitAtomicCmpXchgForMSIntrin(*this, E));
- case Builtin::BI_InterlockedIncrement16:
- case Builtin::BI_InterlockedIncrement:
- return RValue::get(
- EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedIncrement, E));
- case Builtin::BI_InterlockedDecrement16:
- case Builtin::BI_InterlockedDecrement:
- return RValue::get(
- EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedDecrement, E));
- case Builtin::BI_InterlockedAnd8:
- case Builtin::BI_InterlockedAnd16:
- case Builtin::BI_InterlockedAnd:
- return RValue::get(EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedAnd, E));
- case Builtin::BI_InterlockedExchangeAdd8:
- case Builtin::BI_InterlockedExchangeAdd16:
- case Builtin::BI_InterlockedExchangeAdd:
- return RValue::get(
- EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeAdd, E));
- case Builtin::BI_InterlockedExchangeSub8:
- case Builtin::BI_InterlockedExchangeSub16:
- case Builtin::BI_InterlockedExchangeSub:
- return RValue::get(
- EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeSub, E));
- case Builtin::BI_InterlockedOr8:
- case Builtin::BI_InterlockedOr16:
- case Builtin::BI_InterlockedOr:
- return RValue::get(EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedOr, E));
- case Builtin::BI_InterlockedXor8:
- case Builtin::BI_InterlockedXor16:
- case Builtin::BI_InterlockedXor:
- return RValue::get(EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedXor, E));
- case Builtin::BI_bittest64:
- case Builtin::BI_bittest:
- case Builtin::BI_bittestandcomplement64:
- case Builtin::BI_bittestandcomplement:
- case Builtin::BI_bittestandreset64:
- case Builtin::BI_bittestandreset:
- case Builtin::BI_bittestandset64:
- case Builtin::BI_bittestandset:
- case Builtin::BI_interlockedbittestandreset:
- case Builtin::BI_interlockedbittestandreset64:
- case Builtin::BI_interlockedbittestandset64:
- case Builtin::BI_interlockedbittestandset:
- case Builtin::BI_interlockedbittestandset_acq:
- case Builtin::BI_interlockedbittestandset_rel:
- case Builtin::BI_interlockedbittestandset_nf:
- case Builtin::BI_interlockedbittestandreset_acq:
- case Builtin::BI_interlockedbittestandreset_rel:
- case Builtin::BI_interlockedbittestandreset_nf:
- return RValue::get(EmitBitTestIntrinsic(*this, BuiltinID, E));
- // These builtins exist to emit regular volatile loads and stores not
- // affected by the -fms-volatile setting.
- case Builtin::BI__iso_volatile_load8:
- case Builtin::BI__iso_volatile_load16:
- case Builtin::BI__iso_volatile_load32:
- case Builtin::BI__iso_volatile_load64:
- return RValue::get(EmitISOVolatileLoad(*this, E));
- case Builtin::BI__iso_volatile_store8:
- case Builtin::BI__iso_volatile_store16:
- case Builtin::BI__iso_volatile_store32:
- case Builtin::BI__iso_volatile_store64:
- return RValue::get(EmitISOVolatileStore(*this, E));
- case Builtin::BI__exception_code:
- case Builtin::BI_exception_code:
- return RValue::get(EmitSEHExceptionCode());
- case Builtin::BI__exception_info:
- case Builtin::BI_exception_info:
- return RValue::get(EmitSEHExceptionInfo());
- case Builtin::BI__abnormal_termination:
- case Builtin::BI_abnormal_termination:
- return RValue::get(EmitSEHAbnormalTermination());
- case Builtin::BI_setjmpex:
- if (getTarget().getTriple().isOSMSVCRT())
- return EmitMSVCRTSetJmp(*this, MSVCSetJmpKind::_setjmpex, E);
- break;
- case Builtin::BI_setjmp:
- if (getTarget().getTriple().isOSMSVCRT()) {
- if (getTarget().getTriple().getArch() == llvm::Triple::x86)
- return EmitMSVCRTSetJmp(*this, MSVCSetJmpKind::_setjmp3, E);
- else if (getTarget().getTriple().getArch() == llvm::Triple::aarch64)
- return EmitMSVCRTSetJmp(*this, MSVCSetJmpKind::_setjmpex, E);
- return EmitMSVCRTSetJmp(*this, MSVCSetJmpKind::_setjmp, E);
- }
- break;
- case Builtin::BI__GetExceptionInfo: {
- if (llvm::GlobalVariable *GV =
- CGM.getCXXABI().getThrowInfo(FD->getParamDecl(0)->getType()))
- return RValue::get(llvm::ConstantExpr::getBitCast(GV, CGM.Int8PtrTy));
- break;
- }
- case Builtin::BI__fastfail:
- return RValue::get(EmitMSVCBuiltinExpr(MSVCIntrin::__fastfail, E));
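- // The coroutine builtins map essentially 1:1 onto the llvm.coro.*
- // intrinsics; __builtin_coro_size additionally needs the intrinsic
- // instantiated at the target's size_t width.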
- case Builtin::BI__builtin_coro_size: {
- auto &Context = getContext();
- auto SizeTy = Context.getSizeType();
- auto T = Builder.getIntNTy(Context.getTypeSize(SizeTy));
- Function *F = CGM.getIntrinsic(Intrinsic::coro_size, T);
- return RValue::get(Builder.CreateCall(F));
- }
- case Builtin::BI__builtin_coro_id:
- return EmitCoroutineIntrinsic(E, Intrinsic::coro_id);
- case Builtin::BI__builtin_coro_promise:
- return EmitCoroutineIntrinsic(E, Intrinsic::coro_promise);
- case Builtin::BI__builtin_coro_resume:
- return EmitCoroutineIntrinsic(E, Intrinsic::coro_resume);
- case Builtin::BI__builtin_coro_frame:
- return EmitCoroutineIntrinsic(E, Intrinsic::coro_frame);
- case Builtin::BI__builtin_coro_noop:
- return EmitCoroutineIntrinsic(E, Intrinsic::coro_noop);
- case Builtin::BI__builtin_coro_free:
- return EmitCoroutineIntrinsic(E, Intrinsic::coro_free);
- case Builtin::BI__builtin_coro_destroy:
- return EmitCoroutineIntrinsic(E, Intrinsic::coro_destroy);
- case Builtin::BI__builtin_coro_done:
- return EmitCoroutineIntrinsic(E, Intrinsic::coro_done);
- case Builtin::BI__builtin_coro_alloc:
- return EmitCoroutineIntrinsic(E, Intrinsic::coro_alloc);
- case Builtin::BI__builtin_coro_begin:
- return EmitCoroutineIntrinsic(E, Intrinsic::coro_begin);
- case Builtin::BI__builtin_coro_end:
- return EmitCoroutineIntrinsic(E, Intrinsic::coro_end);
- case Builtin::BI__builtin_coro_suspend:
- return EmitCoroutineIntrinsic(E, Intrinsic::coro_suspend);
- case Builtin::BI__builtin_coro_param:
- return EmitCoroutineIntrinsic(E, Intrinsic::coro_param);
- // OpenCL v2.0 s6.13.16.2, Built-in pipe read and write functions
- case Builtin::BIread_pipe:
- case Builtin::BIwrite_pipe: {
- Value *Arg0 = EmitScalarExpr(E->getArg(0)),
- *Arg1 = EmitScalarExpr(E->getArg(1));
- CGOpenCLRuntime OpenCLRT(CGM);
- Value *PacketSize = OpenCLRT.getPipeElemSize(E->getArg(0));
- Value *PacketAlign = OpenCLRT.getPipeElemAlign(E->getArg(0));
- // Type of the generic packet parameter.
- unsigned GenericAS =
- getContext().getTargetAddressSpace(LangAS::opencl_generic);
- llvm::Type *I8PTy = llvm::PointerType::get(
- llvm::Type::getInt8Ty(getLLVMContext()), GenericAS);
- // Determine which overloaded version of the call to generate.
- if (2U == E->getNumArgs()) {
- const char *Name = (BuiltinID == Builtin::BIread_pipe) ? "__read_pipe_2"
- : "__write_pipe_2";
- // Create a generic function type so the call works with any builtin or
- // user-defined type.
- llvm::Type *ArgTys[] = {Arg0->getType(), I8PTy, Int32Ty, Int32Ty};
- llvm::FunctionType *FTy = llvm::FunctionType::get(
- Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false);
- Value *BCast = Builder.CreatePointerCast(Arg1, I8PTy);
- return RValue::get(
- Builder.CreateCall(CGM.CreateRuntimeFunction(FTy, Name),
- {Arg0, BCast, PacketSize, PacketAlign}));
- } else {
- assert(4 == E->getNumArgs() &&
- "Illegal number of parameters to pipe function");
- const char *Name = (BuiltinID == Builtin::BIread_pipe) ? "__read_pipe_4"
- : "__write_pipe_4";
- llvm::Type *ArgTys[] = {Arg0->getType(), Arg1->getType(), Int32Ty, I8PTy,
- Int32Ty, Int32Ty};
- Value *Arg2 = EmitScalarExpr(E->getArg(2)),
- *Arg3 = EmitScalarExpr(E->getArg(3));
- llvm::FunctionType *FTy = llvm::FunctionType::get(
- Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false);
- Value *BCast = Builder.CreatePointerCast(Arg3, I8PTy);
- // We know the third argument is an integer type, but we may need to cast
- // it to i32.
- if (Arg2->getType() != Int32Ty)
- Arg2 = Builder.CreateZExtOrTrunc(Arg2, Int32Ty);
- return RValue::get(Builder.CreateCall(
- CGM.CreateRuntimeFunction(FTy, Name),
- {Arg0, Arg1, Arg2, BCast, PacketSize, PacketAlign}));
- }
- }
- // OpenCL v2.0 s6.13.16 ,s9.17.3.5 - Built-in pipe reserve read and write
- // functions
- case Builtin::BIreserve_read_pipe:
- case Builtin::BIreserve_write_pipe:
- case Builtin::BIwork_group_reserve_read_pipe:
- case Builtin::BIwork_group_reserve_write_pipe:
- case Builtin::BIsub_group_reserve_read_pipe:
- case Builtin::BIsub_group_reserve_write_pipe: {
- // Composing the mangled name for the function.
- const char *Name;
- if (BuiltinID == Builtin::BIreserve_read_pipe)
- Name = "__reserve_read_pipe";
- else if (BuiltinID == Builtin::BIreserve_write_pipe)
- Name = "__reserve_write_pipe";
- else if (BuiltinID == Builtin::BIwork_group_reserve_read_pipe)
- Name = "__work_group_reserve_read_pipe";
- else if (BuiltinID == Builtin::BIwork_group_reserve_write_pipe)
- Name = "__work_group_reserve_write_pipe";
- else if (BuiltinID == Builtin::BIsub_group_reserve_read_pipe)
- Name = "__sub_group_reserve_read_pipe";
- else
- Name = "__sub_group_reserve_write_pipe";
- Value *Arg0 = EmitScalarExpr(E->getArg(0)),
- *Arg1 = EmitScalarExpr(E->getArg(1));
- llvm::Type *ReservedIDTy = ConvertType(getContext().OCLReserveIDTy);
- CGOpenCLRuntime OpenCLRT(CGM);
- Value *PacketSize = OpenCLRT.getPipeElemSize(E->getArg(0));
- Value *PacketAlign = OpenCLRT.getPipeElemAlign(E->getArg(0));
- // Building the generic function prototype.
- llvm::Type *ArgTys[] = {Arg0->getType(), Int32Ty, Int32Ty, Int32Ty};
- llvm::FunctionType *FTy = llvm::FunctionType::get(
- ReservedIDTy, llvm::ArrayRef<llvm::Type *>(ArgTys), false);
- // We know the second argument is an integer type, but we may need to cast
- // it to i32.
- if (Arg1->getType() != Int32Ty)
- Arg1 = Builder.CreateZExtOrTrunc(Arg1, Int32Ty);
- return RValue::get(
- Builder.CreateCall(CGM.CreateRuntimeFunction(FTy, Name),
- {Arg0, Arg1, PacketSize, PacketAlign}));
- }
- // OpenCL v2.0 s6.13.16, s9.17.3.5 - Built-in pipe commit read and write
- // functions
- case Builtin::BIcommit_read_pipe:
- case Builtin::BIcommit_write_pipe:
- case Builtin::BIwork_group_commit_read_pipe:
- case Builtin::BIwork_group_commit_write_pipe:
- case Builtin::BIsub_group_commit_read_pipe:
- case Builtin::BIsub_group_commit_write_pipe: {
- const char *Name;
- if (BuiltinID == Builtin::BIcommit_read_pipe)
- Name = "__commit_read_pipe";
- else if (BuiltinID == Builtin::BIcommit_write_pipe)
- Name = "__commit_write_pipe";
- else if (BuiltinID == Builtin::BIwork_group_commit_read_pipe)
- Name = "__work_group_commit_read_pipe";
- else if (BuiltinID == Builtin::BIwork_group_commit_write_pipe)
- Name = "__work_group_commit_write_pipe";
- else if (BuiltinID == Builtin::BIsub_group_commit_read_pipe)
- Name = "__sub_group_commit_read_pipe";
- else
- Name = "__sub_group_commit_write_pipe";
- Value *Arg0 = EmitScalarExpr(E->getArg(0)),
- *Arg1 = EmitScalarExpr(E->getArg(1));
- CGOpenCLRuntime OpenCLRT(CGM);
- Value *PacketSize = OpenCLRT.getPipeElemSize(E->getArg(0));
- Value *PacketAlign = OpenCLRT.getPipeElemAlign(E->getArg(0));
- // Building the generic function prototype.
- llvm::Type *ArgTys[] = {Arg0->getType(), Arg1->getType(), Int32Ty, Int32Ty};
- llvm::FunctionType *FTy =
- llvm::FunctionType::get(llvm::Type::getVoidTy(getLLVMContext()),
- llvm::ArrayRef<llvm::Type *>(ArgTys), false);
- return RValue::get(
- Builder.CreateCall(CGM.CreateRuntimeFunction(FTy, Name),
- {Arg0, Arg1, PacketSize, PacketAlign}));
- }
- // OpenCL v2.0 s6.13.16.4 Built-in pipe query functions
- case Builtin::BIget_pipe_num_packets:
- case Builtin::BIget_pipe_max_packets: {
- const char *BaseName;
- const PipeType *PipeTy = E->getArg(0)->getType()->getAs<PipeType>();
- if (BuiltinID == Builtin::BIget_pipe_num_packets)
- BaseName = "__get_pipe_num_packets";
- else
- BaseName = "__get_pipe_max_packets";
- auto Name = std::string(BaseName) +
- std::string(PipeTy->isReadOnly() ? "_ro" : "_wo");
- // Building the generic function prototype.
- Value *Arg0 = EmitScalarExpr(E->getArg(0));
- CGOpenCLRuntime OpenCLRT(CGM);
- Value *PacketSize = OpenCLRT.getPipeElemSize(E->getArg(0));
- Value *PacketAlign = OpenCLRT.getPipeElemAlign(E->getArg(0));
- llvm::Type *ArgTys[] = {Arg0->getType(), Int32Ty, Int32Ty};
- llvm::FunctionType *FTy = llvm::FunctionType::get(
- Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false);
- return RValue::get(Builder.CreateCall(CGM.CreateRuntimeFunction(FTy, Name),
- {Arg0, PacketSize, PacketAlign}));
- }
- // OpenCL v2.0 s6.13.9 - Address space qualifier functions.
- case Builtin::BIto_global:
- case Builtin::BIto_local:
- case Builtin::BIto_private: {
- auto Arg0 = EmitScalarExpr(E->getArg(0));
- auto NewArgT = llvm::PointerType::get(Int8Ty,
- CGM.getContext().getTargetAddressSpace(LangAS::opencl_generic));
- auto NewRetT = llvm::PointerType::get(Int8Ty,
- CGM.getContext().getTargetAddressSpace(
- E->getType()->getPointeeType().getAddressSpace()));
- auto FTy = llvm::FunctionType::get(NewRetT, {NewArgT}, false);
- llvm::Value *NewArg;
- if (Arg0->getType()->getPointerAddressSpace() !=
- NewArgT->getPointerAddressSpace())
- NewArg = Builder.CreateAddrSpaceCast(Arg0, NewArgT);
- else
- NewArg = Builder.CreateBitOrPointerCast(Arg0, NewArgT);
- auto NewName = std::string("__") + E->getDirectCallee()->getName().str();
- auto NewCall =
- Builder.CreateCall(CGM.CreateRuntimeFunction(FTy, NewName), {NewArg});
- return RValue::get(Builder.CreateBitOrPointerCast(NewCall,
- ConvertType(E->getType())));
- }
- // OpenCL v2.0, s6.13.17 - Enqueue kernel function.
- // It contains four different overload formats specified in Table 6.13.17.1.
- case Builtin::BIenqueue_kernel: {
- StringRef Name; // Generated function call name
- unsigned NumArgs = E->getNumArgs();
- llvm::Type *QueueTy = ConvertType(getContext().OCLQueueTy);
- llvm::Type *GenericVoidPtrTy = Builder.getInt8PtrTy(
- getContext().getTargetAddressSpace(LangAS::opencl_generic));
- llvm::Value *Queue = EmitScalarExpr(E->getArg(0));
- llvm::Value *Flags = EmitScalarExpr(E->getArg(1));
- LValue NDRangeL = EmitAggExprToLValue(E->getArg(2));
- llvm::Value *Range = NDRangeL.getAddress().getPointer();
- llvm::Type *RangeTy = NDRangeL.getAddress().getType();
- if (NumArgs == 4) {
- // The most basic form of the call with parameters:
- // queue_t, kernel_enqueue_flags_t, ndrange_t, block(void)
- Name = "__enqueue_kernel_basic";
- llvm::Type *ArgTys[] = {QueueTy, Int32Ty, RangeTy, GenericVoidPtrTy,
- GenericVoidPtrTy};
- llvm::FunctionType *FTy = llvm::FunctionType::get(
- Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false);
- auto Info =
- CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(3));
- llvm::Value *Kernel =
- Builder.CreatePointerCast(Info.Kernel, GenericVoidPtrTy);
- llvm::Value *Block =
- Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
- AttrBuilder B;
- B.addByValAttr(NDRangeL.getAddress().getElementType());
- llvm::AttributeList ByValAttrSet =
- llvm::AttributeList::get(CGM.getModule().getContext(), 3U, B);
- auto RTCall =
- Builder.CreateCall(CGM.CreateRuntimeFunction(FTy, Name, ByValAttrSet),
- {Queue, Flags, Range, Kernel, Block});
- RTCall->setAttributes(ByValAttrSet);
- return RValue::get(RTCall);
- }
- assert(NumArgs >= 5 && "Invalid enqueue_kernel signature");
- // Create a temporary array to hold the sizes of local pointer arguments
- // for the block. \p First is the position of the first size argument.
- auto CreateArrayForSizeVar = [=](unsigned First)
- -> std::tuple<llvm::Value *, llvm::Value *, llvm::Value *> {
- llvm::APInt ArraySize(32, NumArgs - First);
- QualType SizeArrayTy = getContext().getConstantArrayType(
- getContext().getSizeType(), ArraySize, ArrayType::Normal,
- /*IndexTypeQuals=*/0);
- auto Tmp = CreateMemTemp(SizeArrayTy, "block_sizes");
- llvm::Value *TmpPtr = Tmp.getPointer();
- llvm::Value *TmpSize = EmitLifetimeStart(
- CGM.getDataLayout().getTypeAllocSize(Tmp.getElementType()), TmpPtr);
- llvm::Value *ElemPtr;
- // Each of the following arguments specifies the size of the corresponding
- // argument passed to the enqueued block.
- auto *Zero = llvm::ConstantInt::get(IntTy, 0);
- for (unsigned I = First; I < NumArgs; ++I) {
- auto *Index = llvm::ConstantInt::get(IntTy, I - First);
- auto *GEP = Builder.CreateGEP(TmpPtr, {Zero, Index});
- if (I == First)
- ElemPtr = GEP;
- auto *V =
- Builder.CreateZExtOrTrunc(EmitScalarExpr(E->getArg(I)), SizeTy);
- Builder.CreateAlignedStore(
- V, GEP, CGM.getDataLayout().getPrefTypeAlignment(SizeTy));
- }
- return std::tie(ElemPtr, TmpSize, TmpPtr);
- };
- // Could have events and/or varargs.
- if (E->getArg(3)->getType()->isBlockPointerType()) {
- // No events passed, but has variadic arguments.
- Name = "__enqueue_kernel_varargs";
- auto Info =
- CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(3));
- llvm::Value *Kernel =
- Builder.CreatePointerCast(Info.Kernel, GenericVoidPtrTy);
- auto *Block = Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
- llvm::Value *ElemPtr, *TmpSize, *TmpPtr;
- std::tie(ElemPtr, TmpSize, TmpPtr) = CreateArrayForSizeVar(4);
- // Create a vector of the arguments, as well as a constant value to
- // express to the runtime the number of variadic arguments.
- std::vector<llvm::Value *> Args = {
- Queue, Flags, Range,
- Kernel, Block, ConstantInt::get(IntTy, NumArgs - 4),
- ElemPtr};
- std::vector<llvm::Type *> ArgTys = {
- QueueTy, IntTy, RangeTy, GenericVoidPtrTy,
- GenericVoidPtrTy, IntTy, ElemPtr->getType()};
- llvm::FunctionType *FTy = llvm::FunctionType::get(
- Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false);
- auto Call =
- RValue::get(Builder.CreateCall(CGM.CreateRuntimeFunction(FTy, Name),
- llvm::ArrayRef<llvm::Value *>(Args)));
- if (TmpSize)
- EmitLifetimeEnd(TmpSize, TmpPtr);
- return Call;
- }
- // Any calls now have event arguments passed.
- if (NumArgs >= 7) {
- llvm::Type *EventTy = ConvertType(getContext().OCLClkEventTy);
- llvm::PointerType *EventPtrTy = EventTy->getPointerTo(
- CGM.getContext().getTargetAddressSpace(LangAS::opencl_generic));
- llvm::Value *NumEvents =
- Builder.CreateZExtOrTrunc(EmitScalarExpr(E->getArg(3)), Int32Ty);
- // Since SemaOpenCLBuiltinEnqueueKernel allows the fifth and sixth
- // arguments to be a null pointer constant (including a literal `0`), we
- // can take that into account and emit a null pointer directly.
- llvm::Value *EventWaitList = nullptr;
- if (E->getArg(4)->isNullPointerConstant(
- getContext(), Expr::NPC_ValueDependentIsNotNull)) {
- EventWaitList = llvm::ConstantPointerNull::get(EventPtrTy);
- } else {
- EventWaitList = E->getArg(4)->getType()->isArrayType()
- ? EmitArrayToPointerDecay(E->getArg(4)).getPointer()
- : EmitScalarExpr(E->getArg(4));
- // Convert to generic address space.
- EventWaitList = Builder.CreatePointerCast(EventWaitList, EventPtrTy);
- }
- llvm::Value *EventRet = nullptr;
- if (E->getArg(5)->isNullPointerConstant(
- getContext(), Expr::NPC_ValueDependentIsNotNull)) {
- EventRet = llvm::ConstantPointerNull::get(EventPtrTy);
- } else {
- EventRet =
- Builder.CreatePointerCast(EmitScalarExpr(E->getArg(5)), EventPtrTy);
- }
- auto Info =
- CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(6));
- llvm::Value *Kernel =
- Builder.CreatePointerCast(Info.Kernel, GenericVoidPtrTy);
- llvm::Value *Block =
- Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
- std::vector<llvm::Type *> ArgTys = {
- QueueTy, Int32Ty, RangeTy, Int32Ty,
- EventPtrTy, EventPtrTy, GenericVoidPtrTy, GenericVoidPtrTy};
- std::vector<llvm::Value *> Args = {Queue, Flags, Range,
- NumEvents, EventWaitList, EventRet,
- Kernel, Block};
- if (NumArgs == 7) {
- // Has events but no variadics.
- Name = "__enqueue_kernel_basic_events";
- llvm::FunctionType *FTy = llvm::FunctionType::get(
- Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false);
- return RValue::get(
- Builder.CreateCall(CGM.CreateRuntimeFunction(FTy, Name),
- llvm::ArrayRef<llvm::Value *>(Args)));
- }
- // Has event info and variadics.
- // Pass the number of variadics to the runtime function too.
- Args.push_back(ConstantInt::get(Int32Ty, NumArgs - 7));
- ArgTys.push_back(Int32Ty);
- Name = "__enqueue_kernel_events_varargs";
- llvm::Value *ElemPtr, *TmpSize, *TmpPtr;
- std::tie(ElemPtr, TmpSize, TmpPtr) = CreateArrayForSizeVar(7);
- Args.push_back(ElemPtr);
- ArgTys.push_back(ElemPtr->getType());
- llvm::FunctionType *FTy = llvm::FunctionType::get(
- Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false);
- auto Call =
- RValue::get(Builder.CreateCall(CGM.CreateRuntimeFunction(FTy, Name),
- llvm::ArrayRef<llvm::Value *>(Args)));
- if (TmpSize)
- EmitLifetimeEnd(TmpSize, TmpPtr);
- return Call;
- }
- LLVM_FALLTHROUGH;
- }
- // OpenCL v2.0 s6.13.17.6 - Kernel query functions need bitcast of block
- // parameter.
- case Builtin::BIget_kernel_work_group_size: {
- llvm::Type *GenericVoidPtrTy = Builder.getInt8PtrTy(
- getContext().getTargetAddressSpace(LangAS::opencl_generic));
- auto Info =
- CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(0));
- Value *Kernel = Builder.CreatePointerCast(Info.Kernel, GenericVoidPtrTy);
- Value *Arg = Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
- return RValue::get(Builder.CreateCall(
- CGM.CreateRuntimeFunction(
- llvm::FunctionType::get(IntTy, {GenericVoidPtrTy, GenericVoidPtrTy},
- false),
- "__get_kernel_work_group_size_impl"),
- {Kernel, Arg}));
- }
- case Builtin::BIget_kernel_preferred_work_group_size_multiple: {
- llvm::Type *GenericVoidPtrTy = Builder.getInt8PtrTy(
- getContext().getTargetAddressSpace(LangAS::opencl_generic));
- auto Info =
- CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(0));
- Value *Kernel = Builder.CreatePointerCast(Info.Kernel, GenericVoidPtrTy);
- Value *Arg = Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
- return RValue::get(Builder.CreateCall(
- CGM.CreateRuntimeFunction(
- llvm::FunctionType::get(IntTy, {GenericVoidPtrTy, GenericVoidPtrTy},
- false),
- "__get_kernel_preferred_work_group_size_multiple_impl"),
- {Kernel, Arg}));
- }
- case Builtin::BIget_kernel_max_sub_group_size_for_ndrange:
- case Builtin::BIget_kernel_sub_group_count_for_ndrange: {
- llvm::Type *GenericVoidPtrTy = Builder.getInt8PtrTy(
- getContext().getTargetAddressSpace(LangAS::opencl_generic));
- LValue NDRangeL = EmitAggExprToLValue(E->getArg(0));
- llvm::Value *NDRange = NDRangeL.getAddress().getPointer();
- auto Info =
- CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(1));
- Value *Kernel = Builder.CreatePointerCast(Info.Kernel, GenericVoidPtrTy);
- Value *Block = Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
- const char *Name =
- BuiltinID == Builtin::BIget_kernel_max_sub_group_size_for_ndrange
- ? "__get_kernel_max_sub_group_size_for_ndrange_impl"
- : "__get_kernel_sub_group_count_for_ndrange_impl";
- return RValue::get(Builder.CreateCall(
- CGM.CreateRuntimeFunction(
- llvm::FunctionType::get(
- IntTy, {NDRange->getType(), GenericVoidPtrTy, GenericVoidPtrTy},
- false),
- Name),
- {NDRange, Kernel, Block}));
- }
- case Builtin::BI__builtin_store_half:
- case Builtin::BI__builtin_store_halff: {
- Value *Val = EmitScalarExpr(E->getArg(0));
- Address Address = EmitPointerWithAlignment(E->getArg(1));
- Value *HalfVal = Builder.CreateFPTrunc(Val, Builder.getHalfTy());
- return RValue::get(Builder.CreateStore(HalfVal, Address));
- }
- case Builtin::BI__builtin_load_half: {
- Address Address = EmitPointerWithAlignment(E->getArg(0));
- Value *HalfVal = Builder.CreateLoad(Address);
- return RValue::get(Builder.CreateFPExt(HalfVal, Builder.getDoubleTy()));
- }
- case Builtin::BI__builtin_load_halff: {
- Address Address = EmitPointerWithAlignment(E->getArg(0));
- Value *HalfVal = Builder.CreateLoad(Address);
- return RValue::get(Builder.CreateFPExt(HalfVal, Builder.getFloatTy()));
- }
- case Builtin::BIprintf:
- if (getTarget().getTriple().isNVPTX())
- return EmitNVPTXDevicePrintfCallExpr(E, ReturnValue);
- break;
- case Builtin::BI__builtin_canonicalize:
- case Builtin::BI__builtin_canonicalizef:
- case Builtin::BI__builtin_canonicalizel:
- return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::canonicalize));
- case Builtin::BI__builtin_thread_pointer: {
- if (!getContext().getTargetInfo().isTLSSupported())
- CGM.ErrorUnsupported(E, "__builtin_thread_pointer");
- // Fall through - it's already mapped to the intrinsic by GCCBuiltin.
- break;
- }
- case Builtin::BI__builtin_os_log_format:
- return emitBuiltinOSLogFormat(*E);
- case Builtin::BI__xray_customevent: {
- if (!ShouldXRayInstrumentFunction())
- return RValue::getIgnored();
- if (!CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
- XRayInstrKind::Custom))
- return RValue::getIgnored();
- if (const auto *XRayAttr = CurFuncDecl->getAttr<XRayInstrumentAttr>())
- if (XRayAttr->neverXRayInstrument() && !AlwaysEmitXRayCustomEvents())
- return RValue::getIgnored();
- Function *F = CGM.getIntrinsic(Intrinsic::xray_customevent);
- auto FTy = F->getFunctionType();
- auto Arg0 = E->getArg(0);
- auto Arg0Val = EmitScalarExpr(Arg0);
- auto Arg0Ty = Arg0->getType();
- auto PTy0 = FTy->getParamType(0);
- if (PTy0 != Arg0Val->getType()) {
- if (Arg0Ty->isArrayType())
- Arg0Val = EmitArrayToPointerDecay(Arg0).getPointer();
- else
- Arg0Val = Builder.CreatePointerCast(Arg0Val, PTy0);
- }
- auto Arg1 = EmitScalarExpr(E->getArg(1));
- auto PTy1 = FTy->getParamType(1);
- if (PTy1 != Arg1->getType())
- Arg1 = Builder.CreateTruncOrBitCast(Arg1, PTy1);
- return RValue::get(Builder.CreateCall(F, {Arg0Val, Arg1}));
- }
- case Builtin::BI__xray_typedevent: {
- // TODO: There should be a way to always emit events even if the current
- // function is not instrumented. Losing events in a stream can cripple
- // a trace.
- if (!ShouldXRayInstrumentFunction())
- return RValue::getIgnored();
- if (!CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
- XRayInstrKind::Typed))
- return RValue::getIgnored();
- if (const auto *XRayAttr = CurFuncDecl->getAttr<XRayInstrumentAttr>())
- if (XRayAttr->neverXRayInstrument() && !AlwaysEmitXRayTypedEvents())
- return RValue::getIgnored();
- Function *F = CGM.getIntrinsic(Intrinsic::xray_typedevent);
- auto FTy = F->getFunctionType();
- auto Arg0 = EmitScalarExpr(E->getArg(0));
- auto PTy0 = FTy->getParamType(0);
- if (PTy0 != Arg0->getType())
- Arg0 = Builder.CreateTruncOrBitCast(Arg0, PTy0);
- auto Arg1 = E->getArg(1);
- auto Arg1Val = EmitScalarExpr(Arg1);
- auto Arg1Ty = Arg1->getType();
- auto PTy1 = FTy->getParamType(1);
- if (PTy1 != Arg1Val->getType()) {
- if (Arg1Ty->isArrayType())
- Arg1Val = EmitArrayToPointerDecay(Arg1).getPointer();
- else
- Arg1Val = Builder.CreatePointerCast(Arg1Val, PTy1);
- }
- auto Arg2 = EmitScalarExpr(E->getArg(2));
- auto PTy2 = FTy->getParamType(2);
- if (PTy2 != Arg2->getType())
- Arg2 = Builder.CreateTruncOrBitCast(Arg2, PTy2);
- return RValue::get(Builder.CreateCall(F, {Arg0, Arg1Val, Arg2}));
- }
- case Builtin::BI__builtin_ms_va_start:
- case Builtin::BI__builtin_ms_va_end:
- return RValue::get(
- EmitVAStartEnd(EmitMSVAListRef(E->getArg(0)).getPointer(),
- BuiltinID == Builtin::BI__builtin_ms_va_start));
- case Builtin::BI__builtin_ms_va_copy: {
- // Lower this manually. We can't reliably determine whether or not any
- // given va_copy() is for a Win64 va_list from the calling convention
- // alone, because it's legal to do this from a System V ABI function.
- // With opaque pointer types, we won't have enough information in LLVM
- // IR to determine this from the argument types, either. Best to do it
- // now, while we have enough information.
- Address DestAddr = EmitMSVAListRef(E->getArg(0));
- Address SrcAddr = EmitMSVAListRef(E->getArg(1));
- llvm::Type *BPP = Int8PtrPtrTy;
- DestAddr = Address(Builder.CreateBitCast(DestAddr.getPointer(), BPP, "cp"),
- DestAddr.getAlignment());
- SrcAddr = Address(Builder.CreateBitCast(SrcAddr.getPointer(), BPP, "ap"),
- SrcAddr.getAlignment());
- Value *ArgPtr = Builder.CreateLoad(SrcAddr, "ap.val");
- return RValue::get(Builder.CreateStore(ArgPtr, DestAddr));
- }
- }
- // If this is an alias for a lib function (e.g. __builtin_sin), emit
- // the call using the normal call path, but using the unmangled
- // version of the function name.
- if (getContext().BuiltinInfo.isLibFunction(BuiltinID))
- return emitLibraryCall(*this, FD, E,
- CGM.getBuiltinLibFunction(FD, BuiltinID));
- // If this is a predefined lib function (e.g. malloc), emit the call
- // using exactly the normal call path.
- if (getContext().BuiltinInfo.isPredefinedLibFunction(BuiltinID))
- return emitLibraryCall(*this, FD, E,
- cast<llvm::Constant>(EmitScalarExpr(E->getCallee())));
- // Check that a call to a target specific builtin has the correct target
- // features.
- // This check is down here so that it is skipped for non-target-specific
- // builtins; if generic builtins ever start to require target features, it
- // can move up to the beginning of the function.
- checkTargetFeatures(E, FD);
- if (unsigned VectorWidth = getContext().BuiltinInfo.getRequiredVectorWidth(BuiltinID))
- LargestVectorWidth = std::max(LargestVectorWidth, VectorWidth);
- // See if we have a target specific intrinsic.
- const char *Name = getContext().BuiltinInfo.getName(BuiltinID);
- Intrinsic::ID IntrinsicID = Intrinsic::not_intrinsic;
- StringRef Prefix =
- llvm::Triple::getArchTypePrefix(getTarget().getTriple().getArch());
- if (!Prefix.empty()) {
- IntrinsicID = Intrinsic::getIntrinsicForGCCBuiltin(Prefix.data(), Name);
- // NOTE: we don't need a compatibility flag check here, since the MS
- // builtins are declared in Builtins*.def via LANGBUILTIN with
- // ALL_MS_LANGUAGES and have already been filtered out earlier.
- if (IntrinsicID == Intrinsic::not_intrinsic)
- IntrinsicID = Intrinsic::getIntrinsicForMSBuiltin(Prefix.data(), Name);
- }
- if (IntrinsicID != Intrinsic::not_intrinsic) {
- SmallVector<Value*, 16> Args;
- // Find out if any arguments are required to be integer constant
- // expressions.
- unsigned ICEArguments = 0;
- ASTContext::GetBuiltinTypeError Error;
- getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
- assert(Error == ASTContext::GE_None && "Should not codegen an error");
- Function *F = CGM.getIntrinsic(IntrinsicID);
- llvm::FunctionType *FTy = F->getFunctionType();
- for (unsigned i = 0, e = E->getNumArgs(); i != e; ++i) {
- Value *ArgValue;
- // If this is a normal argument, just emit it as a scalar.
- if ((ICEArguments & (1 << i)) == 0) {
- ArgValue = EmitScalarExpr(E->getArg(i));
- } else {
- // If this is required to be a constant, constant fold it so that we
- // know that the generated intrinsic gets a ConstantInt.
- llvm::APSInt Result;
- bool IsConst = E->getArg(i)->isIntegerConstantExpr(Result,getContext());
- assert(IsConst && "Constant arg isn't actually constant?");
- (void)IsConst;
- ArgValue = llvm::ConstantInt::get(getLLVMContext(), Result);
- }
- // If the intrinsic arg type is different from the builtin arg type
- // we need to do a bit cast.
- llvm::Type *PTy = FTy->getParamType(i);
- if (PTy != ArgValue->getType()) {
- // XXX - vector of pointers?
- if (auto *PtrTy = dyn_cast<llvm::PointerType>(PTy)) {
- if (PtrTy->getAddressSpace() !=
- ArgValue->getType()->getPointerAddressSpace()) {
- ArgValue = Builder.CreateAddrSpaceCast(
- ArgValue,
- ArgValue->getType()->getPointerTo(PtrTy->getAddressSpace()));
- }
- }
- assert(ArgValue->getType()->canLosslesslyBitCastTo(PTy) &&
- "Must be able to losslessly bit cast to param");
- ArgValue = Builder.CreateBitCast(ArgValue, PTy);
- }
- Args.push_back(ArgValue);
- }
- Value *V = Builder.CreateCall(F, Args);
- QualType BuiltinRetType = E->getType();
- llvm::Type *RetTy = VoidTy;
- if (!BuiltinRetType->isVoidType())
- RetTy = ConvertType(BuiltinRetType);
- if (RetTy != V->getType()) {
- // XXX - vector of pointers?
- if (auto *PtrTy = dyn_cast<llvm::PointerType>(RetTy)) {
- if (PtrTy->getAddressSpace() != V->getType()->getPointerAddressSpace()) {
- V = Builder.CreateAddrSpaceCast(
- V, V->getType()->getPointerTo(PtrTy->getAddressSpace()));
- }
- }
- assert(V->getType()->canLosslesslyBitCastTo(RetTy) &&
- "Must be able to losslessly bit cast result type");
- V = Builder.CreateBitCast(V, RetTy);
- }
- return RValue::get(V);
- }
- // See if we have a target specific builtin that needs to be lowered.
- if (Value *V = EmitTargetBuiltinExpr(BuiltinID, E))
- return RValue::get(V);
- ErrorUnsupported(E, "builtin function");
- // Unknown builtin, for now just dump it out and return undef.
- return GetUndefRValue(E->getType());
- }
- static Value *EmitTargetArchBuiltinExpr(CodeGenFunction *CGF,
- unsigned BuiltinID, const CallExpr *E,
- llvm::Triple::ArchType Arch) {
- switch (Arch) {
- case llvm::Triple::arm:
- case llvm::Triple::armeb:
- case llvm::Triple::thumb:
- case llvm::Triple::thumbeb:
- return CGF->EmitARMBuiltinExpr(BuiltinID, E, Arch);
- case llvm::Triple::aarch64:
- case llvm::Triple::aarch64_be:
- return CGF->EmitAArch64BuiltinExpr(BuiltinID, E, Arch);
- case llvm::Triple::x86:
- case llvm::Triple::x86_64:
- return CGF->EmitX86BuiltinExpr(BuiltinID, E);
- case llvm::Triple::ppc:
- case llvm::Triple::ppc64:
- case llvm::Triple::ppc64le:
- return CGF->EmitPPCBuiltinExpr(BuiltinID, E);
- case llvm::Triple::r600:
- case llvm::Triple::amdgcn:
- return CGF->EmitAMDGPUBuiltinExpr(BuiltinID, E);
- case llvm::Triple::systemz:
- return CGF->EmitSystemZBuiltinExpr(BuiltinID, E);
- case llvm::Triple::nvptx:
- case llvm::Triple::nvptx64:
- return CGF->EmitNVPTXBuiltinExpr(BuiltinID, E);
- case llvm::Triple::wasm32:
- case llvm::Triple::wasm64:
- return CGF->EmitWebAssemblyBuiltinExpr(BuiltinID, E);
- case llvm::Triple::hexagon:
- return CGF->EmitHexagonBuiltinExpr(BuiltinID, E);
- default:
- return nullptr;
- }
- }
- Value *CodeGenFunction::EmitTargetBuiltinExpr(unsigned BuiltinID,
- const CallExpr *E) {
- if (getContext().BuiltinInfo.isAuxBuiltinID(BuiltinID)) {
- assert(getContext().getAuxTargetInfo() && "Missing aux target info");
- return EmitTargetArchBuiltinExpr(
- this, getContext().BuiltinInfo.getAuxBuiltinID(BuiltinID), E,
- getContext().getAuxTargetInfo()->getTriple().getArch());
- }
- return EmitTargetArchBuiltinExpr(this, BuiltinID, E,
- getTarget().getTriple().getArch());
- }
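- // Map a NeonTypeFlags descriptor to the corresponding LLVM vector type.
- // "Quad" types occupy a full 128-bit Q register and thus have twice the
- // lane count; V1Ty forces a single-element vector, as used by some scalar
- // intrinsics.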
- static llvm::VectorType *GetNeonType(CodeGenFunction *CGF,
- NeonTypeFlags TypeFlags,
- bool HasLegalHalfType=true,
- bool V1Ty=false) {
- int IsQuad = TypeFlags.isQuad();
- switch (TypeFlags.getEltType()) {
- case NeonTypeFlags::Int8:
- case NeonTypeFlags::Poly8:
- return llvm::VectorType::get(CGF->Int8Ty, V1Ty ? 1 : (8 << IsQuad));
- case NeonTypeFlags::Int16:
- case NeonTypeFlags::Poly16:
- return llvm::VectorType::get(CGF->Int16Ty, V1Ty ? 1 : (4 << IsQuad));
- case NeonTypeFlags::Float16:
- if (HasLegalHalfType)
- return llvm::VectorType::get(CGF->HalfTy, V1Ty ? 1 : (4 << IsQuad));
- else
- return llvm::VectorType::get(CGF->Int16Ty, V1Ty ? 1 : (4 << IsQuad));
- case NeonTypeFlags::Int32:
- return llvm::VectorType::get(CGF->Int32Ty, V1Ty ? 1 : (2 << IsQuad));
- case NeonTypeFlags::Int64:
- case NeonTypeFlags::Poly64:
- return llvm::VectorType::get(CGF->Int64Ty, V1Ty ? 1 : (1 << IsQuad));
- case NeonTypeFlags::Poly128:
- // FIXME: i128 and f128 are not fully supported in Clang and LLVM; much of
- // the i128/f128 API is missing. So we use v16i8 to represent poly128 and
- // rely on pattern matching.
- return llvm::VectorType::get(CGF->Int8Ty, 16);
- case NeonTypeFlags::Float32:
- return llvm::VectorType::get(CGF->FloatTy, V1Ty ? 1 : (2 << IsQuad));
- case NeonTypeFlags::Float64:
- return llvm::VectorType::get(CGF->DoubleTy, V1Ty ? 1 : (1 << IsQuad));
- }
- llvm_unreachable("Unknown vector element type!");
- }
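- // Return the floating-point vector type with the same lane count and lane
- // width as the given integer NEON type (i16 -> f16, i32 -> f32,
- // i64 -> f64).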
- static llvm::VectorType *GetFloatNeonType(CodeGenFunction *CGF,
- NeonTypeFlags IntTypeFlags) {
- int IsQuad = IntTypeFlags.isQuad();
- switch (IntTypeFlags.getEltType()) {
- case NeonTypeFlags::Int16:
- return llvm::VectorType::get(CGF->HalfTy, (4 << IsQuad));
- case NeonTypeFlags::Int32:
- return llvm::VectorType::get(CGF->FloatTy, (2 << IsQuad));
- case NeonTypeFlags::Int64:
- return llvm::VectorType::get(CGF->DoubleTy, (1 << IsQuad));
- default:
- llvm_unreachable("Type can't be converted to floating-point!");
- }
- }
- Value *CodeGenFunction::EmitNeonSplat(Value *V, Constant *C) {
- unsigned nElts = V->getType()->getVectorNumElements();
- Value* SV = llvm::ConstantVector::getSplat(nElts, C);
- return Builder.CreateShuffleVector(V, V, SV, "lane");
- }
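- // Emit a call to a NEON intrinsic, bitcasting each operand to the type the
- // intrinsic expects. If 'shift' is nonzero, that operand is instead
- // materialized as a constant shift-amount vector (negated for right
- // shifts).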
- Value *CodeGenFunction::EmitNeonCall(Function *F, SmallVectorImpl<Value*> &Ops,
- const char *name,
- unsigned shift, bool rightshift) {
- unsigned j = 0;
- for (Function::const_arg_iterator ai = F->arg_begin(), ae = F->arg_end();
- ai != ae; ++ai, ++j)
- if (shift > 0 && shift == j)
- Ops[j] = EmitNeonShiftVector(Ops[j], ai->getType(), rightshift);
- else
- Ops[j] = Builder.CreateBitCast(Ops[j], ai->getType(), name);
- return Builder.CreateCall(F, Ops, name);
- }
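- // Build a constant shift amount of the given vector type, splatting the
- // (possibly negated) scalar amount across all lanes.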
- Value *CodeGenFunction::EmitNeonShiftVector(Value *V, llvm::Type *Ty,
- bool neg) {
- int SV = cast<ConstantInt>(V)->getSExtValue();
- return ConstantInt::get(Ty, neg ? -SV : SV);
- }
- // Right-shift a vector by a constant.
- Value *CodeGenFunction::EmitNeonRShiftImm(Value *Vec, Value *Shift,
- llvm::Type *Ty, bool usgn,
- const char *name) {
- llvm::VectorType *VTy = cast<llvm::VectorType>(Ty);
- int ShiftAmt = cast<ConstantInt>(Shift)->getSExtValue();
- int EltSize = VTy->getScalarSizeInBits();
- Vec = Builder.CreateBitCast(Vec, Ty);
- // lshr/ashr are undefined when the shift amount is equal to the vector
- // element size.
- if (ShiftAmt == EltSize) {
- if (usgn) {
- // Right-shifting an unsigned value by its size yields 0.
- return llvm::ConstantAggregateZero::get(VTy);
- } else {
- // Right-shifting a signed value by its size is equivalent
- // to a shift of size-1.
- --ShiftAmt;
- Shift = ConstantInt::get(VTy->getElementType(), ShiftAmt);
- }
- }
- Shift = EmitNeonShiftVector(Shift, Ty, false);
- if (usgn)
- return Builder.CreateLShr(Vec, Shift, name);
- else
- return Builder.CreateAShr(Vec, Shift, name);
- }
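- // e.g. a signed vshr_n by 32 on <2 x i32> elements is clamped to an
- // ashr by 31, while the unsigned form folds to zeroinitializer.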
- enum {
- AddRetType = (1 << 0),
- Add1ArgType = (1 << 1),
- Add2ArgTypes = (1 << 2),
- VectorizeRetType = (1 << 3),
- VectorizeArgTypes = (1 << 4),
- InventFloatType = (1 << 5),
- UnsignedAlts = (1 << 6),
- Use64BitVectors = (1 << 7),
- Use128BitVectors = (1 << 8),
- Vectorize1ArgType = Add1ArgType | VectorizeArgTypes,
- VectorRet = AddRetType | VectorizeRetType,
- VectorRetGetArgs01 =
- AddRetType | Add2ArgTypes | VectorizeRetType | VectorizeArgTypes,
- FpCmpzModifiers =
- AddRetType | VectorizeRetType | Add1ArgType | InventFloatType
- };
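- // For example, FpCmpzModifiers overloads an intrinsic on its vectorized
- // return type, its first argument type, and an invented float type; the
- // name suggests it is meant for floating-point compare-against-zero
- // builtins, whose float operand type cannot be recovered from the
- // integer result type.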
- namespace {
- struct NeonIntrinsicInfo {
- const char *NameHint;
- unsigned BuiltinID;
- unsigned LLVMIntrinsic;
- unsigned AltLLVMIntrinsic;
- unsigned TypeModifier;
- bool operator<(unsigned RHSBuiltinID) const {
- return BuiltinID < RHSBuiltinID;
- }
- bool operator<(const NeonIntrinsicInfo &TE) const {
- return BuiltinID < TE.BuiltinID;
- }
- };
- } // end anonymous namespace
- #define NEONMAP0(NameBase) \
- { #NameBase, NEON::BI__builtin_neon_ ## NameBase, 0, 0, 0 }
- #define NEONMAP1(NameBase, LLVMIntrinsic, TypeModifier) \
- { #NameBase, NEON::BI__builtin_neon_ ## NameBase, \
- Intrinsic::LLVMIntrinsic, 0, TypeModifier }
- #define NEONMAP2(NameBase, LLVMIntrinsic, AltLLVMIntrinsic, TypeModifier) \
- { #NameBase, NEON::BI__builtin_neon_ ## NameBase, \
- Intrinsic::LLVMIntrinsic, Intrinsic::AltLLVMIntrinsic, \
- TypeModifier }
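- // For instance, NEONMAP1(vabs_v, arm_neon_vabs, 0) expands to:
- //   { "vabs_v", NEON::BI__builtin_neon_vabs_v,
- //     Intrinsic::arm_neon_vabs, 0, 0 }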
- static const NeonIntrinsicInfo ARMSIMDIntrinsicMap[] = {
- NEONMAP2(vabd_v, arm_neon_vabdu, arm_neon_vabds, Add1ArgType | UnsignedAlts),
- NEONMAP2(vabdq_v, arm_neon_vabdu, arm_neon_vabds, Add1ArgType | UnsignedAlts),
- NEONMAP1(vabs_v, arm_neon_vabs, 0),
- NEONMAP1(vabsq_v, arm_neon_vabs, 0),
- NEONMAP0(vaddhn_v),
- NEONMAP1(vaesdq_v, arm_neon_aesd, 0),
- NEONMAP1(vaeseq_v, arm_neon_aese, 0),
- NEONMAP1(vaesimcq_v, arm_neon_aesimc, 0),
- NEONMAP1(vaesmcq_v, arm_neon_aesmc, 0),
- NEONMAP1(vbsl_v, arm_neon_vbsl, AddRetType),
- NEONMAP1(vbslq_v, arm_neon_vbsl, AddRetType),
- NEONMAP1(vcage_v, arm_neon_vacge, 0),
- NEONMAP1(vcageq_v, arm_neon_vacge, 0),
- NEONMAP1(vcagt_v, arm_neon_vacgt, 0),
- NEONMAP1(vcagtq_v, arm_neon_vacgt, 0),
- NEONMAP1(vcale_v, arm_neon_vacge, 0),
- NEONMAP1(vcaleq_v, arm_neon_vacge, 0),
- NEONMAP1(vcalt_v, arm_neon_vacgt, 0),
- NEONMAP1(vcaltq_v, arm_neon_vacgt, 0),
- NEONMAP0(vceqz_v),
- NEONMAP0(vceqzq_v),
- NEONMAP0(vcgez_v),
- NEONMAP0(vcgezq_v),
- NEONMAP0(vcgtz_v),
- NEONMAP0(vcgtzq_v),
- NEONMAP0(vclez_v),
- NEONMAP0(vclezq_v),
- NEONMAP1(vcls_v, arm_neon_vcls, Add1ArgType),
- NEONMAP1(vclsq_v, arm_neon_vcls, Add1ArgType),
- NEONMAP0(vcltz_v),
- NEONMAP0(vcltzq_v),
- NEONMAP1(vclz_v, ctlz, Add1ArgType),
- NEONMAP1(vclzq_v, ctlz, Add1ArgType),
- NEONMAP1(vcnt_v, ctpop, Add1ArgType),
- NEONMAP1(vcntq_v, ctpop, Add1ArgType),
- NEONMAP1(vcvt_f16_f32, arm_neon_vcvtfp2hf, 0),
- NEONMAP0(vcvt_f16_v),
- NEONMAP1(vcvt_f32_f16, arm_neon_vcvthf2fp, 0),
- NEONMAP0(vcvt_f32_v),
- NEONMAP2(vcvt_n_f16_v, arm_neon_vcvtfxu2fp, arm_neon_vcvtfxs2fp, 0),
- NEONMAP2(vcvt_n_f32_v, arm_neon_vcvtfxu2fp, arm_neon_vcvtfxs2fp, 0),
- NEONMAP1(vcvt_n_s16_v, arm_neon_vcvtfp2fxs, 0),
- NEONMAP1(vcvt_n_s32_v, arm_neon_vcvtfp2fxs, 0),
- NEONMAP1(vcvt_n_s64_v, arm_neon_vcvtfp2fxs, 0),
- NEONMAP1(vcvt_n_u16_v, arm_neon_vcvtfp2fxu, 0),
- NEONMAP1(vcvt_n_u32_v, arm_neon_vcvtfp2fxu, 0),
- NEONMAP1(vcvt_n_u64_v, arm_neon_vcvtfp2fxu, 0),
- NEONMAP0(vcvt_s16_v),
- NEONMAP0(vcvt_s32_v),
- NEONMAP0(vcvt_s64_v),
- NEONMAP0(vcvt_u16_v),
- NEONMAP0(vcvt_u32_v),
- NEONMAP0(vcvt_u64_v),
- NEONMAP1(vcvta_s16_v, arm_neon_vcvtas, 0),
- NEONMAP1(vcvta_s32_v, arm_neon_vcvtas, 0),
- NEONMAP1(vcvta_s64_v, arm_neon_vcvtas, 0),
- NEONMAP1(vcvta_u16_v, arm_neon_vcvtau, 0),
- NEONMAP1(vcvta_u32_v, arm_neon_vcvtau, 0),
- NEONMAP1(vcvta_u64_v, arm_neon_vcvtau, 0),
- NEONMAP1(vcvtaq_s16_v, arm_neon_vcvtas, 0),
- NEONMAP1(vcvtaq_s32_v, arm_neon_vcvtas, 0),
- NEONMAP1(vcvtaq_s64_v, arm_neon_vcvtas, 0),
- NEONMAP1(vcvtaq_u16_v, arm_neon_vcvtau, 0),
- NEONMAP1(vcvtaq_u32_v, arm_neon_vcvtau, 0),
- NEONMAP1(vcvtaq_u64_v, arm_neon_vcvtau, 0),
- NEONMAP1(vcvtm_s16_v, arm_neon_vcvtms, 0),
- NEONMAP1(vcvtm_s32_v, arm_neon_vcvtms, 0),
- NEONMAP1(vcvtm_s64_v, arm_neon_vcvtms, 0),
- NEONMAP1(vcvtm_u16_v, arm_neon_vcvtmu, 0),
- NEONMAP1(vcvtm_u32_v, arm_neon_vcvtmu, 0),
- NEONMAP1(vcvtm_u64_v, arm_neon_vcvtmu, 0),
- NEONMAP1(vcvtmq_s16_v, arm_neon_vcvtms, 0),
- NEONMAP1(vcvtmq_s32_v, arm_neon_vcvtms, 0),
- NEONMAP1(vcvtmq_s64_v, arm_neon_vcvtms, 0),
- NEONMAP1(vcvtmq_u16_v, arm_neon_vcvtmu, 0),
- NEONMAP1(vcvtmq_u32_v, arm_neon_vcvtmu, 0),
- NEONMAP1(vcvtmq_u64_v, arm_neon_vcvtmu, 0),
- NEONMAP1(vcvtn_s16_v, arm_neon_vcvtns, 0),
- NEONMAP1(vcvtn_s32_v, arm_neon_vcvtns, 0),
- NEONMAP1(vcvtn_s64_v, arm_neon_vcvtns, 0),
- NEONMAP1(vcvtn_u16_v, arm_neon_vcvtnu, 0),
- NEONMAP1(vcvtn_u32_v, arm_neon_vcvtnu, 0),
- NEONMAP1(vcvtn_u64_v, arm_neon_vcvtnu, 0),
- NEONMAP1(vcvtnq_s16_v, arm_neon_vcvtns, 0),
- NEONMAP1(vcvtnq_s32_v, arm_neon_vcvtns, 0),
- NEONMAP1(vcvtnq_s64_v, arm_neon_vcvtns, 0),
- NEONMAP1(vcvtnq_u16_v, arm_neon_vcvtnu, 0),
- NEONMAP1(vcvtnq_u32_v, arm_neon_vcvtnu, 0),
- NEONMAP1(vcvtnq_u64_v, arm_neon_vcvtnu, 0),
- NEONMAP1(vcvtp_s16_v, arm_neon_vcvtps, 0),
- NEONMAP1(vcvtp_s32_v, arm_neon_vcvtps, 0),
- NEONMAP1(vcvtp_s64_v, arm_neon_vcvtps, 0),
- NEONMAP1(vcvtp_u16_v, arm_neon_vcvtpu, 0),
- NEONMAP1(vcvtp_u32_v, arm_neon_vcvtpu, 0),
- NEONMAP1(vcvtp_u64_v, arm_neon_vcvtpu, 0),
- NEONMAP1(vcvtpq_s16_v, arm_neon_vcvtps, 0),
- NEONMAP1(vcvtpq_s32_v, arm_neon_vcvtps, 0),
- NEONMAP1(vcvtpq_s64_v, arm_neon_vcvtps, 0),
- NEONMAP1(vcvtpq_u16_v, arm_neon_vcvtpu, 0),
- NEONMAP1(vcvtpq_u32_v, arm_neon_vcvtpu, 0),
- NEONMAP1(vcvtpq_u64_v, arm_neon_vcvtpu, 0),
- NEONMAP0(vcvtq_f16_v),
- NEONMAP0(vcvtq_f32_v),
- NEONMAP2(vcvtq_n_f16_v, arm_neon_vcvtfxu2fp, arm_neon_vcvtfxs2fp, 0),
- NEONMAP2(vcvtq_n_f32_v, arm_neon_vcvtfxu2fp, arm_neon_vcvtfxs2fp, 0),
- NEONMAP1(vcvtq_n_s16_v, arm_neon_vcvtfp2fxs, 0),
- NEONMAP1(vcvtq_n_s32_v, arm_neon_vcvtfp2fxs, 0),
- NEONMAP1(vcvtq_n_s64_v, arm_neon_vcvtfp2fxs, 0),
- NEONMAP1(vcvtq_n_u16_v, arm_neon_vcvtfp2fxu, 0),
- NEONMAP1(vcvtq_n_u32_v, arm_neon_vcvtfp2fxu, 0),
- NEONMAP1(vcvtq_n_u64_v, arm_neon_vcvtfp2fxu, 0),
- NEONMAP0(vcvtq_s16_v),
- NEONMAP0(vcvtq_s32_v),
- NEONMAP0(vcvtq_s64_v),
- NEONMAP0(vcvtq_u16_v),
- NEONMAP0(vcvtq_u32_v),
- NEONMAP0(vcvtq_u64_v),
- NEONMAP2(vdot_v, arm_neon_udot, arm_neon_sdot, 0),
- NEONMAP2(vdotq_v, arm_neon_udot, arm_neon_sdot, 0),
- NEONMAP0(vext_v),
- NEONMAP0(vextq_v),
- NEONMAP0(vfma_v),
- NEONMAP0(vfmaq_v),
- NEONMAP2(vhadd_v, arm_neon_vhaddu, arm_neon_vhadds, Add1ArgType | UnsignedAlts),
- NEONMAP2(vhaddq_v, arm_neon_vhaddu, arm_neon_vhadds, Add1ArgType | UnsignedAlts),
- NEONMAP2(vhsub_v, arm_neon_vhsubu, arm_neon_vhsubs, Add1ArgType | UnsignedAlts),
- NEONMAP2(vhsubq_v, arm_neon_vhsubu, arm_neon_vhsubs, Add1ArgType | UnsignedAlts),
- NEONMAP0(vld1_dup_v),
- NEONMAP1(vld1_v, arm_neon_vld1, 0),
- NEONMAP1(vld1_x2_v, arm_neon_vld1x2, 0),
- NEONMAP1(vld1_x3_v, arm_neon_vld1x3, 0),
- NEONMAP1(vld1_x4_v, arm_neon_vld1x4, 0),
- NEONMAP0(vld1q_dup_v),
- NEONMAP1(vld1q_v, arm_neon_vld1, 0),
- NEONMAP1(vld1q_x2_v, arm_neon_vld1x2, 0),
- NEONMAP1(vld1q_x3_v, arm_neon_vld1x3, 0),
- NEONMAP1(vld1q_x4_v, arm_neon_vld1x4, 0),
- NEONMAP1(vld2_dup_v, arm_neon_vld2dup, 0),
- NEONMAP1(vld2_lane_v, arm_neon_vld2lane, 0),
- NEONMAP1(vld2_v, arm_neon_vld2, 0),
- NEONMAP1(vld2q_dup_v, arm_neon_vld2dup, 0),
- NEONMAP1(vld2q_lane_v, arm_neon_vld2lane, 0),
- NEONMAP1(vld2q_v, arm_neon_vld2, 0),
- NEONMAP1(vld3_dup_v, arm_neon_vld3dup, 0),
- NEONMAP1(vld3_lane_v, arm_neon_vld3lane, 0),
- NEONMAP1(vld3_v, arm_neon_vld3, 0),
- NEONMAP1(vld3q_dup_v, arm_neon_vld3dup, 0),
- NEONMAP1(vld3q_lane_v, arm_neon_vld3lane, 0),
- NEONMAP1(vld3q_v, arm_neon_vld3, 0),
- NEONMAP1(vld4_dup_v, arm_neon_vld4dup, 0),
- NEONMAP1(vld4_lane_v, arm_neon_vld4lane, 0),
- NEONMAP1(vld4_v, arm_neon_vld4, 0),
- NEONMAP1(vld4q_dup_v, arm_neon_vld4dup, 0),
- NEONMAP1(vld4q_lane_v, arm_neon_vld4lane, 0),
- NEONMAP1(vld4q_v, arm_neon_vld4, 0),
- NEONMAP2(vmax_v, arm_neon_vmaxu, arm_neon_vmaxs, Add1ArgType | UnsignedAlts),
- NEONMAP1(vmaxnm_v, arm_neon_vmaxnm, Add1ArgType),
- NEONMAP1(vmaxnmq_v, arm_neon_vmaxnm, Add1ArgType),
- NEONMAP2(vmaxq_v, arm_neon_vmaxu, arm_neon_vmaxs, Add1ArgType | UnsignedAlts),
- NEONMAP2(vmin_v, arm_neon_vminu, arm_neon_vmins, Add1ArgType | UnsignedAlts),
- NEONMAP1(vminnm_v, arm_neon_vminnm, Add1ArgType),
- NEONMAP1(vminnmq_v, arm_neon_vminnm, Add1ArgType),
- NEONMAP2(vminq_v, arm_neon_vminu, arm_neon_vmins, Add1ArgType | UnsignedAlts),
- NEONMAP0(vmovl_v),
- NEONMAP0(vmovn_v),
- NEONMAP1(vmul_v, arm_neon_vmulp, Add1ArgType),
- NEONMAP0(vmull_v),
- NEONMAP1(vmulq_v, arm_neon_vmulp, Add1ArgType),
- NEONMAP2(vpadal_v, arm_neon_vpadalu, arm_neon_vpadals, UnsignedAlts),
- NEONMAP2(vpadalq_v, arm_neon_vpadalu, arm_neon_vpadals, UnsignedAlts),
- NEONMAP1(vpadd_v, arm_neon_vpadd, Add1ArgType),
- NEONMAP2(vpaddl_v, arm_neon_vpaddlu, arm_neon_vpaddls, UnsignedAlts),
- NEONMAP2(vpaddlq_v, arm_neon_vpaddlu, arm_neon_vpaddls, UnsignedAlts),
- NEONMAP1(vpaddq_v, arm_neon_vpadd, Add1ArgType),
- NEONMAP2(vpmax_v, arm_neon_vpmaxu, arm_neon_vpmaxs, Add1ArgType | UnsignedAlts),
- NEONMAP2(vpmin_v, arm_neon_vpminu, arm_neon_vpmins, Add1ArgType | UnsignedAlts),
- NEONMAP1(vqabs_v, arm_neon_vqabs, Add1ArgType),
- NEONMAP1(vqabsq_v, arm_neon_vqabs, Add1ArgType),
- NEONMAP2(vqadd_v, arm_neon_vqaddu, arm_neon_vqadds, Add1ArgType | UnsignedAlts),
- NEONMAP2(vqaddq_v, arm_neon_vqaddu, arm_neon_vqadds, Add1ArgType | UnsignedAlts),
- NEONMAP2(vqdmlal_v, arm_neon_vqdmull, arm_neon_vqadds, 0),
- NEONMAP2(vqdmlsl_v, arm_neon_vqdmull, arm_neon_vqsubs, 0),
- NEONMAP1(vqdmulh_v, arm_neon_vqdmulh, Add1ArgType),
- NEONMAP1(vqdmulhq_v, arm_neon_vqdmulh, Add1ArgType),
- NEONMAP1(vqdmull_v, arm_neon_vqdmull, Add1ArgType),
- NEONMAP2(vqmovn_v, arm_neon_vqmovnu, arm_neon_vqmovns, Add1ArgType | UnsignedAlts),
- NEONMAP1(vqmovun_v, arm_neon_vqmovnsu, Add1ArgType),
- NEONMAP1(vqneg_v, arm_neon_vqneg, Add1ArgType),
- NEONMAP1(vqnegq_v, arm_neon_vqneg, Add1ArgType),
- NEONMAP1(vqrdmulh_v, arm_neon_vqrdmulh, Add1ArgType),
- NEONMAP1(vqrdmulhq_v, arm_neon_vqrdmulh, Add1ArgType),
- NEONMAP2(vqrshl_v, arm_neon_vqrshiftu, arm_neon_vqrshifts, Add1ArgType | UnsignedAlts),
- NEONMAP2(vqrshlq_v, arm_neon_vqrshiftu, arm_neon_vqrshifts, Add1ArgType | UnsignedAlts),
- NEONMAP2(vqshl_n_v, arm_neon_vqshiftu, arm_neon_vqshifts, UnsignedAlts),
- NEONMAP2(vqshl_v, arm_neon_vqshiftu, arm_neon_vqshifts, Add1ArgType | UnsignedAlts),
- NEONMAP2(vqshlq_n_v, arm_neon_vqshiftu, arm_neon_vqshifts, UnsignedAlts),
- NEONMAP2(vqshlq_v, arm_neon_vqshiftu, arm_neon_vqshifts, Add1ArgType | UnsignedAlts),
- NEONMAP1(vqshlu_n_v, arm_neon_vqshiftsu, 0),
- NEONMAP1(vqshluq_n_v, arm_neon_vqshiftsu, 0),
- NEONMAP2(vqsub_v, arm_neon_vqsubu, arm_neon_vqsubs, Add1ArgType | UnsignedAlts),
- NEONMAP2(vqsubq_v, arm_neon_vqsubu, arm_neon_vqsubs, Add1ArgType | UnsignedAlts),
- NEONMAP1(vraddhn_v, arm_neon_vraddhn, Add1ArgType),
- NEONMAP2(vrecpe_v, arm_neon_vrecpe, arm_neon_vrecpe, 0),
- NEONMAP2(vrecpeq_v, arm_neon_vrecpe, arm_neon_vrecpe, 0),
- NEONMAP1(vrecps_v, arm_neon_vrecps, Add1ArgType),
- NEONMAP1(vrecpsq_v, arm_neon_vrecps, Add1ArgType),
- NEONMAP2(vrhadd_v, arm_neon_vrhaddu, arm_neon_vrhadds, Add1ArgType | UnsignedAlts),
- NEONMAP2(vrhaddq_v, arm_neon_vrhaddu, arm_neon_vrhadds, Add1ArgType | UnsignedAlts),
- NEONMAP1(vrnd_v, arm_neon_vrintz, Add1ArgType),
- NEONMAP1(vrnda_v, arm_neon_vrinta, Add1ArgType),
- NEONMAP1(vrndaq_v, arm_neon_vrinta, Add1ArgType),
- NEONMAP0(vrndi_v),
- NEONMAP0(vrndiq_v),
- NEONMAP1(vrndm_v, arm_neon_vrintm, Add1ArgType),
- NEONMAP1(vrndmq_v, arm_neon_vrintm, Add1ArgType),
- NEONMAP1(vrndn_v, arm_neon_vrintn, Add1ArgType),
- NEONMAP1(vrndnq_v, arm_neon_vrintn, Add1ArgType),
- NEONMAP1(vrndp_v, arm_neon_vrintp, Add1ArgType),
- NEONMAP1(vrndpq_v, arm_neon_vrintp, Add1ArgType),
- NEONMAP1(vrndq_v, arm_neon_vrintz, Add1ArgType),
- NEONMAP1(vrndx_v, arm_neon_vrintx, Add1ArgType),
- NEONMAP1(vrndxq_v, arm_neon_vrintx, Add1ArgType),
- NEONMAP2(vrshl_v, arm_neon_vrshiftu, arm_neon_vrshifts, Add1ArgType | UnsignedAlts),
- NEONMAP2(vrshlq_v, arm_neon_vrshiftu, arm_neon_vrshifts, Add1ArgType | UnsignedAlts),
- NEONMAP2(vrshr_n_v, arm_neon_vrshiftu, arm_neon_vrshifts, UnsignedAlts),
- NEONMAP2(vrshrq_n_v, arm_neon_vrshiftu, arm_neon_vrshifts, UnsignedAlts),
- NEONMAP2(vrsqrte_v, arm_neon_vrsqrte, arm_neon_vrsqrte, 0),
- NEONMAP2(vrsqrteq_v, arm_neon_vrsqrte, arm_neon_vrsqrte, 0),
- NEONMAP1(vrsqrts_v, arm_neon_vrsqrts, Add1ArgType),
- NEONMAP1(vrsqrtsq_v, arm_neon_vrsqrts, Add1ArgType),
- NEONMAP1(vrsubhn_v, arm_neon_vrsubhn, Add1ArgType),
- NEONMAP1(vsha1su0q_v, arm_neon_sha1su0, 0),
- NEONMAP1(vsha1su1q_v, arm_neon_sha1su1, 0),
- NEONMAP1(vsha256h2q_v, arm_neon_sha256h2, 0),
- NEONMAP1(vsha256hq_v, arm_neon_sha256h, 0),
- NEONMAP1(vsha256su0q_v, arm_neon_sha256su0, 0),
- NEONMAP1(vsha256su1q_v, arm_neon_sha256su1, 0),
- NEONMAP0(vshl_n_v),
- NEONMAP2(vshl_v, arm_neon_vshiftu, arm_neon_vshifts, Add1ArgType | UnsignedAlts),
- NEONMAP0(vshll_n_v),
- NEONMAP0(vshlq_n_v),
- NEONMAP2(vshlq_v, arm_neon_vshiftu, arm_neon_vshifts, Add1ArgType | UnsignedAlts),
- NEONMAP0(vshr_n_v),
- NEONMAP0(vshrn_n_v),
- NEONMAP0(vshrq_n_v),
- NEONMAP1(vst1_v, arm_neon_vst1, 0),
- NEONMAP1(vst1_x2_v, arm_neon_vst1x2, 0),
- NEONMAP1(vst1_x3_v, arm_neon_vst1x3, 0),
- NEONMAP1(vst1_x4_v, arm_neon_vst1x4, 0),
- NEONMAP1(vst1q_v, arm_neon_vst1, 0),
- NEONMAP1(vst1q_x2_v, arm_neon_vst1x2, 0),
- NEONMAP1(vst1q_x3_v, arm_neon_vst1x3, 0),
- NEONMAP1(vst1q_x4_v, arm_neon_vst1x4, 0),
- NEONMAP1(vst2_lane_v, arm_neon_vst2lane, 0),
- NEONMAP1(vst2_v, arm_neon_vst2, 0),
- NEONMAP1(vst2q_lane_v, arm_neon_vst2lane, 0),
- NEONMAP1(vst2q_v, arm_neon_vst2, 0),
- NEONMAP1(vst3_lane_v, arm_neon_vst3lane, 0),
- NEONMAP1(vst3_v, arm_neon_vst3, 0),
- NEONMAP1(vst3q_lane_v, arm_neon_vst3lane, 0),
- NEONMAP1(vst3q_v, arm_neon_vst3, 0),
- NEONMAP1(vst4_lane_v, arm_neon_vst4lane, 0),
- NEONMAP1(vst4_v, arm_neon_vst4, 0),
- NEONMAP1(vst4q_lane_v, arm_neon_vst4lane, 0),
- NEONMAP1(vst4q_v, arm_neon_vst4, 0),
- NEONMAP0(vsubhn_v),
- NEONMAP0(vtrn_v),
- NEONMAP0(vtrnq_v),
- NEONMAP0(vtst_v),
- NEONMAP0(vtstq_v),
- NEONMAP0(vuzp_v),
- NEONMAP0(vuzpq_v),
- NEONMAP0(vzip_v),
- NEONMAP0(vzipq_v)
- };
- static const NeonIntrinsicInfo AArch64SIMDIntrinsicMap[] = {
- NEONMAP1(vabs_v, aarch64_neon_abs, 0),
- NEONMAP1(vabsq_v, aarch64_neon_abs, 0),
- NEONMAP0(vaddhn_v),
- NEONMAP1(vaesdq_v, aarch64_crypto_aesd, 0),
- NEONMAP1(vaeseq_v, aarch64_crypto_aese, 0),
- NEONMAP1(vaesimcq_v, aarch64_crypto_aesimc, 0),
- NEONMAP1(vaesmcq_v, aarch64_crypto_aesmc, 0),
- NEONMAP1(vcage_v, aarch64_neon_facge, 0),
- NEONMAP1(vcageq_v, aarch64_neon_facge, 0),
- NEONMAP1(vcagt_v, aarch64_neon_facgt, 0),
- NEONMAP1(vcagtq_v, aarch64_neon_facgt, 0),
- NEONMAP1(vcale_v, aarch64_neon_facge, 0),
- NEONMAP1(vcaleq_v, aarch64_neon_facge, 0),
- NEONMAP1(vcalt_v, aarch64_neon_facgt, 0),
- NEONMAP1(vcaltq_v, aarch64_neon_facgt, 0),
- NEONMAP0(vceqz_v),
- NEONMAP0(vceqzq_v),
- NEONMAP0(vcgez_v),
- NEONMAP0(vcgezq_v),
- NEONMAP0(vcgtz_v),
- NEONMAP0(vcgtzq_v),
- NEONMAP0(vclez_v),
- NEONMAP0(vclezq_v),
- NEONMAP1(vcls_v, aarch64_neon_cls, Add1ArgType),
- NEONMAP1(vclsq_v, aarch64_neon_cls, Add1ArgType),
- NEONMAP0(vcltz_v),
- NEONMAP0(vcltzq_v),
- NEONMAP1(vclz_v, ctlz, Add1ArgType),
- NEONMAP1(vclzq_v, ctlz, Add1ArgType),
- NEONMAP1(vcnt_v, ctpop, Add1ArgType),
- NEONMAP1(vcntq_v, ctpop, Add1ArgType),
- NEONMAP1(vcvt_f16_f32, aarch64_neon_vcvtfp2hf, 0),
- NEONMAP0(vcvt_f16_v),
- NEONMAP1(vcvt_f32_f16, aarch64_neon_vcvthf2fp, 0),
- NEONMAP0(vcvt_f32_v),
- NEONMAP2(vcvt_n_f16_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0),
- NEONMAP2(vcvt_n_f32_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0),
- NEONMAP2(vcvt_n_f64_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0),
- NEONMAP1(vcvt_n_s16_v, aarch64_neon_vcvtfp2fxs, 0),
- NEONMAP1(vcvt_n_s32_v, aarch64_neon_vcvtfp2fxs, 0),
- NEONMAP1(vcvt_n_s64_v, aarch64_neon_vcvtfp2fxs, 0),
- NEONMAP1(vcvt_n_u16_v, aarch64_neon_vcvtfp2fxu, 0),
- NEONMAP1(vcvt_n_u32_v, aarch64_neon_vcvtfp2fxu, 0),
- NEONMAP1(vcvt_n_u64_v, aarch64_neon_vcvtfp2fxu, 0),
- NEONMAP0(vcvtq_f16_v),
- NEONMAP0(vcvtq_f32_v),
- NEONMAP2(vcvtq_n_f16_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0),
- NEONMAP2(vcvtq_n_f32_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0),
- NEONMAP2(vcvtq_n_f64_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0),
- NEONMAP1(vcvtq_n_s16_v, aarch64_neon_vcvtfp2fxs, 0),
- NEONMAP1(vcvtq_n_s32_v, aarch64_neon_vcvtfp2fxs, 0),
- NEONMAP1(vcvtq_n_s64_v, aarch64_neon_vcvtfp2fxs, 0),
- NEONMAP1(vcvtq_n_u16_v, aarch64_neon_vcvtfp2fxu, 0),
- NEONMAP1(vcvtq_n_u32_v, aarch64_neon_vcvtfp2fxu, 0),
- NEONMAP1(vcvtq_n_u64_v, aarch64_neon_vcvtfp2fxu, 0),
- NEONMAP1(vcvtx_f32_v, aarch64_neon_fcvtxn, AddRetType | Add1ArgType),
- NEONMAP2(vdot_v, aarch64_neon_udot, aarch64_neon_sdot, 0),
- NEONMAP2(vdotq_v, aarch64_neon_udot, aarch64_neon_sdot, 0),
- NEONMAP0(vext_v),
- NEONMAP0(vextq_v),
- NEONMAP0(vfma_v),
- NEONMAP0(vfmaq_v),
- NEONMAP1(vfmlal_high_v, aarch64_neon_fmlal2, 0),
- NEONMAP1(vfmlal_low_v, aarch64_neon_fmlal, 0),
- NEONMAP1(vfmlalq_high_v, aarch64_neon_fmlal2, 0),
- NEONMAP1(vfmlalq_low_v, aarch64_neon_fmlal, 0),
- NEONMAP1(vfmlsl_high_v, aarch64_neon_fmlsl2, 0),
- NEONMAP1(vfmlsl_low_v, aarch64_neon_fmlsl, 0),
- NEONMAP1(vfmlslq_high_v, aarch64_neon_fmlsl2, 0),
- NEONMAP1(vfmlslq_low_v, aarch64_neon_fmlsl, 0),
- NEONMAP2(vhadd_v, aarch64_neon_uhadd, aarch64_neon_shadd, Add1ArgType | UnsignedAlts),
- NEONMAP2(vhaddq_v, aarch64_neon_uhadd, aarch64_neon_shadd, Add1ArgType | UnsignedAlts),
- NEONMAP2(vhsub_v, aarch64_neon_uhsub, aarch64_neon_shsub, Add1ArgType | UnsignedAlts),
- NEONMAP2(vhsubq_v, aarch64_neon_uhsub, aarch64_neon_shsub, Add1ArgType | UnsignedAlts),
- NEONMAP1(vld1_x2_v, aarch64_neon_ld1x2, 0),
- NEONMAP1(vld1_x3_v, aarch64_neon_ld1x3, 0),
- NEONMAP1(vld1_x4_v, aarch64_neon_ld1x4, 0),
- NEONMAP1(vld1q_x2_v, aarch64_neon_ld1x2, 0),
- NEONMAP1(vld1q_x3_v, aarch64_neon_ld1x3, 0),
- NEONMAP1(vld1q_x4_v, aarch64_neon_ld1x4, 0),
- NEONMAP0(vmovl_v),
- NEONMAP0(vmovn_v),
- NEONMAP1(vmul_v, aarch64_neon_pmul, Add1ArgType),
- NEONMAP1(vmulq_v, aarch64_neon_pmul, Add1ArgType),
- NEONMAP1(vpadd_v, aarch64_neon_addp, Add1ArgType),
- NEONMAP2(vpaddl_v, aarch64_neon_uaddlp, aarch64_neon_saddlp, UnsignedAlts),
- NEONMAP2(vpaddlq_v, aarch64_neon_uaddlp, aarch64_neon_saddlp, UnsignedAlts),
- NEONMAP1(vpaddq_v, aarch64_neon_addp, Add1ArgType),
- NEONMAP1(vqabs_v, aarch64_neon_sqabs, Add1ArgType),
- NEONMAP1(vqabsq_v, aarch64_neon_sqabs, Add1ArgType),
- NEONMAP2(vqadd_v, aarch64_neon_uqadd, aarch64_neon_sqadd, Add1ArgType | UnsignedAlts),
- NEONMAP2(vqaddq_v, aarch64_neon_uqadd, aarch64_neon_sqadd, Add1ArgType | UnsignedAlts),
- NEONMAP2(vqdmlal_v, aarch64_neon_sqdmull, aarch64_neon_sqadd, 0),
- NEONMAP2(vqdmlsl_v, aarch64_neon_sqdmull, aarch64_neon_sqsub, 0),
- NEONMAP1(vqdmulh_v, aarch64_neon_sqdmulh, Add1ArgType),
- NEONMAP1(vqdmulhq_v, aarch64_neon_sqdmulh, Add1ArgType),
- NEONMAP1(vqdmull_v, aarch64_neon_sqdmull, Add1ArgType),
- NEONMAP2(vqmovn_v, aarch64_neon_uqxtn, aarch64_neon_sqxtn, Add1ArgType | UnsignedAlts),
- NEONMAP1(vqmovun_v, aarch64_neon_sqxtun, Add1ArgType),
- NEONMAP1(vqneg_v, aarch64_neon_sqneg, Add1ArgType),
- NEONMAP1(vqnegq_v, aarch64_neon_sqneg, Add1ArgType),
- NEONMAP1(vqrdmulh_v, aarch64_neon_sqrdmulh, Add1ArgType),
- NEONMAP1(vqrdmulhq_v, aarch64_neon_sqrdmulh, Add1ArgType),
- NEONMAP2(vqrshl_v, aarch64_neon_uqrshl, aarch64_neon_sqrshl, Add1ArgType | UnsignedAlts),
- NEONMAP2(vqrshlq_v, aarch64_neon_uqrshl, aarch64_neon_sqrshl, Add1ArgType | UnsignedAlts),
- NEONMAP2(vqshl_n_v, aarch64_neon_uqshl, aarch64_neon_sqshl, UnsignedAlts),
- NEONMAP2(vqshl_v, aarch64_neon_uqshl, aarch64_neon_sqshl, Add1ArgType | UnsignedAlts),
- NEONMAP2(vqshlq_n_v, aarch64_neon_uqshl, aarch64_neon_sqshl, UnsignedAlts),
- NEONMAP2(vqshlq_v, aarch64_neon_uqshl, aarch64_neon_sqshl, Add1ArgType | UnsignedAlts),
- NEONMAP1(vqshlu_n_v, aarch64_neon_sqshlu, 0),
- NEONMAP1(vqshluq_n_v, aarch64_neon_sqshlu, 0),
- NEONMAP2(vqsub_v, aarch64_neon_uqsub, aarch64_neon_sqsub, Add1ArgType | UnsignedAlts),
- NEONMAP2(vqsubq_v, aarch64_neon_uqsub, aarch64_neon_sqsub, Add1ArgType | UnsignedAlts),
- NEONMAP1(vraddhn_v, aarch64_neon_raddhn, Add1ArgType),
- NEONMAP2(vrecpe_v, aarch64_neon_frecpe, aarch64_neon_urecpe, 0),
- NEONMAP2(vrecpeq_v, aarch64_neon_frecpe, aarch64_neon_urecpe, 0),
- NEONMAP1(vrecps_v, aarch64_neon_frecps, Add1ArgType),
- NEONMAP1(vrecpsq_v, aarch64_neon_frecps, Add1ArgType),
- NEONMAP2(vrhadd_v, aarch64_neon_urhadd, aarch64_neon_srhadd, Add1ArgType | UnsignedAlts),
- NEONMAP2(vrhaddq_v, aarch64_neon_urhadd, aarch64_neon_srhadd, Add1ArgType | UnsignedAlts),
- NEONMAP0(vrndi_v),
- NEONMAP0(vrndiq_v),
- NEONMAP2(vrshl_v, aarch64_neon_urshl, aarch64_neon_srshl, Add1ArgType | UnsignedAlts),
- NEONMAP2(vrshlq_v, aarch64_neon_urshl, aarch64_neon_srshl, Add1ArgType | UnsignedAlts),
- NEONMAP2(vrshr_n_v, aarch64_neon_urshl, aarch64_neon_srshl, UnsignedAlts),
- NEONMAP2(vrshrq_n_v, aarch64_neon_urshl, aarch64_neon_srshl, UnsignedAlts),
- NEONMAP2(vrsqrte_v, aarch64_neon_frsqrte, aarch64_neon_ursqrte, 0),
- NEONMAP2(vrsqrteq_v, aarch64_neon_frsqrte, aarch64_neon_ursqrte, 0),
- NEONMAP1(vrsqrts_v, aarch64_neon_frsqrts, Add1ArgType),
- NEONMAP1(vrsqrtsq_v, aarch64_neon_frsqrts, Add1ArgType),
- NEONMAP1(vrsubhn_v, aarch64_neon_rsubhn, Add1ArgType),
- NEONMAP1(vsha1su0q_v, aarch64_crypto_sha1su0, 0),
- NEONMAP1(vsha1su1q_v, aarch64_crypto_sha1su1, 0),
- NEONMAP1(vsha256h2q_v, aarch64_crypto_sha256h2, 0),
- NEONMAP1(vsha256hq_v, aarch64_crypto_sha256h, 0),
- NEONMAP1(vsha256su0q_v, aarch64_crypto_sha256su0, 0),
- NEONMAP1(vsha256su1q_v, aarch64_crypto_sha256su1, 0),
- NEONMAP0(vshl_n_v),
- NEONMAP2(vshl_v, aarch64_neon_ushl, aarch64_neon_sshl, Add1ArgType | UnsignedAlts),
- NEONMAP0(vshll_n_v),
- NEONMAP0(vshlq_n_v),
- NEONMAP2(vshlq_v, aarch64_neon_ushl, aarch64_neon_sshl, Add1ArgType | UnsignedAlts),
- NEONMAP0(vshr_n_v),
- NEONMAP0(vshrn_n_v),
- NEONMAP0(vshrq_n_v),
- NEONMAP1(vst1_x2_v, aarch64_neon_st1x2, 0),
- NEONMAP1(vst1_x3_v, aarch64_neon_st1x3, 0),
- NEONMAP1(vst1_x4_v, aarch64_neon_st1x4, 0),
- NEONMAP1(vst1q_x2_v, aarch64_neon_st1x2, 0),
- NEONMAP1(vst1q_x3_v, aarch64_neon_st1x3, 0),
- NEONMAP1(vst1q_x4_v, aarch64_neon_st1x4, 0),
- NEONMAP0(vsubhn_v),
- NEONMAP0(vtst_v),
- NEONMAP0(vtstq_v),
- };
- static const NeonIntrinsicInfo AArch64SISDIntrinsicMap[] = {
- NEONMAP1(vabdd_f64, aarch64_sisd_fabd, Add1ArgType),
- NEONMAP1(vabds_f32, aarch64_sisd_fabd, Add1ArgType),
- NEONMAP1(vabsd_s64, aarch64_neon_abs, Add1ArgType),
- NEONMAP1(vaddlv_s32, aarch64_neon_saddlv, AddRetType | Add1ArgType),
- NEONMAP1(vaddlv_u32, aarch64_neon_uaddlv, AddRetType | Add1ArgType),
- NEONMAP1(vaddlvq_s32, aarch64_neon_saddlv, AddRetType | Add1ArgType),
- NEONMAP1(vaddlvq_u32, aarch64_neon_uaddlv, AddRetType | Add1ArgType),
- NEONMAP1(vaddv_f32, aarch64_neon_faddv, AddRetType | Add1ArgType),
- NEONMAP1(vaddv_s32, aarch64_neon_saddv, AddRetType | Add1ArgType),
- NEONMAP1(vaddv_u32, aarch64_neon_uaddv, AddRetType | Add1ArgType),
- NEONMAP1(vaddvq_f32, aarch64_neon_faddv, AddRetType | Add1ArgType),
- NEONMAP1(vaddvq_f64, aarch64_neon_faddv, AddRetType | Add1ArgType),
- NEONMAP1(vaddvq_s32, aarch64_neon_saddv, AddRetType | Add1ArgType),
- NEONMAP1(vaddvq_s64, aarch64_neon_saddv, AddRetType | Add1ArgType),
- NEONMAP1(vaddvq_u32, aarch64_neon_uaddv, AddRetType | Add1ArgType),
- NEONMAP1(vaddvq_u64, aarch64_neon_uaddv, AddRetType | Add1ArgType),
- NEONMAP1(vcaged_f64, aarch64_neon_facge, AddRetType | Add1ArgType),
- NEONMAP1(vcages_f32, aarch64_neon_facge, AddRetType | Add1ArgType),
- NEONMAP1(vcagtd_f64, aarch64_neon_facgt, AddRetType | Add1ArgType),
- NEONMAP1(vcagts_f32, aarch64_neon_facgt, AddRetType | Add1ArgType),
- NEONMAP1(vcaled_f64, aarch64_neon_facge, AddRetType | Add1ArgType),
- NEONMAP1(vcales_f32, aarch64_neon_facge, AddRetType | Add1ArgType),
- NEONMAP1(vcaltd_f64, aarch64_neon_facgt, AddRetType | Add1ArgType),
- NEONMAP1(vcalts_f32, aarch64_neon_facgt, AddRetType | Add1ArgType),
- NEONMAP1(vcvtad_s64_f64, aarch64_neon_fcvtas, AddRetType | Add1ArgType),
- NEONMAP1(vcvtad_u64_f64, aarch64_neon_fcvtau, AddRetType | Add1ArgType),
- NEONMAP1(vcvtas_s32_f32, aarch64_neon_fcvtas, AddRetType | Add1ArgType),
- NEONMAP1(vcvtas_u32_f32, aarch64_neon_fcvtau, AddRetType | Add1ArgType),
- NEONMAP1(vcvtd_n_f64_s64, aarch64_neon_vcvtfxs2fp, AddRetType | Add1ArgType),
- NEONMAP1(vcvtd_n_f64_u64, aarch64_neon_vcvtfxu2fp, AddRetType | Add1ArgType),
- NEONMAP1(vcvtd_n_s64_f64, aarch64_neon_vcvtfp2fxs, AddRetType | Add1ArgType),
- NEONMAP1(vcvtd_n_u64_f64, aarch64_neon_vcvtfp2fxu, AddRetType | Add1ArgType),
- NEONMAP1(vcvtmd_s64_f64, aarch64_neon_fcvtms, AddRetType | Add1ArgType),
- NEONMAP1(vcvtmd_u64_f64, aarch64_neon_fcvtmu, AddRetType | Add1ArgType),
- NEONMAP1(vcvtms_s32_f32, aarch64_neon_fcvtms, AddRetType | Add1ArgType),
- NEONMAP1(vcvtms_u32_f32, aarch64_neon_fcvtmu, AddRetType | Add1ArgType),
- NEONMAP1(vcvtnd_s64_f64, aarch64_neon_fcvtns, AddRetType | Add1ArgType),
- NEONMAP1(vcvtnd_u64_f64, aarch64_neon_fcvtnu, AddRetType | Add1ArgType),
- NEONMAP1(vcvtns_s32_f32, aarch64_neon_fcvtns, AddRetType | Add1ArgType),
- NEONMAP1(vcvtns_u32_f32, aarch64_neon_fcvtnu, AddRetType | Add1ArgType),
- NEONMAP1(vcvtpd_s64_f64, aarch64_neon_fcvtps, AddRetType | Add1ArgType),
- NEONMAP1(vcvtpd_u64_f64, aarch64_neon_fcvtpu, AddRetType | Add1ArgType),
- NEONMAP1(vcvtps_s32_f32, aarch64_neon_fcvtps, AddRetType | Add1ArgType),
- NEONMAP1(vcvtps_u32_f32, aarch64_neon_fcvtpu, AddRetType | Add1ArgType),
- NEONMAP1(vcvts_n_f32_s32, aarch64_neon_vcvtfxs2fp, AddRetType | Add1ArgType),
- NEONMAP1(vcvts_n_f32_u32, aarch64_neon_vcvtfxu2fp, AddRetType | Add1ArgType),
- NEONMAP1(vcvts_n_s32_f32, aarch64_neon_vcvtfp2fxs, AddRetType | Add1ArgType),
- NEONMAP1(vcvts_n_u32_f32, aarch64_neon_vcvtfp2fxu, AddRetType | Add1ArgType),
- NEONMAP1(vcvtxd_f32_f64, aarch64_sisd_fcvtxn, 0),
- NEONMAP1(vmaxnmv_f32, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType),
- NEONMAP1(vmaxnmvq_f32, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType),
- NEONMAP1(vmaxnmvq_f64, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType),
- NEONMAP1(vmaxv_f32, aarch64_neon_fmaxv, AddRetType | Add1ArgType),
- NEONMAP1(vmaxv_s32, aarch64_neon_smaxv, AddRetType | Add1ArgType),
- NEONMAP1(vmaxv_u32, aarch64_neon_umaxv, AddRetType | Add1ArgType),
- NEONMAP1(vmaxvq_f32, aarch64_neon_fmaxv, AddRetType | Add1ArgType),
- NEONMAP1(vmaxvq_f64, aarch64_neon_fmaxv, AddRetType | Add1ArgType),
- NEONMAP1(vmaxvq_s32, aarch64_neon_smaxv, AddRetType | Add1ArgType),
- NEONMAP1(vmaxvq_u32, aarch64_neon_umaxv, AddRetType | Add1ArgType),
- NEONMAP1(vminnmv_f32, aarch64_neon_fminnmv, AddRetType | Add1ArgType),
- NEONMAP1(vminnmvq_f32, aarch64_neon_fminnmv, AddRetType | Add1ArgType),
- NEONMAP1(vminnmvq_f64, aarch64_neon_fminnmv, AddRetType | Add1ArgType),
- NEONMAP1(vminv_f32, aarch64_neon_fminv, AddRetType | Add1ArgType),
- NEONMAP1(vminv_s32, aarch64_neon_sminv, AddRetType | Add1ArgType),
- NEONMAP1(vminv_u32, aarch64_neon_uminv, AddRetType | Add1ArgType),
- NEONMAP1(vminvq_f32, aarch64_neon_fminv, AddRetType | Add1ArgType),
- NEONMAP1(vminvq_f64, aarch64_neon_fminv, AddRetType | Add1ArgType),
- NEONMAP1(vminvq_s32, aarch64_neon_sminv, AddRetType | Add1ArgType),
- NEONMAP1(vminvq_u32, aarch64_neon_uminv, AddRetType | Add1ArgType),
- NEONMAP1(vmull_p64, aarch64_neon_pmull64, 0),
- NEONMAP1(vmulxd_f64, aarch64_neon_fmulx, Add1ArgType),
- NEONMAP1(vmulxs_f32, aarch64_neon_fmulx, Add1ArgType),
- NEONMAP1(vpaddd_s64, aarch64_neon_uaddv, AddRetType | Add1ArgType),
- NEONMAP1(vpaddd_u64, aarch64_neon_uaddv, AddRetType | Add1ArgType),
- NEONMAP1(vpmaxnmqd_f64, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType),
- NEONMAP1(vpmaxnms_f32, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType),
- NEONMAP1(vpmaxqd_f64, aarch64_neon_fmaxv, AddRetType | Add1ArgType),
- NEONMAP1(vpmaxs_f32, aarch64_neon_fmaxv, AddRetType | Add1ArgType),
- NEONMAP1(vpminnmqd_f64, aarch64_neon_fminnmv, AddRetType | Add1ArgType),
- NEONMAP1(vpminnms_f32, aarch64_neon_fminnmv, AddRetType | Add1ArgType),
- NEONMAP1(vpminqd_f64, aarch64_neon_fminv, AddRetType | Add1ArgType),
- NEONMAP1(vpmins_f32, aarch64_neon_fminv, AddRetType | Add1ArgType),
- NEONMAP1(vqabsb_s8, aarch64_neon_sqabs, Vectorize1ArgType | Use64BitVectors),
- NEONMAP1(vqabsd_s64, aarch64_neon_sqabs, Add1ArgType),
- NEONMAP1(vqabsh_s16, aarch64_neon_sqabs, Vectorize1ArgType | Use64BitVectors),
- NEONMAP1(vqabss_s32, aarch64_neon_sqabs, Add1ArgType),
- NEONMAP1(vqaddb_s8, aarch64_neon_sqadd, Vectorize1ArgType | Use64BitVectors),
- NEONMAP1(vqaddb_u8, aarch64_neon_uqadd, Vectorize1ArgType | Use64BitVectors),
- NEONMAP1(vqaddd_s64, aarch64_neon_sqadd, Add1ArgType),
- NEONMAP1(vqaddd_u64, aarch64_neon_uqadd, Add1ArgType),
- NEONMAP1(vqaddh_s16, aarch64_neon_sqadd, Vectorize1ArgType | Use64BitVectors),
- NEONMAP1(vqaddh_u16, aarch64_neon_uqadd, Vectorize1ArgType | Use64BitVectors),
- NEONMAP1(vqadds_s32, aarch64_neon_sqadd, Add1ArgType),
- NEONMAP1(vqadds_u32, aarch64_neon_uqadd, Add1ArgType),
- NEONMAP1(vqdmulhh_s16, aarch64_neon_sqdmulh, Vectorize1ArgType | Use64BitVectors),
- NEONMAP1(vqdmulhs_s32, aarch64_neon_sqdmulh, Add1ArgType),
- NEONMAP1(vqdmullh_s16, aarch64_neon_sqdmull, VectorRet | Use128BitVectors),
- NEONMAP1(vqdmulls_s32, aarch64_neon_sqdmulls_scalar, 0),
- NEONMAP1(vqmovnd_s64, aarch64_neon_scalar_sqxtn, AddRetType | Add1ArgType),
- NEONMAP1(vqmovnd_u64, aarch64_neon_scalar_uqxtn, AddRetType | Add1ArgType),
- NEONMAP1(vqmovnh_s16, aarch64_neon_sqxtn, VectorRet | Use64BitVectors),
- NEONMAP1(vqmovnh_u16, aarch64_neon_uqxtn, VectorRet | Use64BitVectors),
- NEONMAP1(vqmovns_s32, aarch64_neon_sqxtn, VectorRet | Use64BitVectors),
- NEONMAP1(vqmovns_u32, aarch64_neon_uqxtn, VectorRet | Use64BitVectors),
- NEONMAP1(vqmovund_s64, aarch64_neon_scalar_sqxtun, AddRetType | Add1ArgType),
- NEONMAP1(vqmovunh_s16, aarch64_neon_sqxtun, VectorRet | Use64BitVectors),
- NEONMAP1(vqmovuns_s32, aarch64_neon_sqxtun, VectorRet | Use64BitVectors),
- NEONMAP1(vqnegb_s8, aarch64_neon_sqneg, Vectorize1ArgType | Use64BitVectors),
- NEONMAP1(vqnegd_s64, aarch64_neon_sqneg, Add1ArgType),
- NEONMAP1(vqnegh_s16, aarch64_neon_sqneg, Vectorize1ArgType | Use64BitVectors),
- NEONMAP1(vqnegs_s32, aarch64_neon_sqneg, Add1ArgType),
- NEONMAP1(vqrdmulhh_s16, aarch64_neon_sqrdmulh, Vectorize1ArgType | Use64BitVectors),
- NEONMAP1(vqrdmulhs_s32, aarch64_neon_sqrdmulh, Add1ArgType),
- NEONMAP1(vqrshlb_s8, aarch64_neon_sqrshl, Vectorize1ArgType | Use64BitVectors),
- NEONMAP1(vqrshlb_u8, aarch64_neon_uqrshl, Vectorize1ArgType | Use64BitVectors),
- NEONMAP1(vqrshld_s64, aarch64_neon_sqrshl, Add1ArgType),
- NEONMAP1(vqrshld_u64, aarch64_neon_uqrshl, Add1ArgType),
- NEONMAP1(vqrshlh_s16, aarch64_neon_sqrshl, Vectorize1ArgType | Use64BitVectors),
- NEONMAP1(vqrshlh_u16, aarch64_neon_uqrshl, Vectorize1ArgType | Use64BitVectors),
- NEONMAP1(vqrshls_s32, aarch64_neon_sqrshl, Add1ArgType),
- NEONMAP1(vqrshls_u32, aarch64_neon_uqrshl, Add1ArgType),
- NEONMAP1(vqrshrnd_n_s64, aarch64_neon_sqrshrn, AddRetType),
- NEONMAP1(vqrshrnd_n_u64, aarch64_neon_uqrshrn, AddRetType),
- NEONMAP1(vqrshrnh_n_s16, aarch64_neon_sqrshrn, VectorRet | Use64BitVectors),
- NEONMAP1(vqrshrnh_n_u16, aarch64_neon_uqrshrn, VectorRet | Use64BitVectors),
- NEONMAP1(vqrshrns_n_s32, aarch64_neon_sqrshrn, VectorRet | Use64BitVectors),
- NEONMAP1(vqrshrns_n_u32, aarch64_neon_uqrshrn, VectorRet | Use64BitVectors),
- NEONMAP1(vqrshrund_n_s64, aarch64_neon_sqrshrun, AddRetType),
- NEONMAP1(vqrshrunh_n_s16, aarch64_neon_sqrshrun, VectorRet | Use64BitVectors),
- NEONMAP1(vqrshruns_n_s32, aarch64_neon_sqrshrun, VectorRet | Use64BitVectors),
- NEONMAP1(vqshlb_n_s8, aarch64_neon_sqshl, Vectorize1ArgType | Use64BitVectors),
- NEONMAP1(vqshlb_n_u8, aarch64_neon_uqshl, Vectorize1ArgType | Use64BitVectors),
- NEONMAP1(vqshlb_s8, aarch64_neon_sqshl, Vectorize1ArgType | Use64BitVectors),
- NEONMAP1(vqshlb_u8, aarch64_neon_uqshl, Vectorize1ArgType | Use64BitVectors),
- NEONMAP1(vqshld_s64, aarch64_neon_sqshl, Add1ArgType),
- NEONMAP1(vqshld_u64, aarch64_neon_uqshl, Add1ArgType),
- NEONMAP1(vqshlh_n_s16, aarch64_neon_sqshl, Vectorize1ArgType | Use64BitVectors),
- NEONMAP1(vqshlh_n_u16, aarch64_neon_uqshl, Vectorize1ArgType | Use64BitVectors),
- NEONMAP1(vqshlh_s16, aarch64_neon_sqshl, Vectorize1ArgType | Use64BitVectors),
- NEONMAP1(vqshlh_u16, aarch64_neon_uqshl, Vectorize1ArgType | Use64BitVectors),
- NEONMAP1(vqshls_n_s32, aarch64_neon_sqshl, Add1ArgType),
- NEONMAP1(vqshls_n_u32, aarch64_neon_uqshl, Add1ArgType),
- NEONMAP1(vqshls_s32, aarch64_neon_sqshl, Add1ArgType),
- NEONMAP1(vqshls_u32, aarch64_neon_uqshl, Add1ArgType),
- NEONMAP1(vqshlub_n_s8, aarch64_neon_sqshlu, Vectorize1ArgType | Use64BitVectors),
- NEONMAP1(vqshluh_n_s16, aarch64_neon_sqshlu, Vectorize1ArgType | Use64BitVectors),
- NEONMAP1(vqshlus_n_s32, aarch64_neon_sqshlu, Add1ArgType),
- NEONMAP1(vqshrnd_n_s64, aarch64_neon_sqshrn, AddRetType),
- NEONMAP1(vqshrnd_n_u64, aarch64_neon_uqshrn, AddRetType),
- NEONMAP1(vqshrnh_n_s16, aarch64_neon_sqshrn, VectorRet | Use64BitVectors),
- NEONMAP1(vqshrnh_n_u16, aarch64_neon_uqshrn, VectorRet | Use64BitVectors),
- NEONMAP1(vqshrns_n_s32, aarch64_neon_sqshrn, VectorRet | Use64BitVectors),
- NEONMAP1(vqshrns_n_u32, aarch64_neon_uqshrn, VectorRet | Use64BitVectors),
- NEONMAP1(vqshrund_n_s64, aarch64_neon_sqshrun, AddRetType),
- NEONMAP1(vqshrunh_n_s16, aarch64_neon_sqshrun, VectorRet | Use64BitVectors),
- NEONMAP1(vqshruns_n_s32, aarch64_neon_sqshrun, VectorRet | Use64BitVectors),
- NEONMAP1(vqsubb_s8, aarch64_neon_sqsub, Vectorize1ArgType | Use64BitVectors),
- NEONMAP1(vqsubb_u8, aarch64_neon_uqsub, Vectorize1ArgType | Use64BitVectors),
- NEONMAP1(vqsubd_s64, aarch64_neon_sqsub, Add1ArgType),
- NEONMAP1(vqsubd_u64, aarch64_neon_uqsub, Add1ArgType),
- NEONMAP1(vqsubh_s16, aarch64_neon_sqsub, Vectorize1ArgType | Use64BitVectors),
- NEONMAP1(vqsubh_u16, aarch64_neon_uqsub, Vectorize1ArgType | Use64BitVectors),
- NEONMAP1(vqsubs_s32, aarch64_neon_sqsub, Add1ArgType),
- NEONMAP1(vqsubs_u32, aarch64_neon_uqsub, Add1ArgType),
- NEONMAP1(vrecped_f64, aarch64_neon_frecpe, Add1ArgType),
- NEONMAP1(vrecpes_f32, aarch64_neon_frecpe, Add1ArgType),
- NEONMAP1(vrecpxd_f64, aarch64_neon_frecpx, Add1ArgType),
- NEONMAP1(vrecpxs_f32, aarch64_neon_frecpx, Add1ArgType),
- NEONMAP1(vrshld_s64, aarch64_neon_srshl, Add1ArgType),
- NEONMAP1(vrshld_u64, aarch64_neon_urshl, Add1ArgType),
- NEONMAP1(vrsqrted_f64, aarch64_neon_frsqrte, Add1ArgType),
- NEONMAP1(vrsqrtes_f32, aarch64_neon_frsqrte, Add1ArgType),
- NEONMAP1(vrsqrtsd_f64, aarch64_neon_frsqrts, Add1ArgType),
- NEONMAP1(vrsqrtss_f32, aarch64_neon_frsqrts, Add1ArgType),
- NEONMAP1(vsha1cq_u32, aarch64_crypto_sha1c, 0),
- NEONMAP1(vsha1h_u32, aarch64_crypto_sha1h, 0),
- NEONMAP1(vsha1mq_u32, aarch64_crypto_sha1m, 0),
- NEONMAP1(vsha1pq_u32, aarch64_crypto_sha1p, 0),
- NEONMAP1(vshld_s64, aarch64_neon_sshl, Add1ArgType),
- NEONMAP1(vshld_u64, aarch64_neon_ushl, Add1ArgType),
- NEONMAP1(vslid_n_s64, aarch64_neon_vsli, Vectorize1ArgType),
- NEONMAP1(vslid_n_u64, aarch64_neon_vsli, Vectorize1ArgType),
- NEONMAP1(vsqaddb_u8, aarch64_neon_usqadd, Vectorize1ArgType | Use64BitVectors),
- NEONMAP1(vsqaddd_u64, aarch64_neon_usqadd, Add1ArgType),
- NEONMAP1(vsqaddh_u16, aarch64_neon_usqadd, Vectorize1ArgType | Use64BitVectors),
- NEONMAP1(vsqadds_u32, aarch64_neon_usqadd, Add1ArgType),
- NEONMAP1(vsrid_n_s64, aarch64_neon_vsri, Vectorize1ArgType),
- NEONMAP1(vsrid_n_u64, aarch64_neon_vsri, Vectorize1ArgType),
- NEONMAP1(vuqaddb_s8, aarch64_neon_suqadd, Vectorize1ArgType | Use64BitVectors),
- NEONMAP1(vuqaddd_s64, aarch64_neon_suqadd, Add1ArgType),
- NEONMAP1(vuqaddh_s16, aarch64_neon_suqadd, Vectorize1ArgType | Use64BitVectors),
- NEONMAP1(vuqadds_s32, aarch64_neon_suqadd, Add1ArgType),
- // FP16 scalar intrinsics go here.
- NEONMAP1(vabdh_f16, aarch64_sisd_fabd, Add1ArgType),
- NEONMAP1(vcvtah_s32_f16, aarch64_neon_fcvtas, AddRetType | Add1ArgType),
- NEONMAP1(vcvtah_s64_f16, aarch64_neon_fcvtas, AddRetType | Add1ArgType),
- NEONMAP1(vcvtah_u32_f16, aarch64_neon_fcvtau, AddRetType | Add1ArgType),
- NEONMAP1(vcvtah_u64_f16, aarch64_neon_fcvtau, AddRetType | Add1ArgType),
- NEONMAP1(vcvth_n_f16_s32, aarch64_neon_vcvtfxs2fp, AddRetType | Add1ArgType),
- NEONMAP1(vcvth_n_f16_s64, aarch64_neon_vcvtfxs2fp, AddRetType | Add1ArgType),
- NEONMAP1(vcvth_n_f16_u32, aarch64_neon_vcvtfxu2fp, AddRetType | Add1ArgType),
- NEONMAP1(vcvth_n_f16_u64, aarch64_neon_vcvtfxu2fp, AddRetType | Add1ArgType),
- NEONMAP1(vcvth_n_s32_f16, aarch64_neon_vcvtfp2fxs, AddRetType | Add1ArgType),
- NEONMAP1(vcvth_n_s64_f16, aarch64_neon_vcvtfp2fxs, AddRetType | Add1ArgType),
- NEONMAP1(vcvth_n_u32_f16, aarch64_neon_vcvtfp2fxu, AddRetType | Add1ArgType),
- NEONMAP1(vcvth_n_u64_f16, aarch64_neon_vcvtfp2fxu, AddRetType | Add1ArgType),
- NEONMAP1(vcvtmh_s32_f16, aarch64_neon_fcvtms, AddRetType | Add1ArgType),
- NEONMAP1(vcvtmh_s64_f16, aarch64_neon_fcvtms, AddRetType | Add1ArgType),
- NEONMAP1(vcvtmh_u32_f16, aarch64_neon_fcvtmu, AddRetType | Add1ArgType),
- NEONMAP1(vcvtmh_u64_f16, aarch64_neon_fcvtmu, AddRetType | Add1ArgType),
- NEONMAP1(vcvtnh_s32_f16, aarch64_neon_fcvtns, AddRetType | Add1ArgType),
- NEONMAP1(vcvtnh_s64_f16, aarch64_neon_fcvtns, AddRetType | Add1ArgType),
- NEONMAP1(vcvtnh_u32_f16, aarch64_neon_fcvtnu, AddRetType | Add1ArgType),
- NEONMAP1(vcvtnh_u64_f16, aarch64_neon_fcvtnu, AddRetType | Add1ArgType),
- NEONMAP1(vcvtph_s32_f16, aarch64_neon_fcvtps, AddRetType | Add1ArgType),
- NEONMAP1(vcvtph_s64_f16, aarch64_neon_fcvtps, AddRetType | Add1ArgType),
- NEONMAP1(vcvtph_u32_f16, aarch64_neon_fcvtpu, AddRetType | Add1ArgType),
- NEONMAP1(vcvtph_u64_f16, aarch64_neon_fcvtpu, AddRetType | Add1ArgType),
- NEONMAP1(vmulxh_f16, aarch64_neon_fmulx, Add1ArgType),
- NEONMAP1(vrecpeh_f16, aarch64_neon_frecpe, Add1ArgType),
- NEONMAP1(vrecpxh_f16, aarch64_neon_frecpx, Add1ArgType),
- NEONMAP1(vrsqrteh_f16, aarch64_neon_frsqrte, Add1ArgType),
- NEONMAP1(vrsqrtsh_f16, aarch64_neon_frsqrts, Add1ArgType),
- };
- #undef NEONMAP0
- #undef NEONMAP1
- #undef NEONMAP2
- static bool NEONSIMDIntrinsicsProvenSorted = false;
- static bool AArch64SIMDIntrinsicsProvenSorted = false;
- static bool AArch64SISDIntrinsicsProvenSorted = false;
- static const NeonIntrinsicInfo *
- findNeonIntrinsicInMap(ArrayRef<NeonIntrinsicInfo> IntrinsicMap,
- unsigned BuiltinID, bool &MapProvenSorted) {
- #ifndef NDEBUG
- if (!MapProvenSorted) {
- assert(std::is_sorted(std::begin(IntrinsicMap), std::end(IntrinsicMap)));
- MapProvenSorted = true;
- }
- #endif
- const NeonIntrinsicInfo *Builtin = llvm::lower_bound(IntrinsicMap, BuiltinID);
- if (Builtin != IntrinsicMap.end() && Builtin->BuiltinID == BuiltinID)
- return Builtin;
- return nullptr;
- }
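- // A sketch of the lookup the Emit*BuiltinExpr callers perform:
- //   const NeonIntrinsicInfo *Info = findNeonIntrinsicInMap(
- //       ARMSIMDIntrinsicMap, BuiltinID, NEONSIMDIntrinsicsProvenSorted);
- //   if (Info) { /* dispatch on Info->LLVMIntrinsic and TypeModifier */ }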
- Function *CodeGenFunction::LookupNeonLLVMIntrinsic(unsigned IntrinsicID,
- unsigned Modifier,
- llvm::Type *ArgType,
- const CallExpr *E) {
- int VectorSize = 0;
- if (Modifier & Use64BitVectors)
- VectorSize = 64;
- else if (Modifier & Use128BitVectors)
- VectorSize = 128;
- // Return type.
- SmallVector<llvm::Type *, 3> Tys;
- if (Modifier & AddRetType) {
- llvm::Type *Ty = ConvertType(E->getCallReturnType(getContext()));
- if (Modifier & VectorizeRetType)
- Ty = llvm::VectorType::get(
- Ty, VectorSize ? VectorSize / Ty->getPrimitiveSizeInBits() : 1);
- Tys.push_back(Ty);
- }
- // Arguments.
- if (Modifier & VectorizeArgTypes) {
- int Elts = VectorSize ? VectorSize / ArgType->getPrimitiveSizeInBits() : 1;
- ArgType = llvm::VectorType::get(ArgType, Elts);
- }
- if (Modifier & (Add1ArgType | Add2ArgTypes))
- Tys.push_back(ArgType);
- if (Modifier & Add2ArgTypes)
- Tys.push_back(ArgType);
- if (Modifier & InventFloatType)
- Tys.push_back(FloatTy);
- return CGM.getIntrinsic(IntrinsicID, Tys);
- }
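- // e.g. vqmovnh_s16 above uses VectorRet | Use64BitVectors: its scalar
- // i8 result is re-vectorized to the 64-bit <8 x i8>, selecting the
- // overload @llvm.aarch64.neon.sqxtn.v8i8.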
- static Value *EmitCommonNeonSISDBuiltinExpr(CodeGenFunction &CGF,
- const NeonIntrinsicInfo &SISDInfo,
- SmallVectorImpl<Value *> &Ops,
- const CallExpr *E) {
- unsigned BuiltinID = SISDInfo.BuiltinID;
- unsigned int Int = SISDInfo.LLVMIntrinsic;
- unsigned Modifier = SISDInfo.TypeModifier;
- const char *s = SISDInfo.NameHint;
- switch (BuiltinID) {
- case NEON::BI__builtin_neon_vcled_s64:
- case NEON::BI__builtin_neon_vcled_u64:
- case NEON::BI__builtin_neon_vcles_f32:
- case NEON::BI__builtin_neon_vcled_f64:
- case NEON::BI__builtin_neon_vcltd_s64:
- case NEON::BI__builtin_neon_vcltd_u64:
- case NEON::BI__builtin_neon_vclts_f32:
- case NEON::BI__builtin_neon_vcltd_f64:
- case NEON::BI__builtin_neon_vcales_f32:
- case NEON::BI__builtin_neon_vcaled_f64:
- case NEON::BI__builtin_neon_vcalts_f32:
- case NEON::BI__builtin_neon_vcaltd_f64:
- // Only one direction of the comparisons actually exists: cmle is just
- // a cmge with swapped operands. The table gives us the right intrinsic,
- // but we still need to do the swap.
- std::swap(Ops[0], Ops[1]);
- break;
- }
- assert(Int && "Generic code assumes a valid intrinsic");
- // Determine the type(s) of this overloaded AArch64 intrinsic.
- const Expr *Arg = E->getArg(0);
- llvm::Type *ArgTy = CGF.ConvertType(Arg->getType());
- Function *F = CGF.LookupNeonLLVMIntrinsic(Int, Modifier, ArgTy, E);
- int j = 0;
- ConstantInt *C0 = ConstantInt::get(CGF.SizeTy, 0);
- for (Function::const_arg_iterator ai = F->arg_begin(), ae = F->arg_end();
- ai != ae; ++ai, ++j) {
- llvm::Type *ArgTy = ai->getType();
- if (Ops[j]->getType()->getPrimitiveSizeInBits() ==
- ArgTy->getPrimitiveSizeInBits())
- continue;
- assert(ArgTy->isVectorTy() && !Ops[j]->getType()->isVectorTy());
- // The constant argument to an _n_ intrinsic always has Int32Ty, so truncate
- // it before inserting.
- Ops[j] =
- CGF.Builder.CreateTruncOrBitCast(Ops[j], ArgTy->getVectorElementType());
- Ops[j] =
- CGF.Builder.CreateInsertElement(UndefValue::get(ArgTy), Ops[j], C0);
- }
- Value *Result = CGF.EmitNeonCall(F, Ops, s);
- llvm::Type *ResultType = CGF.ConvertType(E->getType());
- if (ResultType->getPrimitiveSizeInBits() <
- Result->getType()->getPrimitiveSizeInBits())
- return CGF.Builder.CreateExtractElement(Result, C0);
- return CGF.Builder.CreateBitCast(Result, ResultType, s);
- }
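- // e.g. for vqaddh_s16 the two i16 operands are widened into lane 0 of
- // <4 x i16> vectors, @llvm.aarch64.neon.sqadd.v4i16 is called, and the
- // scalar result is extracted back out of lane 0.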
- Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
- unsigned BuiltinID, unsigned LLVMIntrinsic, unsigned AltLLVMIntrinsic,
- const char *NameHint, unsigned Modifier, const CallExpr *E,
- SmallVectorImpl<llvm::Value *> &Ops, Address PtrOp0, Address PtrOp1,
- llvm::Triple::ArchType Arch) {
- // Get the last argument, which specifies the vector type.
- llvm::APSInt NeonTypeConst;
- const Expr *Arg = E->getArg(E->getNumArgs() - 1);
- if (!Arg->isIntegerConstantExpr(NeonTypeConst, getContext()))
- return nullptr;
- // Determine the type of this overloaded NEON intrinsic.
- NeonTypeFlags Type(NeonTypeConst.getZExtValue());
- bool Usgn = Type.isUnsigned();
- bool Quad = Type.isQuad();
- const bool HasLegalHalfType = getTarget().hasLegalHalfType();
- llvm::VectorType *VTy = GetNeonType(this, Type, HasLegalHalfType);
- llvm::Type *Ty = VTy;
- if (!Ty)
- return nullptr;
- auto getAlignmentValue32 = [&](Address addr) -> Value* {
- return Builder.getInt32(addr.getAlignment().getQuantity());
- };
- unsigned Int = LLVMIntrinsic;
- if ((Modifier & UnsignedAlts) && !Usgn)
- Int = AltLLVMIntrinsic;
- switch (BuiltinID) {
- default: break;
- case NEON::BI__builtin_neon_vpadd_v:
- case NEON::BI__builtin_neon_vpaddq_v:
- // We don't allow fp/int overloading of intrinsics.
- if (VTy->getElementType()->isFloatingPointTy() &&
- Int == Intrinsic::aarch64_neon_addp)
- Int = Intrinsic::aarch64_neon_faddp;
- break;
- case NEON::BI__builtin_neon_vabs_v:
- case NEON::BI__builtin_neon_vabsq_v:
- if (VTy->getElementType()->isFloatingPointTy())
- return EmitNeonCall(CGM.getIntrinsic(Intrinsic::fabs, Ty), Ops, "vabs");
- return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Ty), Ops, "vabs");
- case NEON::BI__builtin_neon_vaddhn_v: {
- llvm::VectorType *SrcTy =
- llvm::VectorType::getExtendedElementVectorType(VTy);
- // %sum = add <4 x i32> %lhs, %rhs
- Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy);
- Ops[1] = Builder.CreateBitCast(Ops[1], SrcTy);
- Ops[0] = Builder.CreateAdd(Ops[0], Ops[1], "vaddhn");
- // %high = lshr <4 x i32> %sum, <i32 16, i32 16, i32 16, i32 16>
- Constant *ShiftAmt =
- ConstantInt::get(SrcTy, SrcTy->getScalarSizeInBits() / 2);
- Ops[0] = Builder.CreateLShr(Ops[0], ShiftAmt, "vaddhn");
- // %res = trunc <4 x i32> %high to <4 x i16>
- return Builder.CreateTrunc(Ops[0], VTy, "vaddhn");
- }
- case NEON::BI__builtin_neon_vcale_v:
- case NEON::BI__builtin_neon_vcaleq_v:
- case NEON::BI__builtin_neon_vcalt_v:
- case NEON::BI__builtin_neon_vcaltq_v:
- std::swap(Ops[0], Ops[1]);
- LLVM_FALLTHROUGH;
- case NEON::BI__builtin_neon_vcage_v:
- case NEON::BI__builtin_neon_vcageq_v:
- case NEON::BI__builtin_neon_vcagt_v:
- case NEON::BI__builtin_neon_vcagtq_v: {
- llvm::Type *Ty;
- switch (VTy->getScalarSizeInBits()) {
- default: llvm_unreachable("unexpected type");
- case 32:
- Ty = FloatTy;
- break;
- case 64:
- Ty = DoubleTy;
- break;
- case 16:
- Ty = HalfTy;
- break;
- }
- llvm::Type *VecFlt = llvm::VectorType::get(Ty, VTy->getNumElements());
- llvm::Type *Tys[] = { VTy, VecFlt };
- Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys);
- return EmitNeonCall(F, Ops, NameHint);
- }
- case NEON::BI__builtin_neon_vceqz_v:
- case NEON::BI__builtin_neon_vceqzq_v:
- return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OEQ,
- ICmpInst::ICMP_EQ, "vceqz");
- case NEON::BI__builtin_neon_vcgez_v:
- case NEON::BI__builtin_neon_vcgezq_v:
- return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OGE,
- ICmpInst::ICMP_SGE, "vcgez");
- case NEON::BI__builtin_neon_vclez_v:
- case NEON::BI__builtin_neon_vclezq_v:
- return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OLE,
- ICmpInst::ICMP_SLE, "vclez");
- case NEON::BI__builtin_neon_vcgtz_v:
- case NEON::BI__builtin_neon_vcgtzq_v:
- return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OGT,
- ICmpInst::ICMP_SGT, "vcgtz");
- case NEON::BI__builtin_neon_vcltz_v:
- case NEON::BI__builtin_neon_vcltzq_v:
- return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OLT,
- ICmpInst::ICMP_SLT, "vcltz");
- case NEON::BI__builtin_neon_vclz_v:
- case NEON::BI__builtin_neon_vclzq_v:
- // We generate a target-independent intrinsic, which needs a second
- // argument for whether or not clz of zero is undefined; on ARM it isn't.
- Ops.push_back(Builder.getInt1(getTarget().isCLZForZeroUndef()));
- break;
- case NEON::BI__builtin_neon_vcvt_f32_v:
- case NEON::BI__builtin_neon_vcvtq_f32_v:
- Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
- Ty = GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float32, false, Quad),
- HasLegalHalfType);
- return Usgn ? Builder.CreateUIToFP(Ops[0], Ty, "vcvt")
- : Builder.CreateSIToFP(Ops[0], Ty, "vcvt");
- case NEON::BI__builtin_neon_vcvt_f16_v:
- case NEON::BI__builtin_neon_vcvtq_f16_v:
- Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
- Ty = GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float16, false, Quad),
- HasLegalHalfType);
- return Usgn ? Builder.CreateUIToFP(Ops[0], Ty, "vcvt")
- : Builder.CreateSIToFP(Ops[0], Ty, "vcvt");
- case NEON::BI__builtin_neon_vcvt_n_f16_v:
- case NEON::BI__builtin_neon_vcvt_n_f32_v:
- case NEON::BI__builtin_neon_vcvt_n_f64_v:
- case NEON::BI__builtin_neon_vcvtq_n_f16_v:
- case NEON::BI__builtin_neon_vcvtq_n_f32_v:
- case NEON::BI__builtin_neon_vcvtq_n_f64_v: {
- llvm::Type *Tys[2] = { GetFloatNeonType(this, Type), Ty };
- Int = Usgn ? LLVMIntrinsic : AltLLVMIntrinsic;
- Function *F = CGM.getIntrinsic(Int, Tys);
- return EmitNeonCall(F, Ops, "vcvt_n");
- }
- case NEON::BI__builtin_neon_vcvt_n_s16_v:
- case NEON::BI__builtin_neon_vcvt_n_s32_v:
- case NEON::BI__builtin_neon_vcvt_n_u16_v:
- case NEON::BI__builtin_neon_vcvt_n_u32_v:
- case NEON::BI__builtin_neon_vcvt_n_s64_v:
- case NEON::BI__builtin_neon_vcvt_n_u64_v:
- case NEON::BI__builtin_neon_vcvtq_n_s16_v:
- case NEON::BI__builtin_neon_vcvtq_n_s32_v:
- case NEON::BI__builtin_neon_vcvtq_n_u16_v:
- case NEON::BI__builtin_neon_vcvtq_n_u32_v:
- case NEON::BI__builtin_neon_vcvtq_n_s64_v:
- case NEON::BI__builtin_neon_vcvtq_n_u64_v: {
- llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) };
- Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys);
- return EmitNeonCall(F, Ops, "vcvt_n");
- }
- case NEON::BI__builtin_neon_vcvt_s32_v:
- case NEON::BI__builtin_neon_vcvt_u32_v:
- case NEON::BI__builtin_neon_vcvt_s64_v:
- case NEON::BI__builtin_neon_vcvt_u64_v:
- case NEON::BI__builtin_neon_vcvt_s16_v:
- case NEON::BI__builtin_neon_vcvt_u16_v:
- case NEON::BI__builtin_neon_vcvtq_s32_v:
- case NEON::BI__builtin_neon_vcvtq_u32_v:
- case NEON::BI__builtin_neon_vcvtq_s64_v:
- case NEON::BI__builtin_neon_vcvtq_u64_v:
- case NEON::BI__builtin_neon_vcvtq_s16_v:
- case NEON::BI__builtin_neon_vcvtq_u16_v: {
- Ops[0] = Builder.CreateBitCast(Ops[0], GetFloatNeonType(this, Type));
- return Usgn ? Builder.CreateFPToUI(Ops[0], Ty, "vcvt")
- : Builder.CreateFPToSI(Ops[0], Ty, "vcvt");
- }
- case NEON::BI__builtin_neon_vcvta_s16_v:
- case NEON::BI__builtin_neon_vcvta_s32_v:
- case NEON::BI__builtin_neon_vcvta_s64_v:
- case NEON::BI__builtin_neon_vcvta_u16_v:
- case NEON::BI__builtin_neon_vcvta_u32_v:
- case NEON::BI__builtin_neon_vcvta_u64_v:
- case NEON::BI__builtin_neon_vcvtaq_s16_v:
- case NEON::BI__builtin_neon_vcvtaq_s32_v:
- case NEON::BI__builtin_neon_vcvtaq_s64_v:
- case NEON::BI__builtin_neon_vcvtaq_u16_v:
- case NEON::BI__builtin_neon_vcvtaq_u32_v:
- case NEON::BI__builtin_neon_vcvtaq_u64_v:
- case NEON::BI__builtin_neon_vcvtn_s16_v:
- case NEON::BI__builtin_neon_vcvtn_s32_v:
- case NEON::BI__builtin_neon_vcvtn_s64_v:
- case NEON::BI__builtin_neon_vcvtn_u16_v:
- case NEON::BI__builtin_neon_vcvtn_u32_v:
- case NEON::BI__builtin_neon_vcvtn_u64_v:
- case NEON::BI__builtin_neon_vcvtnq_s16_v:
- case NEON::BI__builtin_neon_vcvtnq_s32_v:
- case NEON::BI__builtin_neon_vcvtnq_s64_v:
- case NEON::BI__builtin_neon_vcvtnq_u16_v:
- case NEON::BI__builtin_neon_vcvtnq_u32_v:
- case NEON::BI__builtin_neon_vcvtnq_u64_v:
- case NEON::BI__builtin_neon_vcvtp_s16_v:
- case NEON::BI__builtin_neon_vcvtp_s32_v:
- case NEON::BI__builtin_neon_vcvtp_s64_v:
- case NEON::BI__builtin_neon_vcvtp_u16_v:
- case NEON::BI__builtin_neon_vcvtp_u32_v:
- case NEON::BI__builtin_neon_vcvtp_u64_v:
- case NEON::BI__builtin_neon_vcvtpq_s16_v:
- case NEON::BI__builtin_neon_vcvtpq_s32_v:
- case NEON::BI__builtin_neon_vcvtpq_s64_v:
- case NEON::BI__builtin_neon_vcvtpq_u16_v:
- case NEON::BI__builtin_neon_vcvtpq_u32_v:
- case NEON::BI__builtin_neon_vcvtpq_u64_v:
- case NEON::BI__builtin_neon_vcvtm_s16_v:
- case NEON::BI__builtin_neon_vcvtm_s32_v:
- case NEON::BI__builtin_neon_vcvtm_s64_v:
- case NEON::BI__builtin_neon_vcvtm_u16_v:
- case NEON::BI__builtin_neon_vcvtm_u32_v:
- case NEON::BI__builtin_neon_vcvtm_u64_v:
- case NEON::BI__builtin_neon_vcvtmq_s16_v:
- case NEON::BI__builtin_neon_vcvtmq_s32_v:
- case NEON::BI__builtin_neon_vcvtmq_s64_v:
- case NEON::BI__builtin_neon_vcvtmq_u16_v:
- case NEON::BI__builtin_neon_vcvtmq_u32_v:
- case NEON::BI__builtin_neon_vcvtmq_u64_v: {
- llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) };
- return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, NameHint);
- }
- case NEON::BI__builtin_neon_vext_v:
- case NEON::BI__builtin_neon_vextq_v: {
- int CV = cast<ConstantInt>(Ops[2])->getSExtValue();
- SmallVector<uint32_t, 16> Indices;
- for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i)
- Indices.push_back(i+CV);
- Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
- Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
- return Builder.CreateShuffleVector(Ops[0], Ops[1], Indices, "vext");
- }
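- // e.g. vext(<4 x i32> %a, %b, 2) selects lanes {2,3,4,5} of the
- // concatenation:
- //   shufflevector %a, %b, <4 x i32> <i32 2, i32 3, i32 4, i32 5>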
- case NEON::BI__builtin_neon_vfma_v:
- case NEON::BI__builtin_neon_vfmaq_v: {
- Function *F = CGM.getIntrinsic(Intrinsic::fma, Ty);
- Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
- Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
- Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
- // NEON intrinsic puts accumulator first, unlike the LLVM fma.
- return Builder.CreateCall(F, {Ops[1], Ops[2], Ops[0]});
- }
- case NEON::BI__builtin_neon_vld1_v:
- case NEON::BI__builtin_neon_vld1q_v: {
- llvm::Type *Tys[] = {Ty, Int8PtrTy};
- Ops.push_back(getAlignmentValue32(PtrOp0));
- return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, "vld1");
- }
- case NEON::BI__builtin_neon_vld1_x2_v:
- case NEON::BI__builtin_neon_vld1q_x2_v:
- case NEON::BI__builtin_neon_vld1_x3_v:
- case NEON::BI__builtin_neon_vld1q_x3_v:
- case NEON::BI__builtin_neon_vld1_x4_v:
- case NEON::BI__builtin_neon_vld1q_x4_v: {
- llvm::Type *PTy = llvm::PointerType::getUnqual(VTy->getVectorElementType());
- Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
- llvm::Type *Tys[2] = { VTy, PTy };
- Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys);
- Ops[1] = Builder.CreateCall(F, Ops[1], "vld1xN");
- Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
- Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
- return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
- }
- case NEON::BI__builtin_neon_vld2_v:
- case NEON::BI__builtin_neon_vld2q_v:
- case NEON::BI__builtin_neon_vld3_v:
- case NEON::BI__builtin_neon_vld3q_v:
- case NEON::BI__builtin_neon_vld4_v:
- case NEON::BI__builtin_neon_vld4q_v:
- case NEON::BI__builtin_neon_vld2_dup_v:
- case NEON::BI__builtin_neon_vld2q_dup_v:
- case NEON::BI__builtin_neon_vld3_dup_v:
- case NEON::BI__builtin_neon_vld3q_dup_v:
- case NEON::BI__builtin_neon_vld4_dup_v:
- case NEON::BI__builtin_neon_vld4q_dup_v: {
- llvm::Type *Tys[] = {Ty, Int8PtrTy};
- Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys);
- Value *Align = getAlignmentValue32(PtrOp1);
- Ops[1] = Builder.CreateCall(F, {Ops[1], Align}, NameHint);
- Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
- Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
- return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
- }
- case NEON::BI__builtin_neon_vld1_dup_v:
- case NEON::BI__builtin_neon_vld1q_dup_v: {
- Value *V = UndefValue::get(Ty);
- Ty = llvm::PointerType::getUnqual(VTy->getElementType());
- PtrOp0 = Builder.CreateBitCast(PtrOp0, Ty);
- LoadInst *Ld = Builder.CreateLoad(PtrOp0);
- llvm::Constant *CI = ConstantInt::get(SizeTy, 0);
- Ops[0] = Builder.CreateInsertElement(V, Ld, CI);
- return EmitNeonSplat(Ops[0], CI);
- }
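- // e.g. vld1_dup_s32 emits, roughly:
- //   %ld = load i32, i32* %p
- //   %v  = insertelement <2 x i32> undef, i32 %ld, i32 0
- //   %lane = shufflevector <2 x i32> %v, <2 x i32> %v,
- //                         <2 x i32> zeroinitializer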
- case NEON::BI__builtin_neon_vld2_lane_v:
- case NEON::BI__builtin_neon_vld2q_lane_v:
- case NEON::BI__builtin_neon_vld3_lane_v:
- case NEON::BI__builtin_neon_vld3q_lane_v:
- case NEON::BI__builtin_neon_vld4_lane_v:
- case NEON::BI__builtin_neon_vld4q_lane_v: {
- llvm::Type *Tys[] = {Ty, Int8PtrTy};
- Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys);
- for (unsigned I = 2; I < Ops.size() - 1; ++I)
- Ops[I] = Builder.CreateBitCast(Ops[I], Ty);
- Ops.push_back(getAlignmentValue32(PtrOp1));
- Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), NameHint);
- Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
- Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
- return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
- }
- case NEON::BI__builtin_neon_vmovl_v: {
- llvm::Type *DTy = llvm::VectorType::getTruncatedElementVectorType(VTy);
- Ops[0] = Builder.CreateBitCast(Ops[0], DTy);
- if (Usgn)
- return Builder.CreateZExt(Ops[0], Ty, "vmovl");
- return Builder.CreateSExt(Ops[0], Ty, "vmovl");
- }
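- // e.g. vmovl_s16: bitcast to <4 x i16>, then sext to <4 x i32>.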
- case NEON::BI__builtin_neon_vmovn_v: {
- llvm::Type *QTy = llvm::VectorType::getExtendedElementVectorType(VTy);
- Ops[0] = Builder.CreateBitCast(Ops[0], QTy);
- return Builder.CreateTrunc(Ops[0], Ty, "vmovn");
- }
- case NEON::BI__builtin_neon_vmull_v:
- // FIXME: the integer vmull operations could be emitted in terms of pure
- // LLVM IR (2 exts followed by a mul). Unfortunately LLVM has a habit of
- // hoisting the exts outside loops; until global ISel comes along and can
- // see through such movement, this leads to bad CodeGen. So we need an
- // intrinsic for now.
- Int = Usgn ? Intrinsic::arm_neon_vmullu : Intrinsic::arm_neon_vmulls;
- Int = Type.isPoly() ? (unsigned)Intrinsic::arm_neon_vmullp : Int;
- return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmull");
- case NEON::BI__builtin_neon_vpadal_v:
- case NEON::BI__builtin_neon_vpadalq_v: {
- // The source operand type has twice as many elements of half the size.
- unsigned EltBits = VTy->getElementType()->getPrimitiveSizeInBits();
- llvm::Type *EltTy =
- llvm::IntegerType::get(getLLVMContext(), EltBits / 2);
- llvm::Type *NarrowTy =
- llvm::VectorType::get(EltTy, VTy->getNumElements() * 2);
- llvm::Type *Tys[2] = { Ty, NarrowTy };
- return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, NameHint);
- }
- case NEON::BI__builtin_neon_vpaddl_v:
- case NEON::BI__builtin_neon_vpaddlq_v: {
- // The source operand type has twice as many elements of half the size.
- unsigned EltBits = VTy->getElementType()->getPrimitiveSizeInBits();
- llvm::Type *EltTy = llvm::IntegerType::get(getLLVMContext(), EltBits / 2);
- llvm::Type *NarrowTy =
- llvm::VectorType::get(EltTy, VTy->getNumElements() * 2);
- llvm::Type *Tys[2] = { Ty, NarrowTy };
- return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vpaddl");
- }
- case NEON::BI__builtin_neon_vqdmlal_v:
- case NEON::BI__builtin_neon_vqdmlsl_v: {
- SmallVector<Value *, 2> MulOps(Ops.begin() + 1, Ops.end());
- Ops[1] =
- EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Ty), MulOps, "vqdmlal");
- Ops.resize(2);
- return EmitNeonCall(CGM.getIntrinsic(AltLLVMIntrinsic, Ty), Ops, NameHint);
- }
- case NEON::BI__builtin_neon_vqshl_n_v:
- case NEON::BI__builtin_neon_vqshlq_n_v:
- return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshl_n",
- 1, false);
- case NEON::BI__builtin_neon_vqshlu_n_v:
- case NEON::BI__builtin_neon_vqshluq_n_v:
- return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshlu_n",
- 1, false);
- case NEON::BI__builtin_neon_vrecpe_v:
- case NEON::BI__builtin_neon_vrecpeq_v:
- case NEON::BI__builtin_neon_vrsqrte_v:
- case NEON::BI__builtin_neon_vrsqrteq_v:
- Int = Ty->isFPOrFPVectorTy() ? LLVMIntrinsic : AltLLVMIntrinsic;
- return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, NameHint);
- case NEON::BI__builtin_neon_vrndi_v:
- case NEON::BI__builtin_neon_vrndiq_v:
- Int = Intrinsic::nearbyint;
- return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, NameHint);
- case NEON::BI__builtin_neon_vrshr_n_v:
- case NEON::BI__builtin_neon_vrshrq_n_v:
- return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrshr_n",
- 1, true);
- case NEON::BI__builtin_neon_vshl_n_v:
- case NEON::BI__builtin_neon_vshlq_n_v:
- Ops[1] = EmitNeonShiftVector(Ops[1], Ty, false);
- return Builder.CreateShl(Builder.CreateBitCast(Ops[0], Ty), Ops[1],
- "vshl_n");
- case NEON::BI__builtin_neon_vshll_n_v: {
- llvm::Type *SrcTy = llvm::VectorType::getTruncatedElementVectorType(VTy);
- Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy);
- if (Usgn)
- Ops[0] = Builder.CreateZExt(Ops[0], VTy);
- else
- Ops[0] = Builder.CreateSExt(Ops[0], VTy);
- Ops[1] = EmitNeonShiftVector(Ops[1], VTy, false);
- return Builder.CreateShl(Ops[0], Ops[1], "vshll_n");
- }
- case NEON::BI__builtin_neon_vshrn_n_v: {
- llvm::Type *SrcTy = llvm::VectorType::getExtendedElementVectorType(VTy);
- Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy);
- Ops[1] = EmitNeonShiftVector(Ops[1], SrcTy, false);
- if (Usgn)
- Ops[0] = Builder.CreateLShr(Ops[0], Ops[1]);
- else
- Ops[0] = Builder.CreateAShr(Ops[0], Ops[1]);
- return Builder.CreateTrunc(Ops[0], Ty, "vshrn_n");
- }
- case NEON::BI__builtin_neon_vshr_n_v:
- case NEON::BI__builtin_neon_vshrq_n_v:
- return EmitNeonRShiftImm(Ops[0], Ops[1], Ty, Usgn, "vshr_n");
- case NEON::BI__builtin_neon_vst1_v:
- case NEON::BI__builtin_neon_vst1q_v:
- case NEON::BI__builtin_neon_vst2_v:
- case NEON::BI__builtin_neon_vst2q_v:
- case NEON::BI__builtin_neon_vst3_v:
- case NEON::BI__builtin_neon_vst3q_v:
- case NEON::BI__builtin_neon_vst4_v:
- case NEON::BI__builtin_neon_vst4q_v:
- case NEON::BI__builtin_neon_vst2_lane_v:
- case NEON::BI__builtin_neon_vst2q_lane_v:
- case NEON::BI__builtin_neon_vst3_lane_v:
- case NEON::BI__builtin_neon_vst3q_lane_v:
- case NEON::BI__builtin_neon_vst4_lane_v:
- case NEON::BI__builtin_neon_vst4q_lane_v: {
- llvm::Type *Tys[] = {Int8PtrTy, Ty};
- Ops.push_back(getAlignmentValue32(PtrOp0));
- return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "");
- }
- case NEON::BI__builtin_neon_vst1_x2_v:
- case NEON::BI__builtin_neon_vst1q_x2_v:
- case NEON::BI__builtin_neon_vst1_x3_v:
- case NEON::BI__builtin_neon_vst1q_x3_v:
- case NEON::BI__builtin_neon_vst1_x4_v:
- case NEON::BI__builtin_neon_vst1q_x4_v: {
- llvm::Type *PTy = llvm::PointerType::getUnqual(VTy->getVectorElementType());
- // TODO: Currently in AArch32 mode the pointer operand comes first, whereas
- // in AArch64 it comes last. We may want to stick to one or the other.
- if (Arch == llvm::Triple::aarch64 || Arch == llvm::Triple::aarch64_be) {
- llvm::Type *Tys[2] = { VTy, PTy };
- std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end());
- return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, "");
- }
- llvm::Type *Tys[2] = { PTy, VTy };
- return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, "");
- }
- case NEON::BI__builtin_neon_vsubhn_v: {
- llvm::VectorType *SrcTy =
- llvm::VectorType::getExtendedElementVectorType(VTy);
- // %diff = sub <4 x i32> %lhs, %rhs
- Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy);
- Ops[1] = Builder.CreateBitCast(Ops[1], SrcTy);
- Ops[0] = Builder.CreateSub(Ops[0], Ops[1], "vsubhn");
- // %high = lshr <4 x i32> %diff, <i32 16, i32 16, i32 16, i32 16>
- Constant *ShiftAmt =
- ConstantInt::get(SrcTy, SrcTy->getScalarSizeInBits() / 2);
- Ops[0] = Builder.CreateLShr(Ops[0], ShiftAmt, "vsubhn");
- // %res = trunc <4 x i32> %high to <4 x i16>
- return Builder.CreateTrunc(Ops[0], VTy, "vsubhn");
- }
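- // vtrn returns two vectors through the sret-style pointer in Ops[0]. For
- // a 4-element vector the two shuffles below use index vectors <0,4,2,6>
- // and <1,5,3,7>: the first result takes the even lanes of both inputs
- // and the second takes the odd lanes.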
- case NEON::BI__builtin_neon_vtrn_v:
- case NEON::BI__builtin_neon_vtrnq_v: {
- Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
- Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
- Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
- Value *SV = nullptr;
- for (unsigned vi = 0; vi != 2; ++vi) {
- SmallVector<uint32_t, 16> Indices;
- for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) {
- Indices.push_back(i+vi);
- Indices.push_back(i+e+vi);
- }
- Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
- SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vtrn");
- SV = Builder.CreateDefaultAlignedStore(SV, Addr);
- }
- return SV;
- }
- case NEON::BI__builtin_neon_vtst_v:
- case NEON::BI__builtin_neon_vtstq_v: {
- Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
- Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
- Ops[0] = Builder.CreateAnd(Ops[0], Ops[1]);
- Ops[0] = Builder.CreateICmp(ICmpInst::ICMP_NE, Ops[0],
- ConstantAggregateZero::get(Ty));
- return Builder.CreateSExt(Ops[0], Ty, "vtst");
- }
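- // vuzp de-interleaves the two inputs: for a 4-element vector the shuffles
- // below use index vectors <0,2,4,6> (even lanes) and <1,3,5,7> (odd
- // lanes), and the results are stored through the sret-style pointer in
- // Ops[0].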
- case NEON::BI__builtin_neon_vuzp_v:
- case NEON::BI__builtin_neon_vuzpq_v: {
- Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
- Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
- Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
- Value *SV = nullptr;
- for (unsigned vi = 0; vi != 2; ++vi) {
- SmallVector<uint32_t, 16> Indices;
- for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i)
- Indices.push_back(2*i+vi);
- Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
- SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vuzp");
- SV = Builder.CreateDefaultAlignedStore(SV, Addr);
- }
- return SV;
- }
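- // vzip interleaves the two inputs: for a 4-element vector the shuffles
- // below use index vectors <0,4,1,5> and <2,6,3,7>, zipping the low and
- // high halves of the input pair respectively.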
- case NEON::BI__builtin_neon_vzip_v:
- case NEON::BI__builtin_neon_vzipq_v: {
- Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
- Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
- Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
- Value *SV = nullptr;
- for (unsigned vi = 0; vi != 2; ++vi) {
- SmallVector<uint32_t, 16> Indices;
- for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) {
- Indices.push_back((i + vi*e) >> 1);
- Indices.push_back(((i + vi*e) >> 1)+e);
- }
- Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
- SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vzip");
- SV = Builder.CreateDefaultAlignedStore(SV, Addr);
- }
- return SV;
- }
- case NEON::BI__builtin_neon_vdot_v:
- case NEON::BI__builtin_neon_vdotq_v: {
- llvm::Type *InputTy =
- llvm::VectorType::get(Int8Ty, Ty->getPrimitiveSizeInBits() / 8);
- llvm::Type *Tys[2] = { Ty, InputTy };
- Int = Usgn ? LLVMIntrinsic : AltLLVMIntrinsic;
- return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vdot");
- }
- case NEON::BI__builtin_neon_vfmlal_low_v:
- case NEON::BI__builtin_neon_vfmlalq_low_v: {
- llvm::Type *InputTy =
- llvm::VectorType::get(HalfTy, Ty->getPrimitiveSizeInBits() / 16);
- llvm::Type *Tys[2] = { Ty, InputTy };
- return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vfmlal_low");
- }
- case NEON::BI__builtin_neon_vfmlsl_low_v:
- case NEON::BI__builtin_neon_vfmlslq_low_v: {
- llvm::Type *InputTy =
- llvm::VectorType::get(HalfTy, Ty->getPrimitiveSizeInBits() / 16);
- llvm::Type *Tys[2] = { Ty, InputTy };
- return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vfmlsl_low");
- }
- case NEON::BI__builtin_neon_vfmlal_high_v:
- case NEON::BI__builtin_neon_vfmlalq_high_v: {
- llvm::Type *InputTy =
- llvm::VectorType::get(HalfTy, Ty->getPrimitiveSizeInBits() / 16);
- llvm::Type *Tys[2] = { Ty, InputTy };
- return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vfmlal_high");
- }
- case NEON::BI__builtin_neon_vfmlsl_high_v:
- case NEON::BI__builtin_neon_vfmlslq_high_v: {
- llvm::Type *InputTy =
- llvm::VectorType::get(HalfTy, Ty->getPrimitiveSizeInBits() / 16);
- llvm::Type *Tys[2] = { Ty, InputTy };
- return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vfmlsl_high");
- }
- }
- assert(Int && "Expected valid intrinsic number");
- // Determine the type(s) of this overloaded AArch64 intrinsic.
- Function *F = LookupNeonLLVMIntrinsic(Int, Modifier, Ty, E);
- Value *Result = EmitNeonCall(F, Ops, NameHint);
- llvm::Type *ResultType = ConvertType(E->getType());
- // Cast the one-element vector result of the AArch64 intrinsic to the
- // scalar type expected by the builtin.
- return Builder.CreateBitCast(Result, ResultType, NameHint);
- }
- Value *CodeGenFunction::EmitAArch64CompareBuiltinExpr(
- Value *Op, llvm::Type *Ty, const CmpInst::Predicate Fp,
- const CmpInst::Predicate Ip, const Twine &Name) {
- llvm::Type *OTy = Op->getType();
- // FIXME: this is utterly horrific. We should not be looking at previous
- // codegen context to find out what needs doing. Unfortunately TableGen
- // currently gives us exactly the same calls for vceqz_f32 and vceqz_s32
- // (etc).
- if (BitCastInst *BI = dyn_cast<BitCastInst>(Op))
- OTy = BI->getOperand(0)->getType();
- Op = Builder.CreateBitCast(Op, OTy);
- if (OTy->getScalarType()->isFloatingPointTy()) {
- Op = Builder.CreateFCmp(Fp, Op, Constant::getNullValue(OTy));
- } else {
- Op = Builder.CreateICmp(Ip, Op, Constant::getNullValue(OTy));
- }
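- // E.g. vceqz_f32 on a <2 x float> operand yields
- //   %cmp = fcmp oeq <2 x float> %a, zeroinitializer
- //   %res = sext <2 x i1> %cmp to <2 x i32>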
- return Builder.CreateSExt(Op, Ty, Name);
- }
- static Value *packTBLDVectorList(CodeGenFunction &CGF, ArrayRef<Value *> Ops,
- Value *ExtOp, Value *IndexOp,
- llvm::Type *ResTy, unsigned IntID,
- const char *Name) {
- SmallVector<Value *, 2> TblOps;
- if (ExtOp)
- TblOps.push_back(ExtOp);
- // Build a vector containing sequential numbers like (0, 1, 2, ..., 15).
- SmallVector<uint32_t, 16> Indices;
- llvm::VectorType *TblTy = cast<llvm::VectorType>(Ops[0]->getType());
- for (unsigned i = 0, e = TblTy->getNumElements(); i != e; ++i) {
- Indices.push_back(2*i);
- Indices.push_back(2*i+1);
- }
- int PairPos = 0, End = Ops.size() - 1;
- while (PairPos < End) {
- TblOps.push_back(CGF.Builder.CreateShuffleVector(Ops[PairPos],
- Ops[PairPos+1], Indices,
- Name));
- PairPos += 2;
- }
- // If there's an odd number of 64-bit lookup tables, fill the high 64 bits
- // of the last 128-bit lookup table with zero.
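- // E.g. vtbl3 passes three 64-bit tables: the loop above pairs the first
- // two into one 128-bit table, and the trailing table is widened here with
- // a zero upper half, yielding the two 128-bit tables that
- // aarch64.neon.tbl2 expects.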
- if (PairPos == End) {
- Value *ZeroTbl = ConstantAggregateZero::get(TblTy);
- TblOps.push_back(CGF.Builder.CreateShuffleVector(Ops[PairPos],
- ZeroTbl, Indices, Name));
- }
- Function *TblF;
- TblOps.push_back(IndexOp);
- TblF = CGF.CGM.getIntrinsic(IntID, ResTy);
- return CGF.EmitNeonCall(TblF, TblOps, Name);
- }
- Value *CodeGenFunction::GetValueForARMHint(unsigned BuiltinID) {
- unsigned Value;
- switch (BuiltinID) {
- default:
- return nullptr;
- case ARM::BI__builtin_arm_nop:
- Value = 0;
- break;
- case ARM::BI__builtin_arm_yield:
- case ARM::BI__yield:
- Value = 1;
- break;
- case ARM::BI__builtin_arm_wfe:
- case ARM::BI__wfe:
- Value = 2;
- break;
- case ARM::BI__builtin_arm_wfi:
- case ARM::BI__wfi:
- Value = 3;
- break;
- case ARM::BI__builtin_arm_sev:
- case ARM::BI__sev:
- Value = 4;
- break;
- case ARM::BI__builtin_arm_sevl:
- case ARM::BI__sevl:
- Value = 5;
- break;
- }
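- // E.g. __wfi() maps to Value == 3 and lowers to
- //   call void @llvm.arm.hint(i32 3)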
- return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_hint),
- llvm::ConstantInt::get(Int32Ty, Value));
- }
- // Generates the IR for the read/write special register builtin.
- // ValueType is the type of the value that is to be written or read,
- // RegisterType is the type of the register being written to or read from.
- static Value *EmitSpecialRegisterBuiltin(CodeGenFunction &CGF,
- const CallExpr *E,
- llvm::Type *RegisterType,
- llvm::Type *ValueType,
- bool IsRead,
- StringRef SysReg = "") {
- // The read and write register intrinsics only support 32- and 64-bit
- // operations.
- assert((RegisterType->isIntegerTy(32) || RegisterType->isIntegerTy(64))
- && "Unsupported size for register.");
- CodeGen::CGBuilderTy &Builder = CGF.Builder;
- CodeGen::CodeGenModule &CGM = CGF.CGM;
- LLVMContext &Context = CGM.getLLVMContext();
- if (SysReg.empty()) {
- const Expr *SysRegStrExpr = E->getArg(0)->IgnoreParenCasts();
- SysReg = cast<clang::StringLiteral>(SysRegStrExpr)->getString();
- }
- llvm::Metadata *Ops[] = { llvm::MDString::get(Context, SysReg) };
- llvm::MDNode *RegName = llvm::MDNode::get(Context, Ops);
- llvm::Value *Metadata = llvm::MetadataAsValue::get(Context, RegName);
- llvm::Type *Types[] = { RegisterType };
- bool MixedTypes = RegisterType->isIntegerTy(64) && ValueType->isIntegerTy(32);
- assert(!(RegisterType->isIntegerTy(32) && ValueType->isIntegerTy(64))
- && "Can't fit 64-bit value in 32-bit register");
- if (IsRead) {
- llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::read_register, Types);
- llvm::Value *Call = Builder.CreateCall(F, Metadata);
- if (MixedTypes)
- // Read into a 64-bit register, then truncate the result to 32 bits.
- return Builder.CreateTrunc(Call, ValueType);
- if (ValueType->isPointerTy())
- // Have i32/i64 result (Call) but want to return a VoidPtrTy (i8*).
- return Builder.CreateIntToPtr(Call, ValueType);
- return Call;
- }
- llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::write_register, Types);
- llvm::Value *ArgValue = CGF.EmitScalarExpr(E->getArg(1));
- if (MixedTypes) {
- // Extend the 32-bit write value to 64 bits to pass to the write intrinsic.
- ArgValue = Builder.CreateZExt(ArgValue, RegisterType);
- return Builder.CreateCall(F, { Metadata, ArgValue });
- }
- if (ValueType->isPointerTy()) {
- // Have VoidPtrTy ArgValue but want to return an i32/i64.
- ArgValue = Builder.CreatePtrToInt(ArgValue, RegisterType);
- return Builder.CreateCall(F, { Metadata, ArgValue });
- }
- return Builder.CreateCall(F, { Metadata, ArgValue });
- }
- /// Return true if BuiltinID is an overloaded Neon intrinsic with an extra
- /// argument that specifies the vector type.
- static bool HasExtraNeonArgument(unsigned BuiltinID) {
- switch (BuiltinID) {
- default: break;
- case NEON::BI__builtin_neon_vget_lane_i8:
- case NEON::BI__builtin_neon_vget_lane_i16:
- case NEON::BI__builtin_neon_vget_lane_i32:
- case NEON::BI__builtin_neon_vget_lane_i64:
- case NEON::BI__builtin_neon_vget_lane_f32:
- case NEON::BI__builtin_neon_vgetq_lane_i8:
- case NEON::BI__builtin_neon_vgetq_lane_i16:
- case NEON::BI__builtin_neon_vgetq_lane_i32:
- case NEON::BI__builtin_neon_vgetq_lane_i64:
- case NEON::BI__builtin_neon_vgetq_lane_f32:
- case NEON::BI__builtin_neon_vset_lane_i8:
- case NEON::BI__builtin_neon_vset_lane_i16:
- case NEON::BI__builtin_neon_vset_lane_i32:
- case NEON::BI__builtin_neon_vset_lane_i64:
- case NEON::BI__builtin_neon_vset_lane_f32:
- case NEON::BI__builtin_neon_vsetq_lane_i8:
- case NEON::BI__builtin_neon_vsetq_lane_i16:
- case NEON::BI__builtin_neon_vsetq_lane_i32:
- case NEON::BI__builtin_neon_vsetq_lane_i64:
- case NEON::BI__builtin_neon_vsetq_lane_f32:
- case NEON::BI__builtin_neon_vsha1h_u32:
- case NEON::BI__builtin_neon_vsha1cq_u32:
- case NEON::BI__builtin_neon_vsha1pq_u32:
- case NEON::BI__builtin_neon_vsha1mq_u32:
- case clang::ARM::BI_MoveToCoprocessor:
- case clang::ARM::BI_MoveToCoprocessor2:
- return false;
- }
- return true;
- }
- Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
- const CallExpr *E,
- llvm::Triple::ArchType Arch) {
- if (auto Hint = GetValueForARMHint(BuiltinID))
- return Hint;
- if (BuiltinID == ARM::BI__emit) {
- bool IsThumb = getTarget().getTriple().getArch() == llvm::Triple::thumb;
- llvm::FunctionType *FTy =
- llvm::FunctionType::get(VoidTy, /*Variadic=*/false);
- Expr::EvalResult Result;
- if (!E->getArg(0)->EvaluateAsInt(Result, CGM.getContext()))
- llvm_unreachable("Sema will ensure that the parameter is constant");
- llvm::APSInt Value = Result.Val.getInt();
- uint64_t ZExtValue = Value.zextOrTrunc(IsThumb ? 16 : 32).getZExtValue();
- llvm::InlineAsm *Emit =
- IsThumb ? InlineAsm::get(FTy, ".inst.n 0x" + utohexstr(ZExtValue), "",
- /*hasSideEffects=*/true)
- : InlineAsm::get(FTy, ".inst 0x" + utohexstr(ZExtValue), "",
- /*hasSideEffects=*/true);
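- // E.g. __emit(0xbf30) in Thumb mode becomes the inline asm
- //   ".inst.n 0xbf30"
- // (a 16-bit encoding); in ARM mode ".inst" is used with a 32-bit value.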
- return Builder.CreateCall(Emit);
- }
- if (BuiltinID == ARM::BI__builtin_arm_dbg) {
- Value *Option = EmitScalarExpr(E->getArg(0));
- return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_dbg), Option);
- }
- if (BuiltinID == ARM::BI__builtin_arm_prefetch) {
- Value *Address = EmitScalarExpr(E->getArg(0));
- Value *RW = EmitScalarExpr(E->getArg(1));
- Value *IsData = EmitScalarExpr(E->getArg(2));
- // Locality is not supported on the ARM target.
- Value *Locality = llvm::ConstantInt::get(Int32Ty, 3);
- Function *F = CGM.getIntrinsic(Intrinsic::prefetch);
- return Builder.CreateCall(F, {Address, RW, Locality, IsData});
- }
- if (BuiltinID == ARM::BI__builtin_arm_rbit) {
- llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
- return Builder.CreateCall(
- CGM.getIntrinsic(Intrinsic::bitreverse, Arg->getType()), Arg, "rbit");
- }
- if (BuiltinID == ARM::BI__clear_cache) {
- assert(E->getNumArgs() == 2 && "__clear_cache takes 2 arguments");
- const FunctionDecl *FD = E->getDirectCallee();
- Value *Ops[2];
- for (unsigned i = 0; i < 2; i++)
- Ops[i] = EmitScalarExpr(E->getArg(i));
- llvm::Type *Ty = CGM.getTypes().ConvertType(FD->getType());
- llvm::FunctionType *FTy = cast<llvm::FunctionType>(Ty);
- StringRef Name = FD->getName();
- return EmitNounwindRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name), Ops);
- }
- if (BuiltinID == ARM::BI__builtin_arm_mcrr ||
- BuiltinID == ARM::BI__builtin_arm_mcrr2) {
- Function *F;
- switch (BuiltinID) {
- default: llvm_unreachable("unexpected builtin");
- case ARM::BI__builtin_arm_mcrr:
- F = CGM.getIntrinsic(Intrinsic::arm_mcrr);
- break;
- case ARM::BI__builtin_arm_mcrr2:
- F = CGM.getIntrinsic(Intrinsic::arm_mcrr2);
- break;
- }
- // The MCRR{2} instruction has 5 operands, but the intrinsic has 4 because
- // Rt and Rt2 are represented as a single unsigned 64-bit integer in the
- // intrinsic definition, while internally they are represented as two
- // 32-bit integers.
- Value *Coproc = EmitScalarExpr(E->getArg(0));
- Value *Opc1 = EmitScalarExpr(E->getArg(1));
- Value *RtAndRt2 = EmitScalarExpr(E->getArg(2));
- Value *CRm = EmitScalarExpr(E->getArg(3));
- Value *C1 = llvm::ConstantInt::get(Int64Ty, 32);
- Value *Rt = Builder.CreateTruncOrBitCast(RtAndRt2, Int32Ty);
- Value *Rt2 = Builder.CreateLShr(RtAndRt2, C1);
- Rt2 = Builder.CreateTruncOrBitCast(Rt2, Int32Ty);
- return Builder.CreateCall(F, {Coproc, Opc1, Rt, Rt2, CRm});
- }
- if (BuiltinID == ARM::BI__builtin_arm_mrrc ||
- BuiltinID == ARM::BI__builtin_arm_mrrc2) {
- Function *F;
- switch (BuiltinID) {
- default: llvm_unreachable("unexpected builtin");
- case ARM::BI__builtin_arm_mrrc:
- F = CGM.getIntrinsic(Intrinsic::arm_mrrc);
- break;
- case ARM::BI__builtin_arm_mrrc2:
- F = CGM.getIntrinsic(Intrinsic::arm_mrrc2);
- break;
- }
- Value *Coproc = EmitScalarExpr(E->getArg(0));
- Value *Opc1 = EmitScalarExpr(E->getArg(1));
- Value *CRm = EmitScalarExpr(E->getArg(2));
- Value *RtAndRt2 = Builder.CreateCall(F, {Coproc, Opc1, CRm});
- // The intrinsic returns an unsigned 64-bit integer represented as two
- // 32-bit integers.
- Value *Rt = Builder.CreateExtractValue(RtAndRt2, 1);
- Value *Rt1 = Builder.CreateExtractValue(RtAndRt2, 0);
- Rt = Builder.CreateZExt(Rt, Int64Ty);
- Rt1 = Builder.CreateZExt(Rt1, Int64Ty);
- Value *ShiftCast = llvm::ConstantInt::get(Int64Ty, 32);
- RtAndRt2 = Builder.CreateShl(Rt, ShiftCast, "shl", true);
- RtAndRt2 = Builder.CreateOr(RtAndRt2, Rt1);
- return Builder.CreateBitCast(RtAndRt2, ConvertType(E->getType()));
- }
- if (BuiltinID == ARM::BI__builtin_arm_ldrexd ||
- ((BuiltinID == ARM::BI__builtin_arm_ldrex ||
- BuiltinID == ARM::BI__builtin_arm_ldaex) &&
- getContext().getTypeSize(E->getType()) == 64) ||
- BuiltinID == ARM::BI__ldrexd) {
- Function *F;
- switch (BuiltinID) {
- default: llvm_unreachable("unexpected builtin");
- case ARM::BI__builtin_arm_ldaex:
- F = CGM.getIntrinsic(Intrinsic::arm_ldaexd);
- break;
- case ARM::BI__builtin_arm_ldrexd:
- case ARM::BI__builtin_arm_ldrex:
- case ARM::BI__ldrexd:
- F = CGM.getIntrinsic(Intrinsic::arm_ldrexd);
- break;
- }
- Value *LdPtr = EmitScalarExpr(E->getArg(0));
- Value *Val = Builder.CreateCall(F, Builder.CreateBitCast(LdPtr, Int8PtrTy),
- "ldrexd");
- Value *Val0 = Builder.CreateExtractValue(Val, 1);
- Value *Val1 = Builder.CreateExtractValue(Val, 0);
- Val0 = Builder.CreateZExt(Val0, Int64Ty);
- Val1 = Builder.CreateZExt(Val1, Int64Ty);
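- // Recombine the two 32-bit halves into the i64 result:
- //   (zext(hi) << 32) | zext(lo)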
- Value *ShiftCst = llvm::ConstantInt::get(Int64Ty, 32);
- Val = Builder.CreateShl(Val0, ShiftCst, "shl", true /* nuw */);
- Val = Builder.CreateOr(Val, Val1);
- return Builder.CreateBitCast(Val, ConvertType(E->getType()));
- }
- if (BuiltinID == ARM::BI__builtin_arm_ldrex ||
- BuiltinID == ARM::BI__builtin_arm_ldaex) {
- Value *LoadAddr = EmitScalarExpr(E->getArg(0));
- QualType Ty = E->getType();
- llvm::Type *RealResTy = ConvertType(Ty);
- llvm::Type *PtrTy = llvm::IntegerType::get(
- getLLVMContext(), getContext().getTypeSize(Ty))->getPointerTo();
- LoadAddr = Builder.CreateBitCast(LoadAddr, PtrTy);
- Function *F = CGM.getIntrinsic(BuiltinID == ARM::BI__builtin_arm_ldaex
- ? Intrinsic::arm_ldaex
- : Intrinsic::arm_ldrex,
- PtrTy);
- Value *Val = Builder.CreateCall(F, LoadAddr, "ldrex");
- if (RealResTy->isPointerTy())
- return Builder.CreateIntToPtr(Val, RealResTy);
- else {
- llvm::Type *IntResTy = llvm::IntegerType::get(
- getLLVMContext(), CGM.getDataLayout().getTypeSizeInBits(RealResTy));
- Val = Builder.CreateTruncOrBitCast(Val, IntResTy);
- return Builder.CreateBitCast(Val, RealResTy);
- }
- }
- if (BuiltinID == ARM::BI__builtin_arm_strexd ||
- ((BuiltinID == ARM::BI__builtin_arm_stlex ||
- BuiltinID == ARM::BI__builtin_arm_strex) &&
- getContext().getTypeSize(E->getArg(0)->getType()) == 64)) {
- Function *F = CGM.getIntrinsic(BuiltinID == ARM::BI__builtin_arm_stlex
- ? Intrinsic::arm_stlexd
- : Intrinsic::arm_strexd);
- llvm::Type *STy = llvm::StructType::get(Int32Ty, Int32Ty);
- Address Tmp = CreateMemTemp(E->getArg(0)->getType());
- Value *Val = EmitScalarExpr(E->getArg(0));
- Builder.CreateStore(Val, Tmp);
- Address LdPtr = Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(STy));
- Val = Builder.CreateLoad(LdPtr);
- Value *Arg0 = Builder.CreateExtractValue(Val, 0);
- Value *Arg1 = Builder.CreateExtractValue(Val, 1);
- Value *StPtr = Builder.CreateBitCast(EmitScalarExpr(E->getArg(1)), Int8PtrTy);
- return Builder.CreateCall(F, {Arg0, Arg1, StPtr}, "strexd");
- }
- if (BuiltinID == ARM::BI__builtin_arm_strex ||
- BuiltinID == ARM::BI__builtin_arm_stlex) {
- Value *StoreVal = EmitScalarExpr(E->getArg(0));
- Value *StoreAddr = EmitScalarExpr(E->getArg(1));
- QualType Ty = E->getArg(0)->getType();
- llvm::Type *StoreTy = llvm::IntegerType::get(getLLVMContext(),
- getContext().getTypeSize(Ty));
- StoreAddr = Builder.CreateBitCast(StoreAddr, StoreTy->getPointerTo());
- if (StoreVal->getType()->isPointerTy())
- StoreVal = Builder.CreatePtrToInt(StoreVal, Int32Ty);
- else {
- llvm::Type *IntTy = llvm::IntegerType::get(
- getLLVMContext(),
- CGM.getDataLayout().getTypeSizeInBits(StoreVal->getType()));
- StoreVal = Builder.CreateBitCast(StoreVal, IntTy);
- StoreVal = Builder.CreateZExtOrBitCast(StoreVal, Int32Ty);
- }
- Function *F = CGM.getIntrinsic(BuiltinID == ARM::BI__builtin_arm_stlex
- ? Intrinsic::arm_stlex
- : Intrinsic::arm_strex,
- StoreAddr->getType());
- return Builder.CreateCall(F, {StoreVal, StoreAddr}, "strex");
- }
- if (BuiltinID == ARM::BI__builtin_arm_clrex) {
- Function *F = CGM.getIntrinsic(Intrinsic::arm_clrex);
- return Builder.CreateCall(F);
- }
- // CRC32
- Intrinsic::ID CRCIntrinsicID = Intrinsic::not_intrinsic;
- switch (BuiltinID) {
- case ARM::BI__builtin_arm_crc32b:
- CRCIntrinsicID = Intrinsic::arm_crc32b; break;
- case ARM::BI__builtin_arm_crc32cb:
- CRCIntrinsicID = Intrinsic::arm_crc32cb; break;
- case ARM::BI__builtin_arm_crc32h:
- CRCIntrinsicID = Intrinsic::arm_crc32h; break;
- case ARM::BI__builtin_arm_crc32ch:
- CRCIntrinsicID = Intrinsic::arm_crc32ch; break;
- case ARM::BI__builtin_arm_crc32w:
- case ARM::BI__builtin_arm_crc32d:
- CRCIntrinsicID = Intrinsic::arm_crc32w; break;
- case ARM::BI__builtin_arm_crc32cw:
- case ARM::BI__builtin_arm_crc32cd:
- CRCIntrinsicID = Intrinsic::arm_crc32cw; break;
- }
- if (CRCIntrinsicID != Intrinsic::not_intrinsic) {
- Value *Arg0 = EmitScalarExpr(E->getArg(0));
- Value *Arg1 = EmitScalarExpr(E->getArg(1));
- // The crc32{c,}d intrinsics are implemented as two calls to the crc32{c,}w
- // intrinsics, hence we need different codegen for these cases.
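- // E.g. __builtin_arm_crc32d(acc, x) becomes, roughly:
- //   %lo  = call i32 @llvm.arm.crc32w(i32 %acc, i32 trunc(%x))
- //   %res = call i32 @llvm.arm.crc32w(i32 %lo, i32 trunc(%x lshr 32))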
- if (BuiltinID == ARM::BI__builtin_arm_crc32d ||
- BuiltinID == ARM::BI__builtin_arm_crc32cd) {
- Value *C1 = llvm::ConstantInt::get(Int64Ty, 32);
- Value *Arg1a = Builder.CreateTruncOrBitCast(Arg1, Int32Ty);
- Value *Arg1b = Builder.CreateLShr(Arg1, C1);
- Arg1b = Builder.CreateTruncOrBitCast(Arg1b, Int32Ty);
- Function *F = CGM.getIntrinsic(CRCIntrinsicID);
- Value *Res = Builder.CreateCall(F, {Arg0, Arg1a});
- return Builder.CreateCall(F, {Res, Arg1b});
- } else {
- Arg1 = Builder.CreateZExtOrBitCast(Arg1, Int32Ty);
- Function *F = CGM.getIntrinsic(CRCIntrinsicID);
- return Builder.CreateCall(F, {Arg0, Arg1});
- }
- }
- if (BuiltinID == ARM::BI__builtin_arm_rsr ||
- BuiltinID == ARM::BI__builtin_arm_rsr64 ||
- BuiltinID == ARM::BI__builtin_arm_rsrp ||
- BuiltinID == ARM::BI__builtin_arm_wsr ||
- BuiltinID == ARM::BI__builtin_arm_wsr64 ||
- BuiltinID == ARM::BI__builtin_arm_wsrp) {
- bool IsRead = BuiltinID == ARM::BI__builtin_arm_rsr ||
- BuiltinID == ARM::BI__builtin_arm_rsr64 ||
- BuiltinID == ARM::BI__builtin_arm_rsrp;
- bool IsPointerBuiltin = BuiltinID == ARM::BI__builtin_arm_rsrp ||
- BuiltinID == ARM::BI__builtin_arm_wsrp;
- bool Is64Bit = BuiltinID == ARM::BI__builtin_arm_rsr64 ||
- BuiltinID == ARM::BI__builtin_arm_wsr64;
- llvm::Type *ValueType;
- llvm::Type *RegisterType;
- if (IsPointerBuiltin) {
- ValueType = VoidPtrTy;
- RegisterType = Int32Ty;
- } else if (Is64Bit) {
- ValueType = RegisterType = Int64Ty;
- } else {
- ValueType = RegisterType = Int32Ty;
- }
- return EmitSpecialRegisterBuiltin(*this, E, RegisterType, ValueType, IsRead);
- }
- // Find out if any arguments are required to be integer constant
- // expressions.
- unsigned ICEArguments = 0;
- ASTContext::GetBuiltinTypeError Error;
- getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
- assert(Error == ASTContext::GE_None && "Should not codegen an error");
- auto getAlignmentValue32 = [&](Address addr) -> Value* {
- return Builder.getInt32(addr.getAlignment().getQuantity());
- };
- Address PtrOp0 = Address::invalid();
- Address PtrOp1 = Address::invalid();
- SmallVector<Value*, 4> Ops;
- bool HasExtraArg = HasExtraNeonArgument(BuiltinID);
- unsigned NumArgs = E->getNumArgs() - (HasExtraArg ? 1 : 0);
- for (unsigned i = 0, e = NumArgs; i != e; i++) {
- if (i == 0) {
- switch (BuiltinID) {
- case NEON::BI__builtin_neon_vld1_v:
- case NEON::BI__builtin_neon_vld1q_v:
- case NEON::BI__builtin_neon_vld1q_lane_v:
- case NEON::BI__builtin_neon_vld1_lane_v:
- case NEON::BI__builtin_neon_vld1_dup_v:
- case NEON::BI__builtin_neon_vld1q_dup_v:
- case NEON::BI__builtin_neon_vst1_v:
- case NEON::BI__builtin_neon_vst1q_v:
- case NEON::BI__builtin_neon_vst1q_lane_v:
- case NEON::BI__builtin_neon_vst1_lane_v:
- case NEON::BI__builtin_neon_vst2_v:
- case NEON::BI__builtin_neon_vst2q_v:
- case NEON::BI__builtin_neon_vst2_lane_v:
- case NEON::BI__builtin_neon_vst2q_lane_v:
- case NEON::BI__builtin_neon_vst3_v:
- case NEON::BI__builtin_neon_vst3q_v:
- case NEON::BI__builtin_neon_vst3_lane_v:
- case NEON::BI__builtin_neon_vst3q_lane_v:
- case NEON::BI__builtin_neon_vst4_v:
- case NEON::BI__builtin_neon_vst4q_v:
- case NEON::BI__builtin_neon_vst4_lane_v:
- case NEON::BI__builtin_neon_vst4q_lane_v:
- // Get the alignment for the argument in addition to the value;
- // we'll use it later.
- PtrOp0 = EmitPointerWithAlignment(E->getArg(0));
- Ops.push_back(PtrOp0.getPointer());
- continue;
- }
- }
- if (i == 1) {
- switch (BuiltinID) {
- case NEON::BI__builtin_neon_vld2_v:
- case NEON::BI__builtin_neon_vld2q_v:
- case NEON::BI__builtin_neon_vld3_v:
- case NEON::BI__builtin_neon_vld3q_v:
- case NEON::BI__builtin_neon_vld4_v:
- case NEON::BI__builtin_neon_vld4q_v:
- case NEON::BI__builtin_neon_vld2_lane_v:
- case NEON::BI__builtin_neon_vld2q_lane_v:
- case NEON::BI__builtin_neon_vld3_lane_v:
- case NEON::BI__builtin_neon_vld3q_lane_v:
- case NEON::BI__builtin_neon_vld4_lane_v:
- case NEON::BI__builtin_neon_vld4q_lane_v:
- case NEON::BI__builtin_neon_vld2_dup_v:
- case NEON::BI__builtin_neon_vld2q_dup_v:
- case NEON::BI__builtin_neon_vld3_dup_v:
- case NEON::BI__builtin_neon_vld3q_dup_v:
- case NEON::BI__builtin_neon_vld4_dup_v:
- case NEON::BI__builtin_neon_vld4q_dup_v:
- // Get the alignment for the argument in addition to the value;
- // we'll use it later.
- PtrOp1 = EmitPointerWithAlignment(E->getArg(1));
- Ops.push_back(PtrOp1.getPointer());
- continue;
- }
- }
- if ((ICEArguments & (1 << i)) == 0) {
- Ops.push_back(EmitScalarExpr(E->getArg(i)));
- } else {
- // If this is required to be a constant, constant fold it so that we know
- // that the generated intrinsic gets a ConstantInt.
- llvm::APSInt Result;
- bool IsConst = E->getArg(i)->isIntegerConstantExpr(Result, getContext());
- assert(IsConst && "Constant arg isn't actually constant?"); (void)IsConst;
- Ops.push_back(llvm::ConstantInt::get(getLLVMContext(), Result));
- }
- }
- switch (BuiltinID) {
- default: break;
- case NEON::BI__builtin_neon_vget_lane_i8:
- case NEON::BI__builtin_neon_vget_lane_i16:
- case NEON::BI__builtin_neon_vget_lane_i32:
- case NEON::BI__builtin_neon_vget_lane_i64:
- case NEON::BI__builtin_neon_vget_lane_f32:
- case NEON::BI__builtin_neon_vgetq_lane_i8:
- case NEON::BI__builtin_neon_vgetq_lane_i16:
- case NEON::BI__builtin_neon_vgetq_lane_i32:
- case NEON::BI__builtin_neon_vgetq_lane_i64:
- case NEON::BI__builtin_neon_vgetq_lane_f32:
- return Builder.CreateExtractElement(Ops[0], Ops[1], "vget_lane");
- case NEON::BI__builtin_neon_vrndns_f32: {
- Value *Arg = EmitScalarExpr(E->getArg(0));
- llvm::Type *Tys[] = {Arg->getType()};
- Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vrintn, Tys);
- return Builder.CreateCall(F, {Arg}, "vrndn"); }
- case NEON::BI__builtin_neon_vset_lane_i8:
- case NEON::BI__builtin_neon_vset_lane_i16:
- case NEON::BI__builtin_neon_vset_lane_i32:
- case NEON::BI__builtin_neon_vset_lane_i64:
- case NEON::BI__builtin_neon_vset_lane_f32:
- case NEON::BI__builtin_neon_vsetq_lane_i8:
- case NEON::BI__builtin_neon_vsetq_lane_i16:
- case NEON::BI__builtin_neon_vsetq_lane_i32:
- case NEON::BI__builtin_neon_vsetq_lane_i64:
- case NEON::BI__builtin_neon_vsetq_lane_f32:
- return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vset_lane");
- case NEON::BI__builtin_neon_vsha1h_u32:
- return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha1h), Ops,
- "vsha1h");
- case NEON::BI__builtin_neon_vsha1cq_u32:
- return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha1c), Ops,
- "vsha1h");
- case NEON::BI__builtin_neon_vsha1pq_u32:
- return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha1p), Ops,
- "vsha1h");
- case NEON::BI__builtin_neon_vsha1mq_u32:
- return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha1m), Ops,
- "vsha1h");
- // The ARM _MoveToCoprocessor builtins put the input register value as
- // the first argument, but the LLVM intrinsic expects it as the third one.
- case ARM::BI_MoveToCoprocessor:
- case ARM::BI_MoveToCoprocessor2: {
- Function *F = CGM.getIntrinsic(BuiltinID == ARM::BI_MoveToCoprocessor ?
- Intrinsic::arm_mcr : Intrinsic::arm_mcr2);
- return Builder.CreateCall(F, {Ops[1], Ops[2], Ops[0],
- Ops[3], Ops[4], Ops[5]});
- }
- case ARM::BI_BitScanForward:
- case ARM::BI_BitScanForward64:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_BitScanForward, E);
- case ARM::BI_BitScanReverse:
- case ARM::BI_BitScanReverse64:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_BitScanReverse, E);
- case ARM::BI_InterlockedAnd64:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedAnd, E);
- case ARM::BI_InterlockedExchange64:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchange, E);
- case ARM::BI_InterlockedExchangeAdd64:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeAdd, E);
- case ARM::BI_InterlockedExchangeSub64:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeSub, E);
- case ARM::BI_InterlockedOr64:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedOr, E);
- case ARM::BI_InterlockedXor64:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedXor, E);
- case ARM::BI_InterlockedDecrement64:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedDecrement, E);
- case ARM::BI_InterlockedIncrement64:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedIncrement, E);
- case ARM::BI_InterlockedExchangeAdd8_acq:
- case ARM::BI_InterlockedExchangeAdd16_acq:
- case ARM::BI_InterlockedExchangeAdd_acq:
- case ARM::BI_InterlockedExchangeAdd64_acq:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeAdd_acq, E);
- case ARM::BI_InterlockedExchangeAdd8_rel:
- case ARM::BI_InterlockedExchangeAdd16_rel:
- case ARM::BI_InterlockedExchangeAdd_rel:
- case ARM::BI_InterlockedExchangeAdd64_rel:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeAdd_rel, E);
- case ARM::BI_InterlockedExchangeAdd8_nf:
- case ARM::BI_InterlockedExchangeAdd16_nf:
- case ARM::BI_InterlockedExchangeAdd_nf:
- case ARM::BI_InterlockedExchangeAdd64_nf:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeAdd_nf, E);
- case ARM::BI_InterlockedExchange8_acq:
- case ARM::BI_InterlockedExchange16_acq:
- case ARM::BI_InterlockedExchange_acq:
- case ARM::BI_InterlockedExchange64_acq:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchange_acq, E);
- case ARM::BI_InterlockedExchange8_rel:
- case ARM::BI_InterlockedExchange16_rel:
- case ARM::BI_InterlockedExchange_rel:
- case ARM::BI_InterlockedExchange64_rel:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchange_rel, E);
- case ARM::BI_InterlockedExchange8_nf:
- case ARM::BI_InterlockedExchange16_nf:
- case ARM::BI_InterlockedExchange_nf:
- case ARM::BI_InterlockedExchange64_nf:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchange_nf, E);
- case ARM::BI_InterlockedCompareExchange8_acq:
- case ARM::BI_InterlockedCompareExchange16_acq:
- case ARM::BI_InterlockedCompareExchange_acq:
- case ARM::BI_InterlockedCompareExchange64_acq:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedCompareExchange_acq, E);
- case ARM::BI_InterlockedCompareExchange8_rel:
- case ARM::BI_InterlockedCompareExchange16_rel:
- case ARM::BI_InterlockedCompareExchange_rel:
- case ARM::BI_InterlockedCompareExchange64_rel:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedCompareExchange_rel, E);
- case ARM::BI_InterlockedCompareExchange8_nf:
- case ARM::BI_InterlockedCompareExchange16_nf:
- case ARM::BI_InterlockedCompareExchange_nf:
- case ARM::BI_InterlockedCompareExchange64_nf:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedCompareExchange_nf, E);
- case ARM::BI_InterlockedOr8_acq:
- case ARM::BI_InterlockedOr16_acq:
- case ARM::BI_InterlockedOr_acq:
- case ARM::BI_InterlockedOr64_acq:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedOr_acq, E);
- case ARM::BI_InterlockedOr8_rel:
- case ARM::BI_InterlockedOr16_rel:
- case ARM::BI_InterlockedOr_rel:
- case ARM::BI_InterlockedOr64_rel:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedOr_rel, E);
- case ARM::BI_InterlockedOr8_nf:
- case ARM::BI_InterlockedOr16_nf:
- case ARM::BI_InterlockedOr_nf:
- case ARM::BI_InterlockedOr64_nf:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedOr_nf, E);
- case ARM::BI_InterlockedXor8_acq:
- case ARM::BI_InterlockedXor16_acq:
- case ARM::BI_InterlockedXor_acq:
- case ARM::BI_InterlockedXor64_acq:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedXor_acq, E);
- case ARM::BI_InterlockedXor8_rel:
- case ARM::BI_InterlockedXor16_rel:
- case ARM::BI_InterlockedXor_rel:
- case ARM::BI_InterlockedXor64_rel:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedXor_rel, E);
- case ARM::BI_InterlockedXor8_nf:
- case ARM::BI_InterlockedXor16_nf:
- case ARM::BI_InterlockedXor_nf:
- case ARM::BI_InterlockedXor64_nf:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedXor_nf, E);
- case ARM::BI_InterlockedAnd8_acq:
- case ARM::BI_InterlockedAnd16_acq:
- case ARM::BI_InterlockedAnd_acq:
- case ARM::BI_InterlockedAnd64_acq:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedAnd_acq, E);
- case ARM::BI_InterlockedAnd8_rel:
- case ARM::BI_InterlockedAnd16_rel:
- case ARM::BI_InterlockedAnd_rel:
- case ARM::BI_InterlockedAnd64_rel:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedAnd_rel, E);
- case ARM::BI_InterlockedAnd8_nf:
- case ARM::BI_InterlockedAnd16_nf:
- case ARM::BI_InterlockedAnd_nf:
- case ARM::BI_InterlockedAnd64_nf:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedAnd_nf, E);
- case ARM::BI_InterlockedIncrement16_acq:
- case ARM::BI_InterlockedIncrement_acq:
- case ARM::BI_InterlockedIncrement64_acq:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedIncrement_acq, E);
- case ARM::BI_InterlockedIncrement16_rel:
- case ARM::BI_InterlockedIncrement_rel:
- case ARM::BI_InterlockedIncrement64_rel:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedIncrement_rel, E);
- case ARM::BI_InterlockedIncrement16_nf:
- case ARM::BI_InterlockedIncrement_nf:
- case ARM::BI_InterlockedIncrement64_nf:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedIncrement_nf, E);
- case ARM::BI_InterlockedDecrement16_acq:
- case ARM::BI_InterlockedDecrement_acq:
- case ARM::BI_InterlockedDecrement64_acq:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedDecrement_acq, E);
- case ARM::BI_InterlockedDecrement16_rel:
- case ARM::BI_InterlockedDecrement_rel:
- case ARM::BI_InterlockedDecrement64_rel:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedDecrement_rel, E);
- case ARM::BI_InterlockedDecrement16_nf:
- case ARM::BI_InterlockedDecrement_nf:
- case ARM::BI_InterlockedDecrement64_nf:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedDecrement_nf, E);
- }
- // Get the last argument, which specifies the vector type.
- assert(HasExtraArg);
- llvm::APSInt Result;
- const Expr *Arg = E->getArg(E->getNumArgs()-1);
- if (!Arg->isIntegerConstantExpr(Result, getContext()))
- return nullptr;
- if (BuiltinID == ARM::BI__builtin_arm_vcvtr_f ||
- BuiltinID == ARM::BI__builtin_arm_vcvtr_d) {
- // Determine the overloaded type of this builtin.
- llvm::Type *Ty;
- if (BuiltinID == ARM::BI__builtin_arm_vcvtr_f)
- Ty = FloatTy;
- else
- Ty = DoubleTy;
- // Determine whether this is an unsigned conversion or not.
- bool usgn = Result.getZExtValue() == 1;
- unsigned Int = usgn ? Intrinsic::arm_vcvtru : Intrinsic::arm_vcvtr;
- // Call the appropriate intrinsic.
- Function *F = CGM.getIntrinsic(Int, Ty);
- return Builder.CreateCall(F, Ops, "vcvtr");
- }
- // Determine the type of this overloaded NEON intrinsic.
- NeonTypeFlags Type(Result.getZExtValue());
- bool usgn = Type.isUnsigned();
- bool rightShift = false;
- llvm::VectorType *VTy = GetNeonType(this, Type,
- getTarget().hasLegalHalfType());
- llvm::Type *Ty = VTy;
- if (!Ty)
- return nullptr;
- // Many NEON builtins have identical semantics and uses in ARM and
- // AArch64. Emit these in a single function.
- auto IntrinsicMap = makeArrayRef(ARMSIMDIntrinsicMap);
- const NeonIntrinsicInfo *Builtin = findNeonIntrinsicInMap(
- IntrinsicMap, BuiltinID, NEONSIMDIntrinsicsProvenSorted);
- if (Builtin)
- return EmitCommonNeonBuiltinExpr(
- Builtin->BuiltinID, Builtin->LLVMIntrinsic, Builtin->AltLLVMIntrinsic,
- Builtin->NameHint, Builtin->TypeModifier, E, Ops, PtrOp0, PtrOp1, Arch);
- unsigned Int;
- switch (BuiltinID) {
- default: return nullptr;
- case NEON::BI__builtin_neon_vld1q_lane_v:
- // Handle 64-bit integer elements as a special case. Use shuffles of
- // one-element vectors to avoid poor code for i64 in the backend.
- if (VTy->getElementType()->isIntegerTy(64)) {
- // Extract the other lane.
- Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
- uint32_t Lane = cast<ConstantInt>(Ops[2])->getZExtValue();
- Value *SV = llvm::ConstantVector::get(ConstantInt::get(Int32Ty, 1-Lane));
- Ops[1] = Builder.CreateShuffleVector(Ops[1], Ops[1], SV);
- // Load the value as a one-element vector.
- Ty = llvm::VectorType::get(VTy->getElementType(), 1);
- llvm::Type *Tys[] = {Ty, Int8PtrTy};
- Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld1, Tys);
- Value *Align = getAlignmentValue32(PtrOp0);
- Value *Ld = Builder.CreateCall(F, {Ops[0], Align});
- // Combine them.
- uint32_t Indices[] = {1 - Lane, Lane};
- SV = llvm::ConstantDataVector::get(getLLVMContext(), Indices);
- return Builder.CreateShuffleVector(Ops[1], Ld, SV, "vld1q_lane");
- }
- LLVM_FALLTHROUGH;
- case NEON::BI__builtin_neon_vld1_lane_v: {
- Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
- PtrOp0 = Builder.CreateElementBitCast(PtrOp0, VTy->getElementType());
- Value *Ld = Builder.CreateLoad(PtrOp0);
- return Builder.CreateInsertElement(Ops[1], Ld, Ops[2], "vld1_lane");
- }
- case NEON::BI__builtin_neon_vqrshrn_n_v:
- Int =
- usgn ? Intrinsic::arm_neon_vqrshiftnu : Intrinsic::arm_neon_vqrshiftns;
- return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqrshrn_n",
- 1, true);
- case NEON::BI__builtin_neon_vqrshrun_n_v:
- return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqrshiftnsu, Ty),
- Ops, "vqrshrun_n", 1, true);
- case NEON::BI__builtin_neon_vqshrn_n_v:
- Int = usgn ? Intrinsic::arm_neon_vqshiftnu : Intrinsic::arm_neon_vqshiftns;
- return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshrn_n",
- 1, true);
- case NEON::BI__builtin_neon_vqshrun_n_v:
- return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqshiftnsu, Ty),
- Ops, "vqshrun_n", 1, true);
- case NEON::BI__builtin_neon_vrecpe_v:
- case NEON::BI__builtin_neon_vrecpeq_v:
- return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrecpe, Ty),
- Ops, "vrecpe");
- case NEON::BI__builtin_neon_vrshrn_n_v:
- return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrshiftn, Ty),
- Ops, "vrshrn_n", 1, true);
- case NEON::BI__builtin_neon_vrsra_n_v:
- case NEON::BI__builtin_neon_vrsraq_n_v:
- Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
- Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
- Ops[2] = EmitNeonShiftVector(Ops[2], Ty, true);
- Int = usgn ? Intrinsic::arm_neon_vrshiftu : Intrinsic::arm_neon_vrshifts;
- Ops[1] = Builder.CreateCall(CGM.getIntrinsic(Int, Ty), {Ops[1], Ops[2]});
- return Builder.CreateAdd(Ops[0], Ops[1], "vrsra_n");
- case NEON::BI__builtin_neon_vsri_n_v:
- case NEON::BI__builtin_neon_vsriq_n_v:
- rightShift = true;
- LLVM_FALLTHROUGH;
- case NEON::BI__builtin_neon_vsli_n_v:
- case NEON::BI__builtin_neon_vsliq_n_v:
- Ops[2] = EmitNeonShiftVector(Ops[2], Ty, rightShift);
- return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vshiftins, Ty),
- Ops, "vsli_n");
- case NEON::BI__builtin_neon_vsra_n_v:
- case NEON::BI__builtin_neon_vsraq_n_v:
- Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
- Ops[1] = EmitNeonRShiftImm(Ops[1], Ops[2], Ty, usgn, "vsra_n");
- return Builder.CreateAdd(Ops[0], Ops[1]);
- case NEON::BI__builtin_neon_vst1q_lane_v:
- // Handle 64-bit integer elements as a special case. Use a shuffle to get
- // a one-element vector and avoid poor code for i64 in the backend.
- if (VTy->getElementType()->isIntegerTy(64)) {
- Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
- Value *SV = llvm::ConstantVector::get(cast<llvm::Constant>(Ops[2]));
- Ops[1] = Builder.CreateShuffleVector(Ops[1], Ops[1], SV);
- Ops[2] = getAlignmentValue32(PtrOp0);
- llvm::Type *Tys[] = {Int8PtrTy, Ops[1]->getType()};
- return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst1,
- Tys), Ops);
- }
- LLVM_FALLTHROUGH;
- case NEON::BI__builtin_neon_vst1_lane_v: {
- Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
- Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2]);
- Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
- auto St = Builder.CreateStore(Ops[1], Builder.CreateBitCast(PtrOp0, Ty));
- return St;
- }
- case NEON::BI__builtin_neon_vtbl1_v:
- return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl1),
- Ops, "vtbl1");
- case NEON::BI__builtin_neon_vtbl2_v:
- return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl2),
- Ops, "vtbl2");
- case NEON::BI__builtin_neon_vtbl3_v:
- return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl3),
- Ops, "vtbl3");
- case NEON::BI__builtin_neon_vtbl4_v:
- return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl4),
- Ops, "vtbl4");
- case NEON::BI__builtin_neon_vtbx1_v:
- return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx1),
- Ops, "vtbx1");
- case NEON::BI__builtin_neon_vtbx2_v:
- return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx2),
- Ops, "vtbx2");
- case NEON::BI__builtin_neon_vtbx3_v:
- return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx3),
- Ops, "vtbx3");
- case NEON::BI__builtin_neon_vtbx4_v:
- return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx4),
- Ops, "vtbx4");
- }
- }
- static Value *EmitAArch64TblBuiltinExpr(CodeGenFunction &CGF, unsigned BuiltinID,
- const CallExpr *E,
- SmallVectorImpl<Value *> &Ops,
- llvm::Triple::ArchType Arch) {
- unsigned int Int = 0;
- const char *s = nullptr;
- switch (BuiltinID) {
- default:
- return nullptr;
- case NEON::BI__builtin_neon_vtbl1_v:
- case NEON::BI__builtin_neon_vqtbl1_v:
- case NEON::BI__builtin_neon_vqtbl1q_v:
- case NEON::BI__builtin_neon_vtbl2_v:
- case NEON::BI__builtin_neon_vqtbl2_v:
- case NEON::BI__builtin_neon_vqtbl2q_v:
- case NEON::BI__builtin_neon_vtbl3_v:
- case NEON::BI__builtin_neon_vqtbl3_v:
- case NEON::BI__builtin_neon_vqtbl3q_v:
- case NEON::BI__builtin_neon_vtbl4_v:
- case NEON::BI__builtin_neon_vqtbl4_v:
- case NEON::BI__builtin_neon_vqtbl4q_v:
- break;
- case NEON::BI__builtin_neon_vtbx1_v:
- case NEON::BI__builtin_neon_vqtbx1_v:
- case NEON::BI__builtin_neon_vqtbx1q_v:
- case NEON::BI__builtin_neon_vtbx2_v:
- case NEON::BI__builtin_neon_vqtbx2_v:
- case NEON::BI__builtin_neon_vqtbx2q_v:
- case NEON::BI__builtin_neon_vtbx3_v:
- case NEON::BI__builtin_neon_vqtbx3_v:
- case NEON::BI__builtin_neon_vqtbx3q_v:
- case NEON::BI__builtin_neon_vtbx4_v:
- case NEON::BI__builtin_neon_vqtbx4_v:
- case NEON::BI__builtin_neon_vqtbx4q_v:
- break;
- }
- assert(E->getNumArgs() >= 3);
- // Get the last argument, which specifies the vector type.
- llvm::APSInt Result;
- const Expr *Arg = E->getArg(E->getNumArgs() - 1);
- if (!Arg->isIntegerConstantExpr(Result, CGF.getContext()))
- return nullptr;
- // Determine the type of this overloaded NEON intrinsic.
- NeonTypeFlags Type(Result.getZExtValue());
- llvm::VectorType *Ty = GetNeonType(&CGF, Type);
- if (!Ty)
- return nullptr;
- CodeGen::CGBuilderTy &Builder = CGF.Builder;
- // AArch64 scalar builtins are not overloaded; they do not have an extra
- // argument that specifies the vector type, so each case must be handled
- // individually.
- switch (BuiltinID) {
- case NEON::BI__builtin_neon_vtbl1_v: {
- return packTBLDVectorList(CGF, makeArrayRef(Ops).slice(0, 1), nullptr,
- Ops[1], Ty, Intrinsic::aarch64_neon_tbl1,
- "vtbl1");
- }
- case NEON::BI__builtin_neon_vtbl2_v: {
- return packTBLDVectorList(CGF, makeArrayRef(Ops).slice(0, 2), nullptr,
- Ops[2], Ty, Intrinsic::aarch64_neon_tbl1,
- "vtbl1");
- }
- case NEON::BI__builtin_neon_vtbl3_v: {
- return packTBLDVectorList(CGF, makeArrayRef(Ops).slice(0, 3), nullptr,
- Ops[3], Ty, Intrinsic::aarch64_neon_tbl2,
- "vtbl2");
- }
- case NEON::BI__builtin_neon_vtbl4_v: {
- return packTBLDVectorList(CGF, makeArrayRef(Ops).slice(0, 4), nullptr,
- Ops[4], Ty, Intrinsic::aarch64_neon_tbl2,
- "vtbl2");
- }
- case NEON::BI__builtin_neon_vtbx1_v: {
- Value *TblRes =
- packTBLDVectorList(CGF, makeArrayRef(Ops).slice(1, 1), nullptr, Ops[2],
- Ty, Intrinsic::aarch64_neon_tbl1, "vtbl1");
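- // Indices >= 8 are out of range for a single 64-bit table. CmpRes is an
- // all-ones mask for such lanes, so the and/not-and/or sequence below acts
- // as a per-lane select between the original elements in Ops[0] and the
- // table-lookup result.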
- llvm::Constant *EightV = ConstantInt::get(Ty, 8);
- Value *CmpRes = Builder.CreateICmp(ICmpInst::ICMP_UGE, Ops[2], EightV);
- CmpRes = Builder.CreateSExt(CmpRes, Ty);
- Value *EltsFromInput = Builder.CreateAnd(CmpRes, Ops[0]);
- Value *EltsFromTbl = Builder.CreateAnd(Builder.CreateNot(CmpRes), TblRes);
- return Builder.CreateOr(EltsFromInput, EltsFromTbl, "vtbx");
- }
- case NEON::BI__builtin_neon_vtbx2_v: {
- return packTBLDVectorList(CGF, makeArrayRef(Ops).slice(1, 2), Ops[0],
- Ops[3], Ty, Intrinsic::aarch64_neon_tbx1,
- "vtbx1");
- }
- case NEON::BI__builtin_neon_vtbx3_v: {
- Value *TblRes =
- packTBLDVectorList(CGF, makeArrayRef(Ops).slice(1, 3), nullptr, Ops[4],
- Ty, Intrinsic::aarch64_neon_tbl2, "vtbl2");
- llvm::Constant *TwentyFourV = ConstantInt::get(Ty, 24);
- Value *CmpRes = Builder.CreateICmp(ICmpInst::ICMP_UGE, Ops[4],
- TwentyFourV);
- CmpRes = Builder.CreateSExt(CmpRes, Ty);
- Value *EltsFromInput = Builder.CreateAnd(CmpRes, Ops[0]);
- Value *EltsFromTbl = Builder.CreateAnd(Builder.CreateNot(CmpRes), TblRes);
- return Builder.CreateOr(EltsFromInput, EltsFromTbl, "vtbx");
- }
- case NEON::BI__builtin_neon_vtbx4_v: {
- return packTBLDVectorList(CGF, makeArrayRef(Ops).slice(1, 4), Ops[0],
- Ops[5], Ty, Intrinsic::aarch64_neon_tbx2,
- "vtbx2");
- }
- case NEON::BI__builtin_neon_vqtbl1_v:
- case NEON::BI__builtin_neon_vqtbl1q_v:
- Int = Intrinsic::aarch64_neon_tbl1; s = "vtbl1"; break;
- case NEON::BI__builtin_neon_vqtbl2_v:
- case NEON::BI__builtin_neon_vqtbl2q_v:
- Int = Intrinsic::aarch64_neon_tbl2; s = "vtbl2"; break;
- case NEON::BI__builtin_neon_vqtbl3_v:
- case NEON::BI__builtin_neon_vqtbl3q_v:
- Int = Intrinsic::aarch64_neon_tbl3; s = "vtbl3"; break;
- case NEON::BI__builtin_neon_vqtbl4_v:
- case NEON::BI__builtin_neon_vqtbl4q_v:
- Int = Intrinsic::aarch64_neon_tbl4; s = "vtbl4"; break;
- case NEON::BI__builtin_neon_vqtbx1_v:
- case NEON::BI__builtin_neon_vqtbx1q_v:
- Int = Intrinsic::aarch64_neon_tbx1; s = "vtbx1"; break;
- case NEON::BI__builtin_neon_vqtbx2_v:
- case NEON::BI__builtin_neon_vqtbx2q_v:
- Int = Intrinsic::aarch64_neon_tbx2; s = "vtbx2"; break;
- case NEON::BI__builtin_neon_vqtbx3_v:
- case NEON::BI__builtin_neon_vqtbx3q_v:
- Int = Intrinsic::aarch64_neon_tbx3; s = "vtbx3"; break;
- case NEON::BI__builtin_neon_vqtbx4_v:
- case NEON::BI__builtin_neon_vqtbx4q_v:
- Int = Intrinsic::aarch64_neon_tbx4; s = "vtbx4"; break;
- }
- if (!Int)
- return nullptr;
- Function *F = CGF.CGM.getIntrinsic(Int, Ty);
- return CGF.EmitNeonCall(F, Ops, s);
- }
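- // Wraps a scalar i16 in lane 0 of a <4 x i16> vector, i.e. emits
- //   insertelement <4 x i16> undef, i16 %x, i64 0
- // so scalar saturating builtins can reuse the vector intrinsics; callers
- // typically extract lane 0 of the result afterwards.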
- Value *CodeGenFunction::vectorWrapScalar16(Value *Op) {
- llvm::Type *VTy = llvm::VectorType::get(Int16Ty, 4);
- Op = Builder.CreateBitCast(Op, Int16Ty);
- Value *V = UndefValue::get(VTy);
- llvm::Constant *CI = ConstantInt::get(SizeTy, 0);
- Op = Builder.CreateInsertElement(V, Op, CI);
- return Op;
- }
- Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
- const CallExpr *E,
- llvm::Triple::ArchType Arch) {
- unsigned HintID = static_cast<unsigned>(-1);
- switch (BuiltinID) {
- default: break;
- case AArch64::BI__builtin_arm_nop:
- HintID = 0;
- break;
- case AArch64::BI__builtin_arm_yield:
- case AArch64::BI__yield:
- HintID = 1;
- break;
- case AArch64::BI__builtin_arm_wfe:
- case AArch64::BI__wfe:
- HintID = 2;
- break;
- case AArch64::BI__builtin_arm_wfi:
- case AArch64::BI__wfi:
- HintID = 3;
- break;
- case AArch64::BI__builtin_arm_sev:
- case AArch64::BI__sev:
- HintID = 4;
- break;
- case AArch64::BI__builtin_arm_sevl:
- case AArch64::BI__sevl:
- HintID = 5;
- break;
- }
- if (HintID != static_cast<unsigned>(-1)) {
- Function *F = CGM.getIntrinsic(Intrinsic::aarch64_hint);
- return Builder.CreateCall(F, llvm::ConstantInt::get(Int32Ty, HintID));
- }
- if (BuiltinID == AArch64::BI__builtin_arm_prefetch) {
- Value *Address = EmitScalarExpr(E->getArg(0));
- Value *RW = EmitScalarExpr(E->getArg(1));
- Value *CacheLevel = EmitScalarExpr(E->getArg(2));
- Value *RetentionPolicy = EmitScalarExpr(E->getArg(3));
- Value *IsData = EmitScalarExpr(E->getArg(4));
- Value *Locality = nullptr;
- if (cast<llvm::ConstantInt>(RetentionPolicy)->isZero()) {
- // Temporal fetch: convert the cache level to an llvm.prefetch locality
- // (3 - level), so closer caches get higher locality values.
- Locality = llvm::ConstantInt::get(Int32Ty,
- -cast<llvm::ConstantInt>(CacheLevel)->getValue() + 3);
- } else {
- // Streaming fetch.
- Locality = llvm::ConstantInt::get(Int32Ty, 0);
- }
- // FIXME: We need an AArch64-specific LLVM intrinsic if we want to specify
- // PLDL3STRM or PLDL2STRM.
- Function *F = CGM.getIntrinsic(Intrinsic::prefetch);
- return Builder.CreateCall(F, {Address, RW, Locality, IsData});
- }
- if (BuiltinID == AArch64::BI__builtin_arm_rbit) {
- assert((getContext().getTypeSize(E->getType()) == 32) &&
- "rbit of unusual size!");
- llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
- return Builder.CreateCall(
- CGM.getIntrinsic(Intrinsic::bitreverse, Arg->getType()), Arg, "rbit");
- }
- if (BuiltinID == AArch64::BI__builtin_arm_rbit64) {
- assert((getContext().getTypeSize(E->getType()) == 64) &&
- "rbit of unusual size!");
- llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
- return Builder.CreateCall(
- CGM.getIntrinsic(Intrinsic::bitreverse, Arg->getType()), Arg, "rbit");
- }
- if (BuiltinID == AArch64::BI__clear_cache) {
- assert(E->getNumArgs() == 2 && "__clear_cache takes 2 arguments");
- const FunctionDecl *FD = E->getDirectCallee();
- Value *Ops[2];
- for (unsigned i = 0; i < 2; i++)
- Ops[i] = EmitScalarExpr(E->getArg(i));
- llvm::Type *Ty = CGM.getTypes().ConvertType(FD->getType());
- llvm::FunctionType *FTy = cast<llvm::FunctionType>(Ty);
- StringRef Name = FD->getName();
- return EmitNounwindRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name), Ops);
- }
- if ((BuiltinID == AArch64::BI__builtin_arm_ldrex ||
- BuiltinID == AArch64::BI__builtin_arm_ldaex) &&
- getContext().getTypeSize(E->getType()) == 128) {
- Function *F = CGM.getIntrinsic(BuiltinID == AArch64::BI__builtin_arm_ldaex
- ? Intrinsic::aarch64_ldaxp
- : Intrinsic::aarch64_ldxp);
- Value *LdPtr = EmitScalarExpr(E->getArg(0));
- Value *Val = Builder.CreateCall(F, Builder.CreateBitCast(LdPtr, Int8PtrTy),
- "ldxp");
- Value *Val0 = Builder.CreateExtractValue(Val, 1);
- Value *Val1 = Builder.CreateExtractValue(Val, 0);
- llvm::Type *Int128Ty = llvm::IntegerType::get(getLLVMContext(), 128);
- Val0 = Builder.CreateZExt(Val0, Int128Ty);
- Val1 = Builder.CreateZExt(Val1, Int128Ty);
- Value *ShiftCst = llvm::ConstantInt::get(Int128Ty, 64);
- Val = Builder.CreateShl(Val0, ShiftCst, "shl", true /* nuw */);
- Val = Builder.CreateOr(Val, Val1);
- return Builder.CreateBitCast(Val, ConvertType(E->getType()));
- } else if (BuiltinID == AArch64::BI__builtin_arm_ldrex ||
- BuiltinID == AArch64::BI__builtin_arm_ldaex) {
- Value *LoadAddr = EmitScalarExpr(E->getArg(0));
- QualType Ty = E->getType();
- llvm::Type *RealResTy = ConvertType(Ty);
- llvm::Type *PtrTy = llvm::IntegerType::get(
- getLLVMContext(), getContext().getTypeSize(Ty))->getPointerTo();
- LoadAddr = Builder.CreateBitCast(LoadAddr, PtrTy);
- Function *F = CGM.getIntrinsic(BuiltinID == AArch64::BI__builtin_arm_ldaex
- ? Intrinsic::aarch64_ldaxr
- : Intrinsic::aarch64_ldxr,
- PtrTy);
- Value *Val = Builder.CreateCall(F, LoadAddr, "ldxr");
- if (RealResTy->isPointerTy())
- return Builder.CreateIntToPtr(Val, RealResTy);
- llvm::Type *IntResTy = llvm::IntegerType::get(
- getLLVMContext(), CGM.getDataLayout().getTypeSizeInBits(RealResTy));
- Val = Builder.CreateTruncOrBitCast(Val, IntResTy);
- return Builder.CreateBitCast(Val, RealResTy);
- }
- if ((BuiltinID == AArch64::BI__builtin_arm_strex ||
- BuiltinID == AArch64::BI__builtin_arm_stlex) &&
- getContext().getTypeSize(E->getArg(0)->getType()) == 128) {
- Function *F = CGM.getIntrinsic(BuiltinID == AArch64::BI__builtin_arm_stlex
- ? Intrinsic::aarch64_stlxp
- : Intrinsic::aarch64_stxp);
- llvm::Type *STy = llvm::StructType::get(Int64Ty, Int64Ty);
- Address Tmp = CreateMemTemp(E->getArg(0)->getType());
- EmitAnyExprToMem(E->getArg(0), Tmp, Qualifiers(), /*init*/ true);
- Tmp = Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(STy));
- llvm::Value *Val = Builder.CreateLoad(Tmp);
- Value *Arg0 = Builder.CreateExtractValue(Val, 0);
- Value *Arg1 = Builder.CreateExtractValue(Val, 1);
- Value *StPtr = Builder.CreateBitCast(EmitScalarExpr(E->getArg(1)),
- Int8PtrTy);
- return Builder.CreateCall(F, {Arg0, Arg1, StPtr}, "stxp");
- }
- if (BuiltinID == AArch64::BI__builtin_arm_strex ||
- BuiltinID == AArch64::BI__builtin_arm_stlex) {
- Value *StoreVal = EmitScalarExpr(E->getArg(0));
- Value *StoreAddr = EmitScalarExpr(E->getArg(1));
- QualType Ty = E->getArg(0)->getType();
- llvm::Type *StoreTy = llvm::IntegerType::get(getLLVMContext(),
- getContext().getTypeSize(Ty));
- StoreAddr = Builder.CreateBitCast(StoreAddr, StoreTy->getPointerTo());
- if (StoreVal->getType()->isPointerTy())
- StoreVal = Builder.CreatePtrToInt(StoreVal, Int64Ty);
- else {
- llvm::Type *IntTy = llvm::IntegerType::get(
- getLLVMContext(),
- CGM.getDataLayout().getTypeSizeInBits(StoreVal->getType()));
- StoreVal = Builder.CreateBitCast(StoreVal, IntTy);
- StoreVal = Builder.CreateZExtOrBitCast(StoreVal, Int64Ty);
- }
- Function *F = CGM.getIntrinsic(BuiltinID == AArch64::BI__builtin_arm_stlex
- ? Intrinsic::aarch64_stlxr
- : Intrinsic::aarch64_stxr,
- StoreAddr->getType());
- return Builder.CreateCall(F, {StoreVal, StoreAddr}, "stxr");
- }
- if (BuiltinID == AArch64::BI__getReg) {
- Expr::EvalResult Result;
- if (!E->getArg(0)->EvaluateAsInt(Result, CGM.getContext()))
- llvm_unreachable("Sema will ensure that the parameter is constant");
- llvm::APSInt Value = Result.Val.getInt();
- LLVMContext &Context = CGM.getLLVMContext();
- std::string Reg = Value == 31 ? "sp" : "x" + Value.toString(10);
- llvm::Metadata *Ops[] = {llvm::MDString::get(Context, Reg)};
- llvm::MDNode *RegName = llvm::MDNode::get(Context, Ops);
- llvm::Value *Metadata = llvm::MetadataAsValue::get(Context, RegName);
- llvm::Function *F =
- CGM.getIntrinsic(llvm::Intrinsic::read_register, {Int64Ty});
- return Builder.CreateCall(F, Metadata);
- }
- if (BuiltinID == AArch64::BI__builtin_arm_clrex) {
- Function *F = CGM.getIntrinsic(Intrinsic::aarch64_clrex);
- return Builder.CreateCall(F);
- }
- if (BuiltinID == AArch64::BI_ReadWriteBarrier)
- return Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent,
- llvm::SyncScope::SingleThread);
- // CRC32
- Intrinsic::ID CRCIntrinsicID = Intrinsic::not_intrinsic;
- switch (BuiltinID) {
- case AArch64::BI__builtin_arm_crc32b:
- CRCIntrinsicID = Intrinsic::aarch64_crc32b; break;
- case AArch64::BI__builtin_arm_crc32cb:
- CRCIntrinsicID = Intrinsic::aarch64_crc32cb; break;
- case AArch64::BI__builtin_arm_crc32h:
- CRCIntrinsicID = Intrinsic::aarch64_crc32h; break;
- case AArch64::BI__builtin_arm_crc32ch:
- CRCIntrinsicID = Intrinsic::aarch64_crc32ch; break;
- case AArch64::BI__builtin_arm_crc32w:
- CRCIntrinsicID = Intrinsic::aarch64_crc32w; break;
- case AArch64::BI__builtin_arm_crc32cw:
- CRCIntrinsicID = Intrinsic::aarch64_crc32cw; break;
- case AArch64::BI__builtin_arm_crc32d:
- CRCIntrinsicID = Intrinsic::aarch64_crc32x; break;
- case AArch64::BI__builtin_arm_crc32cd:
- CRCIntrinsicID = Intrinsic::aarch64_crc32cx; break;
- }
- if (CRCIntrinsicID != Intrinsic::not_intrinsic) {
- Value *Arg0 = EmitScalarExpr(E->getArg(0));
- Value *Arg1 = EmitScalarExpr(E->getArg(1));
- Function *F = CGM.getIntrinsic(CRCIntrinsicID);
- llvm::Type *DataTy = F->getFunctionType()->getParamType(1);
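- // The 8- and 16-bit variants take their data in an i32 operand, so
- // widen the data argument to the intrinsic's parameter type.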
- Arg1 = Builder.CreateZExtOrBitCast(Arg1, DataTy);
- return Builder.CreateCall(F, {Arg0, Arg1});
- }
- // Memory Tagging Extensions (MTE) Intrinsics
- Intrinsic::ID MTEIntrinsicID = Intrinsic::not_intrinsic;
- switch (BuiltinID) {
- case AArch64::BI__builtin_arm_irg:
- MTEIntrinsicID = Intrinsic::aarch64_irg; break;
- case AArch64::BI__builtin_arm_addg:
- MTEIntrinsicID = Intrinsic::aarch64_addg; break;
- case AArch64::BI__builtin_arm_gmi:
- MTEIntrinsicID = Intrinsic::aarch64_gmi; break;
- case AArch64::BI__builtin_arm_ldg:
- MTEIntrinsicID = Intrinsic::aarch64_ldg; break;
- case AArch64::BI__builtin_arm_stg:
- MTEIntrinsicID = Intrinsic::aarch64_stg; break;
- case AArch64::BI__builtin_arm_subp:
- MTEIntrinsicID = Intrinsic::aarch64_subp; break;
- }
- if (MTEIntrinsicID != Intrinsic::not_intrinsic) {
- llvm::Type *T = ConvertType(E->getType());
- if (MTEIntrinsicID == Intrinsic::aarch64_irg) {
- Value *Pointer = EmitScalarExpr(E->getArg(0));
- Value *Mask = EmitScalarExpr(E->getArg(1));
- Pointer = Builder.CreatePointerCast(Pointer, Int8PtrTy);
- Mask = Builder.CreateZExt(Mask, Int64Ty);
- Value *RV = Builder.CreateCall(
- CGM.getIntrinsic(MTEIntrinsicID), {Pointer, Mask});
- return Builder.CreatePointerCast(RV, T);
- }
- if (MTEIntrinsicID == Intrinsic::aarch64_addg) {
- Value *Pointer = EmitScalarExpr(E->getArg(0));
- Value *TagOffset = EmitScalarExpr(E->getArg(1));
- Pointer = Builder.CreatePointerCast(Pointer, Int8PtrTy);
- TagOffset = Builder.CreateZExt(TagOffset, Int64Ty);
- Value *RV = Builder.CreateCall(
- CGM.getIntrinsic(MTEIntrinsicID), {Pointer, TagOffset});
- return Builder.CreatePointerCast(RV, T);
- }
- if (MTEIntrinsicID == Intrinsic::aarch64_gmi) {
- Value *Pointer = EmitScalarExpr(E->getArg(0));
- Value *ExcludedMask = EmitScalarExpr(E->getArg(1));
- ExcludedMask = Builder.CreateZExt(ExcludedMask, Int64Ty);
- Pointer = Builder.CreatePointerCast(Pointer, Int8PtrTy);
- return Builder.CreateCall(
- CGM.getIntrinsic(MTEIntrinsicID), {Pointer, ExcludedMask});
- }
- // Although it is possible to supply a different return address (the
- // first argument) to this intrinsic, for now we set the return address
- // to the same value as the input address.
- if (MTEIntrinsicID == Intrinsic::aarch64_ldg) {
- Value *TagAddress = EmitScalarExpr(E->getArg(0));
- TagAddress = Builder.CreatePointerCast(TagAddress, Int8PtrTy);
- Value *RV = Builder.CreateCall(
- CGM.getIntrinsic(MTEIntrinsicID), {TagAddress, TagAddress});
- return Builder.CreatePointerCast(RV, T);
- }
- // Although it is possible to supply a different tag to set (as the
- // first argument) to this intrinsic, for now we supply the tag that is
- // already in the input address argument (the common use case).
- if (MTEIntrinsicID == Intrinsic::aarch64_stg) {
- Value *TagAddress = EmitScalarExpr(E->getArg(0));
- TagAddress = Builder.CreatePointerCast(TagAddress, Int8PtrTy);
- return Builder.CreateCall(
- CGM.getIntrinsic(MTEIntrinsicID), {TagAddress, TagAddress});
- }
- if (MTEIntrinsicID == Intrinsic::aarch64_subp) {
- Value *PointerA = EmitScalarExpr(E->getArg(0));
- Value *PointerB = EmitScalarExpr(E->getArg(1));
- PointerA = Builder.CreatePointerCast(PointerA, Int8PtrTy);
- PointerB = Builder.CreatePointerCast(PointerB, Int8PtrTy);
- return Builder.CreateCall(
- CGM.getIntrinsic(MTEIntrinsicID), {PointerA, PointerB});
- }
- }
- if (BuiltinID == AArch64::BI__builtin_arm_rsr ||
- BuiltinID == AArch64::BI__builtin_arm_rsr64 ||
- BuiltinID == AArch64::BI__builtin_arm_rsrp ||
- BuiltinID == AArch64::BI__builtin_arm_wsr ||
- BuiltinID == AArch64::BI__builtin_arm_wsr64 ||
- BuiltinID == AArch64::BI__builtin_arm_wsrp) {
- bool IsRead = BuiltinID == AArch64::BI__builtin_arm_rsr ||
- BuiltinID == AArch64::BI__builtin_arm_rsr64 ||
- BuiltinID == AArch64::BI__builtin_arm_rsrp;
- bool IsPointerBuiltin = BuiltinID == AArch64::BI__builtin_arm_rsrp ||
- BuiltinID == AArch64::BI__builtin_arm_wsrp;
- bool Is64Bit = BuiltinID != AArch64::BI__builtin_arm_rsr &&
- BuiltinID != AArch64::BI__builtin_arm_wsr;
- llvm::Type *ValueType;
- llvm::Type *RegisterType = Int64Ty;
- if (IsPointerBuiltin) {
- ValueType = VoidPtrTy;
- } else if (Is64Bit) {
- ValueType = Int64Ty;
- } else {
- ValueType = Int32Ty;
- }
- return EmitSpecialRegisterBuiltin(*this, E, RegisterType, ValueType, IsRead);
- }
- if (BuiltinID == AArch64::BI_ReadStatusReg ||
- BuiltinID == AArch64::BI_WriteStatusReg) {
- LLVMContext &Context = CGM.getLLVMContext();
- unsigned SysReg =
- E->getArg(0)->EvaluateKnownConstInt(getContext()).getZExtValue();
- std::string SysRegStr;
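- // Decode the packed operand into the "op0:op1:CRn:CRm:op2" string form
- // expected by the read/write_register intrinsics. Only the low bit of
- // op0 is encoded (bit 14), since op0 is always 2 or 3 here.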
- llvm::raw_string_ostream(SysRegStr) <<
- ((1 << 1) | ((SysReg >> 14) & 1)) << ":" <<
- ((SysReg >> 11) & 7) << ":" <<
- ((SysReg >> 7) & 15) << ":" <<
- ((SysReg >> 3) & 15) << ":" <<
- ( SysReg & 7);
- llvm::Metadata *Ops[] = { llvm::MDString::get(Context, SysRegStr) };
- llvm::MDNode *RegName = llvm::MDNode::get(Context, Ops);
- llvm::Value *Metadata = llvm::MetadataAsValue::get(Context, RegName);
- llvm::Type *RegisterType = Int64Ty;
- llvm::Type *Types[] = { RegisterType };
- if (BuiltinID == AArch64::BI_ReadStatusReg) {
- llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::read_register, Types);
- return Builder.CreateCall(F, Metadata);
- }
- llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::write_register, Types);
- llvm::Value *ArgValue = EmitScalarExpr(E->getArg(1));
- return Builder.CreateCall(F, { Metadata, ArgValue });
- }
- if (BuiltinID == AArch64::BI_AddressOfReturnAddress) {
- llvm::Function *F = CGM.getIntrinsic(Intrinsic::addressofreturnaddress);
- return Builder.CreateCall(F);
- }
- if (BuiltinID == AArch64::BI__builtin_sponentry) {
- llvm::Function *F = CGM.getIntrinsic(Intrinsic::sponentry);
- return Builder.CreateCall(F);
- }
- // Find out if any arguments are required to be integer constant
- // expressions.
- unsigned ICEArguments = 0;
- ASTContext::GetBuiltinTypeError Error;
- getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
- assert(Error == ASTContext::GE_None && "Should not codegen an error");
- llvm::SmallVector<Value*, 4> Ops;
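- // Emit all arguments except the last, which is handled specially below
- // (for overloaded NEON builtins it is the type discriminator).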
- for (unsigned i = 0, e = E->getNumArgs() - 1; i != e; i++) {
- if ((ICEArguments & (1 << i)) == 0) {
- Ops.push_back(EmitScalarExpr(E->getArg(i)));
- } else {
- // If this is required to be a constant, constant fold it so that we know
- // that the generated intrinsic gets a ConstantInt.
- llvm::APSInt Result;
- bool IsConst = E->getArg(i)->isIntegerConstantExpr(Result, getContext());
- assert(IsConst && "Constant arg isn't actually constant?");
- (void)IsConst;
- Ops.push_back(llvm::ConstantInt::get(getLLVMContext(), Result));
- }
- }
- auto SISDMap = makeArrayRef(AArch64SISDIntrinsicMap);
- const NeonIntrinsicInfo *Builtin = findNeonIntrinsicInMap(
- SISDMap, BuiltinID, AArch64SISDIntrinsicsProvenSorted);
- if (Builtin) {
- Ops.push_back(EmitScalarExpr(E->getArg(E->getNumArgs() - 1)));
- Value *Result = EmitCommonNeonSISDBuiltinExpr(*this, *Builtin, Ops, E);
- assert(Result && "SISD intrinsic should have been handled");
- return Result;
- }
- llvm::APSInt Result;
- const Expr *Arg = E->getArg(E->getNumArgs()-1);
- NeonTypeFlags Type(0);
- if (Arg->isIntegerConstantExpr(Result, getContext()))
- // Determine the type of this overloaded NEON intrinsic.
- Type = NeonTypeFlags(Result.getZExtValue());
- bool usgn = Type.isUnsigned();
- bool quad = Type.isQuad();
- // Handle non-overloaded intrinsics first.
- switch (BuiltinID) {
- default: break;
- case NEON::BI__builtin_neon_vabsh_f16:
- Ops.push_back(EmitScalarExpr(E->getArg(0)));
- return EmitNeonCall(CGM.getIntrinsic(Intrinsic::fabs, HalfTy), Ops, "vabs");
- case NEON::BI__builtin_neon_vldrq_p128: {
- llvm::Type *Int128Ty = llvm::Type::getIntNTy(getLLVMContext(), 128);
- llvm::Type *Int128PTy = llvm::PointerType::get(Int128Ty, 0);
- Value *Ptr = Builder.CreateBitCast(EmitScalarExpr(E->getArg(0)), Int128PTy);
- return Builder.CreateAlignedLoad(Int128Ty, Ptr,
- CharUnits::fromQuantity(16));
- }
- case NEON::BI__builtin_neon_vstrq_p128: {
- llvm::Type *Int128PTy = llvm::Type::getIntNPtrTy(getLLVMContext(), 128);
- Value *Ptr = Builder.CreateBitCast(Ops[0], Int128PTy);
- return Builder.CreateDefaultAlignedStore(EmitScalarExpr(E->getArg(1)), Ptr);
- }
- case NEON::BI__builtin_neon_vcvts_u32_f32:
- case NEON::BI__builtin_neon_vcvtd_u64_f64:
- usgn = true;
- LLVM_FALLTHROUGH;
- case NEON::BI__builtin_neon_vcvts_s32_f32:
- case NEON::BI__builtin_neon_vcvtd_s64_f64: {
- Ops.push_back(EmitScalarExpr(E->getArg(0)));
- bool Is64 = Ops[0]->getType()->getPrimitiveSizeInBits() == 64;
- llvm::Type *InTy = Is64 ? Int64Ty : Int32Ty;
- llvm::Type *FTy = Is64 ? DoubleTy : FloatTy;
- Ops[0] = Builder.CreateBitCast(Ops[0], FTy);
- if (usgn)
- return Builder.CreateFPToUI(Ops[0], InTy);
- return Builder.CreateFPToSI(Ops[0], InTy);
- }
- case NEON::BI__builtin_neon_vcvts_f32_u32:
- case NEON::BI__builtin_neon_vcvtd_f64_u64:
- usgn = true;
- LLVM_FALLTHROUGH;
- case NEON::BI__builtin_neon_vcvts_f32_s32:
- case NEON::BI__builtin_neon_vcvtd_f64_s64: {
- Ops.push_back(EmitScalarExpr(E->getArg(0)));
- bool Is64 = Ops[0]->getType()->getPrimitiveSizeInBits() == 64;
- llvm::Type *InTy = Is64 ? Int64Ty : Int32Ty;
- llvm::Type *FTy = Is64 ? DoubleTy : FloatTy;
- Ops[0] = Builder.CreateBitCast(Ops[0], InTy);
- if (usgn)
- return Builder.CreateUIToFP(Ops[0], FTy);
- return Builder.CreateSIToFP(Ops[0], FTy);
- }
- case NEON::BI__builtin_neon_vcvth_f16_u16:
- case NEON::BI__builtin_neon_vcvth_f16_u32:
- case NEON::BI__builtin_neon_vcvth_f16_u64:
- usgn = true;
- LLVM_FALLTHROUGH;
- case NEON::BI__builtin_neon_vcvth_f16_s16:
- case NEON::BI__builtin_neon_vcvth_f16_s32:
- case NEON::BI__builtin_neon_vcvth_f16_s64: {
- Ops.push_back(EmitScalarExpr(E->getArg(0)));
- llvm::Type *FTy = HalfTy;
- llvm::Type *InTy;
- if (Ops[0]->getType()->getPrimitiveSizeInBits() == 64)
- InTy = Int64Ty;
- else if (Ops[0]->getType()->getPrimitiveSizeInBits() == 32)
- InTy = Int32Ty;
- else
- InTy = Int16Ty;
- Ops[0] = Builder.CreateBitCast(Ops[0], InTy);
- if (usgn)
- return Builder.CreateUIToFP(Ops[0], FTy);
- return Builder.CreateSIToFP(Ops[0], FTy);
- }
- case NEON::BI__builtin_neon_vcvth_u16_f16:
- usgn = true;
- LLVM_FALLTHROUGH;
- case NEON::BI__builtin_neon_vcvth_s16_f16: {
- Ops.push_back(EmitScalarExpr(E->getArg(0)));
- Ops[0] = Builder.CreateBitCast(Ops[0], HalfTy);
- if (usgn)
- return Builder.CreateFPToUI(Ops[0], Int16Ty);
- return Builder.CreateFPToSI(Ops[0], Int16Ty);
- }
- case NEON::BI__builtin_neon_vcvth_u32_f16:
- usgn = true;
- LLVM_FALLTHROUGH;
- case NEON::BI__builtin_neon_vcvth_s32_f16: {
- Ops.push_back(EmitScalarExpr(E->getArg(0)));
- Ops[0] = Builder.CreateBitCast(Ops[0], HalfTy);
- if (usgn)
- return Builder.CreateFPToUI(Ops[0], Int32Ty);
- return Builder.CreateFPToSI(Ops[0], Int32Ty);
- }
- case NEON::BI__builtin_neon_vcvth_u64_f16:
- usgn = true;
- LLVM_FALLTHROUGH;
- case NEON::BI__builtin_neon_vcvth_s64_f16: {
- Ops.push_back(EmitScalarExpr(E->getArg(0)));
- Ops[0] = Builder.CreateBitCast(Ops[0], HalfTy);
- if (usgn)
- return Builder.CreateFPToUI(Ops[0], Int64Ty);
- return Builder.CreateFPToSI(Ops[0], Int64Ty);
- }
- case NEON::BI__builtin_neon_vcvtah_u16_f16:
- case NEON::BI__builtin_neon_vcvtmh_u16_f16:
- case NEON::BI__builtin_neon_vcvtnh_u16_f16:
- case NEON::BI__builtin_neon_vcvtph_u16_f16:
- case NEON::BI__builtin_neon_vcvtah_s16_f16:
- case NEON::BI__builtin_neon_vcvtmh_s16_f16:
- case NEON::BI__builtin_neon_vcvtnh_s16_f16:
- case NEON::BI__builtin_neon_vcvtph_s16_f16: {
- unsigned Int;
- llvm::Type* InTy = Int32Ty;
- llvm::Type* FTy = HalfTy;
- llvm::Type *Tys[2] = {InTy, FTy};
- Ops.push_back(EmitScalarExpr(E->getArg(0)));
- switch (BuiltinID) {
- default: llvm_unreachable("missing builtin ID in switch!");
- case NEON::BI__builtin_neon_vcvtah_u16_f16:
- Int = Intrinsic::aarch64_neon_fcvtau; break;
- case NEON::BI__builtin_neon_vcvtmh_u16_f16:
- Int = Intrinsic::aarch64_neon_fcvtmu; break;
- case NEON::BI__builtin_neon_vcvtnh_u16_f16:
- Int = Intrinsic::aarch64_neon_fcvtnu; break;
- case NEON::BI__builtin_neon_vcvtph_u16_f16:
- Int = Intrinsic::aarch64_neon_fcvtpu; break;
- case NEON::BI__builtin_neon_vcvtah_s16_f16:
- Int = Intrinsic::aarch64_neon_fcvtas; break;
- case NEON::BI__builtin_neon_vcvtmh_s16_f16:
- Int = Intrinsic::aarch64_neon_fcvtms; break;
- case NEON::BI__builtin_neon_vcvtnh_s16_f16:
- Int = Intrinsic::aarch64_neon_fcvtns; break;
- case NEON::BI__builtin_neon_vcvtph_s16_f16:
- Int = Intrinsic::aarch64_neon_fcvtps; break;
- }
- Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "fcvt");
- return Builder.CreateTrunc(Ops[0], Int16Ty);
- }
- case NEON::BI__builtin_neon_vcaleh_f16:
- case NEON::BI__builtin_neon_vcalth_f16:
- case NEON::BI__builtin_neon_vcageh_f16:
- case NEON::BI__builtin_neon_vcagth_f16: {
- unsigned Int;
- llvm::Type* InTy = Int32Ty;
- llvm::Type* FTy = HalfTy;
- llvm::Type *Tys[2] = {InTy, FTy};
- Ops.push_back(EmitScalarExpr(E->getArg(1)));
- switch (BuiltinID) {
- default: llvm_unreachable("missing builtin ID in switch!");
- case NEON::BI__builtin_neon_vcageh_f16:
- Int = Intrinsic::aarch64_neon_facge; break;
- case NEON::BI__builtin_neon_vcagth_f16:
- Int = Intrinsic::aarch64_neon_facgt; break;
- case NEON::BI__builtin_neon_vcaleh_f16:
- Int = Intrinsic::aarch64_neon_facge; std::swap(Ops[0], Ops[1]); break;
- case NEON::BI__builtin_neon_vcalth_f16:
- Int = Intrinsic::aarch64_neon_facgt; std::swap(Ops[0], Ops[1]); break;
- }
- Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "facg");
- return Builder.CreateTrunc(Ops[0], Int16Ty);
- }
- case NEON::BI__builtin_neon_vcvth_n_s16_f16:
- case NEON::BI__builtin_neon_vcvth_n_u16_f16: {
- unsigned Int;
- llvm::Type* InTy = Int32Ty;
- llvm::Type* FTy = HalfTy;
- llvm::Type *Tys[2] = {InTy, FTy};
- Ops.push_back(EmitScalarExpr(E->getArg(1)));
- switch (BuiltinID) {
- default: llvm_unreachable("missing builtin ID in switch!");
- case NEON::BI__builtin_neon_vcvth_n_s16_f16:
- Int = Intrinsic::aarch64_neon_vcvtfp2fxs; break;
- case NEON::BI__builtin_neon_vcvth_n_u16_f16:
- Int = Intrinsic::aarch64_neon_vcvtfp2fxu; break;
- }
- Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "fcvth_n");
- return Builder.CreateTrunc(Ops[0], Int16Ty);
- }
- case NEON::BI__builtin_neon_vcvth_n_f16_s16:
- case NEON::BI__builtin_neon_vcvth_n_f16_u16: {
- unsigned Int;
- llvm::Type* FTy = HalfTy;
- llvm::Type* InTy = Int32Ty;
- llvm::Type *Tys[2] = {FTy, InTy};
- Ops.push_back(EmitScalarExpr(E->getArg(1)));
- switch (BuiltinID) {
- default: llvm_unreachable("missing builtin ID in switch!");
- case NEON::BI__builtin_neon_vcvth_n_f16_s16:
- Int = Intrinsic::aarch64_neon_vcvtfxs2fp;
- Ops[0] = Builder.CreateSExt(Ops[0], InTy, "sext");
- break;
- case NEON::BI__builtin_neon_vcvth_n_f16_u16:
- Int = Intrinsic::aarch64_neon_vcvtfxu2fp;
- Ops[0] = Builder.CreateZExt(Ops[0], InTy);
- break;
- }
- return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "fcvth_n");
- }
- case NEON::BI__builtin_neon_vpaddd_s64: {
- llvm::Type *Ty = llvm::VectorType::get(Int64Ty, 2);
- Value *Vec = EmitScalarExpr(E->getArg(0));
- // The vector is v2i64, so make sure it's bitcast to that.
- Vec = Builder.CreateBitCast(Vec, Ty, "v2i64");
- llvm::Value *Idx0 = llvm::ConstantInt::get(SizeTy, 0);
- llvm::Value *Idx1 = llvm::ConstantInt::get(SizeTy, 1);
- Value *Op0 = Builder.CreateExtractElement(Vec, Idx0, "lane0");
- Value *Op1 = Builder.CreateExtractElement(Vec, Idx1, "lane1");
- // Pairwise addition of a v2i64 into a scalar i64.
- return Builder.CreateAdd(Op0, Op1, "vpaddd");
- }
- case NEON::BI__builtin_neon_vpaddd_f64: {
- llvm::Type *Ty =
- llvm::VectorType::get(DoubleTy, 2);
- Value *Vec = EmitScalarExpr(E->getArg(0));
- // The vector is v2f64, so make sure it's bitcast to that.
- Vec = Builder.CreateBitCast(Vec, Ty, "v2f64");
- llvm::Value *Idx0 = llvm::ConstantInt::get(SizeTy, 0);
- llvm::Value *Idx1 = llvm::ConstantInt::get(SizeTy, 1);
- Value *Op0 = Builder.CreateExtractElement(Vec, Idx0, "lane0");
- Value *Op1 = Builder.CreateExtractElement(Vec, Idx1, "lane1");
- // Pairwise addition of a v2f64 into a scalar f64.
- return Builder.CreateFAdd(Op0, Op1, "vpaddd");
- }
- case NEON::BI__builtin_neon_vpadds_f32: {
- llvm::Type *Ty =
- llvm::VectorType::get(FloatTy, 2);
- Value *Vec = EmitScalarExpr(E->getArg(0));
- // The vector is v2f32, so make sure it's bitcast to that.
- Vec = Builder.CreateBitCast(Vec, Ty, "v2f32");
- llvm::Value *Idx0 = llvm::ConstantInt::get(SizeTy, 0);
- llvm::Value *Idx1 = llvm::ConstantInt::get(SizeTy, 1);
- Value *Op0 = Builder.CreateExtractElement(Vec, Idx0, "lane0");
- Value *Op1 = Builder.CreateExtractElement(Vec, Idx1, "lane1");
- // Pairwise addition of a v2f32 into a scalar f32.
- return Builder.CreateFAdd(Op0, Op1, "vpaddd");
- }
- case NEON::BI__builtin_neon_vceqzd_s64:
- case NEON::BI__builtin_neon_vceqzd_f64:
- case NEON::BI__builtin_neon_vceqzs_f32:
- case NEON::BI__builtin_neon_vceqzh_f16:
- Ops.push_back(EmitScalarExpr(E->getArg(0)));
- return EmitAArch64CompareBuiltinExpr(
- Ops[0], ConvertType(E->getCallReturnType(getContext())),
- ICmpInst::FCMP_OEQ, ICmpInst::ICMP_EQ, "vceqz");
- case NEON::BI__builtin_neon_vcgezd_s64:
- case NEON::BI__builtin_neon_vcgezd_f64:
- case NEON::BI__builtin_neon_vcgezs_f32:
- case NEON::BI__builtin_neon_vcgezh_f16:
- Ops.push_back(EmitScalarExpr(E->getArg(0)));
- return EmitAArch64CompareBuiltinExpr(
- Ops[0], ConvertType(E->getCallReturnType(getContext())),
- ICmpInst::FCMP_OGE, ICmpInst::ICMP_SGE, "vcgez");
- case NEON::BI__builtin_neon_vclezd_s64:
- case NEON::BI__builtin_neon_vclezd_f64:
- case NEON::BI__builtin_neon_vclezs_f32:
- case NEON::BI__builtin_neon_vclezh_f16:
- Ops.push_back(EmitScalarExpr(E->getArg(0)));
- return EmitAArch64CompareBuiltinExpr(
- Ops[0], ConvertType(E->getCallReturnType(getContext())),
- ICmpInst::FCMP_OLE, ICmpInst::ICMP_SLE, "vclez");
- case NEON::BI__builtin_neon_vcgtzd_s64:
- case NEON::BI__builtin_neon_vcgtzd_f64:
- case NEON::BI__builtin_neon_vcgtzs_f32:
- case NEON::BI__builtin_neon_vcgtzh_f16:
- Ops.push_back(EmitScalarExpr(E->getArg(0)));
- return EmitAArch64CompareBuiltinExpr(
- Ops[0], ConvertType(E->getCallReturnType(getContext())),
- ICmpInst::FCMP_OGT, ICmpInst::ICMP_SGT, "vcgtz");
- case NEON::BI__builtin_neon_vcltzd_s64:
- case NEON::BI__builtin_neon_vcltzd_f64:
- case NEON::BI__builtin_neon_vcltzs_f32:
- case NEON::BI__builtin_neon_vcltzh_f16:
- Ops.push_back(EmitScalarExpr(E->getArg(0)));
- return EmitAArch64CompareBuiltinExpr(
- Ops[0], ConvertType(E->getCallReturnType(getContext())),
- ICmpInst::FCMP_OLT, ICmpInst::ICMP_SLT, "vcltz");
- case NEON::BI__builtin_neon_vceqzd_u64: {
- Ops.push_back(EmitScalarExpr(E->getArg(0)));
- Ops[0] = Builder.CreateBitCast(Ops[0], Int64Ty);
- Ops[0] =
- Builder.CreateICmpEQ(Ops[0], llvm::Constant::getNullValue(Int64Ty));
- return Builder.CreateSExt(Ops[0], Int64Ty, "vceqzd");
- }
- case NEON::BI__builtin_neon_vceqd_f64:
- case NEON::BI__builtin_neon_vcled_f64:
- case NEON::BI__builtin_neon_vcltd_f64:
- case NEON::BI__builtin_neon_vcged_f64:
- case NEON::BI__builtin_neon_vcgtd_f64: {
- llvm::CmpInst::Predicate P;
- switch (BuiltinID) {
- default: llvm_unreachable("missing builtin ID in switch!");
- case NEON::BI__builtin_neon_vceqd_f64: P = llvm::FCmpInst::FCMP_OEQ; break;
- case NEON::BI__builtin_neon_vcled_f64: P = llvm::FCmpInst::FCMP_OLE; break;
- case NEON::BI__builtin_neon_vcltd_f64: P = llvm::FCmpInst::FCMP_OLT; break;
- case NEON::BI__builtin_neon_vcged_f64: P = llvm::FCmpInst::FCMP_OGE; break;
- case NEON::BI__builtin_neon_vcgtd_f64: P = llvm::FCmpInst::FCMP_OGT; break;
- }
- Ops.push_back(EmitScalarExpr(E->getArg(1)));
- Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy);
- Ops[1] = Builder.CreateBitCast(Ops[1], DoubleTy);
- Ops[0] = Builder.CreateFCmp(P, Ops[0], Ops[1]);
- return Builder.CreateSExt(Ops[0], Int64Ty, "vcmpd");
- }
- case NEON::BI__builtin_neon_vceqs_f32:
- case NEON::BI__builtin_neon_vcles_f32:
- case NEON::BI__builtin_neon_vclts_f32:
- case NEON::BI__builtin_neon_vcges_f32:
- case NEON::BI__builtin_neon_vcgts_f32: {
- llvm::CmpInst::Predicate P;
- switch (BuiltinID) {
- default: llvm_unreachable("missing builtin ID in switch!");
- case NEON::BI__builtin_neon_vceqs_f32: P = llvm::FCmpInst::FCMP_OEQ; break;
- case NEON::BI__builtin_neon_vcles_f32: P = llvm::FCmpInst::FCMP_OLE; break;
- case NEON::BI__builtin_neon_vclts_f32: P = llvm::FCmpInst::FCMP_OLT; break;
- case NEON::BI__builtin_neon_vcges_f32: P = llvm::FCmpInst::FCMP_OGE; break;
- case NEON::BI__builtin_neon_vcgts_f32: P = llvm::FCmpInst::FCMP_OGT; break;
- }
- Ops.push_back(EmitScalarExpr(E->getArg(1)));
- Ops[0] = Builder.CreateBitCast(Ops[0], FloatTy);
- Ops[1] = Builder.CreateBitCast(Ops[1], FloatTy);
- Ops[0] = Builder.CreateFCmp(P, Ops[0], Ops[1]);
- return Builder.CreateSExt(Ops[0], Int32Ty, "vcmpd");
- }
- case NEON::BI__builtin_neon_vceqh_f16:
- case NEON::BI__builtin_neon_vcleh_f16:
- case NEON::BI__builtin_neon_vclth_f16:
- case NEON::BI__builtin_neon_vcgeh_f16:
- case NEON::BI__builtin_neon_vcgth_f16: {
- llvm::CmpInst::Predicate P;
- switch (BuiltinID) {
- default: llvm_unreachable("missing builtin ID in switch!");
- case NEON::BI__builtin_neon_vceqh_f16: P = llvm::FCmpInst::FCMP_OEQ; break;
- case NEON::BI__builtin_neon_vcleh_f16: P = llvm::FCmpInst::FCMP_OLE; break;
- case NEON::BI__builtin_neon_vclth_f16: P = llvm::FCmpInst::FCMP_OLT; break;
- case NEON::BI__builtin_neon_vcgeh_f16: P = llvm::FCmpInst::FCMP_OGE; break;
- case NEON::BI__builtin_neon_vcgth_f16: P = llvm::FCmpInst::FCMP_OGT; break;
- }
- Ops.push_back(EmitScalarExpr(E->getArg(1)));
- Ops[0] = Builder.CreateBitCast(Ops[0], HalfTy);
- Ops[1] = Builder.CreateBitCast(Ops[1], HalfTy);
- Ops[0] = Builder.CreateFCmp(P, Ops[0], Ops[1]);
- return Builder.CreateSExt(Ops[0], Int16Ty, "vcmpd");
- }
- case NEON::BI__builtin_neon_vceqd_s64:
- case NEON::BI__builtin_neon_vceqd_u64:
- case NEON::BI__builtin_neon_vcgtd_s64:
- case NEON::BI__builtin_neon_vcgtd_u64:
- case NEON::BI__builtin_neon_vcltd_s64:
- case NEON::BI__builtin_neon_vcltd_u64:
- case NEON::BI__builtin_neon_vcged_u64:
- case NEON::BI__builtin_neon_vcged_s64:
- case NEON::BI__builtin_neon_vcled_u64:
- case NEON::BI__builtin_neon_vcled_s64: {
- llvm::CmpInst::Predicate P;
- switch (BuiltinID) {
- default: llvm_unreachable("missing builtin ID in switch!");
- case NEON::BI__builtin_neon_vceqd_s64:
- case NEON::BI__builtin_neon_vceqd_u64:P = llvm::ICmpInst::ICMP_EQ;break;
- case NEON::BI__builtin_neon_vcgtd_s64:P = llvm::ICmpInst::ICMP_SGT;break;
- case NEON::BI__builtin_neon_vcgtd_u64:P = llvm::ICmpInst::ICMP_UGT;break;
- case NEON::BI__builtin_neon_vcltd_s64:P = llvm::ICmpInst::ICMP_SLT;break;
- case NEON::BI__builtin_neon_vcltd_u64:P = llvm::ICmpInst::ICMP_ULT;break;
- case NEON::BI__builtin_neon_vcged_u64:P = llvm::ICmpInst::ICMP_UGE;break;
- case NEON::BI__builtin_neon_vcged_s64:P = llvm::ICmpInst::ICMP_SGE;break;
- case NEON::BI__builtin_neon_vcled_u64:P = llvm::ICmpInst::ICMP_ULE;break;
- case NEON::BI__builtin_neon_vcled_s64:P = llvm::ICmpInst::ICMP_SLE;break;
- }
- Ops.push_back(EmitScalarExpr(E->getArg(1)));
- Ops[0] = Builder.CreateBitCast(Ops[0], Int64Ty);
- Ops[1] = Builder.CreateBitCast(Ops[1], Int64Ty);
- Ops[0] = Builder.CreateICmp(P, Ops[0], Ops[1]);
- return Builder.CreateSExt(Ops[0], Int64Ty, "vceqd");
- }
- case NEON::BI__builtin_neon_vtstd_s64:
- case NEON::BI__builtin_neon_vtstd_u64: {
- Ops.push_back(EmitScalarExpr(E->getArg(1)));
- Ops[0] = Builder.CreateBitCast(Ops[0], Int64Ty);
- Ops[1] = Builder.CreateBitCast(Ops[1], Int64Ty);
- Ops[0] = Builder.CreateAnd(Ops[0], Ops[1]);
- Ops[0] = Builder.CreateICmp(ICmpInst::ICMP_NE, Ops[0],
- llvm::Constant::getNullValue(Int64Ty));
- return Builder.CreateSExt(Ops[0], Int64Ty, "vtstd");
- }
- case NEON::BI__builtin_neon_vset_lane_i8:
- case NEON::BI__builtin_neon_vset_lane_i16:
- case NEON::BI__builtin_neon_vset_lane_i32:
- case NEON::BI__builtin_neon_vset_lane_i64:
- case NEON::BI__builtin_neon_vset_lane_f32:
- case NEON::BI__builtin_neon_vsetq_lane_i8:
- case NEON::BI__builtin_neon_vsetq_lane_i16:
- case NEON::BI__builtin_neon_vsetq_lane_i32:
- case NEON::BI__builtin_neon_vsetq_lane_i64:
- case NEON::BI__builtin_neon_vsetq_lane_f32:
- Ops.push_back(EmitScalarExpr(E->getArg(2)));
- return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vset_lane");
- case NEON::BI__builtin_neon_vset_lane_f64:
- // The vector type needs a cast for the v1f64 variant.
- Ops[1] = Builder.CreateBitCast(Ops[1],
- llvm::VectorType::get(DoubleTy, 1));
- Ops.push_back(EmitScalarExpr(E->getArg(2)));
- return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vset_lane");
- case NEON::BI__builtin_neon_vsetq_lane_f64:
- // The vector type needs a cast for the v2f64 variant.
- Ops[1] = Builder.CreateBitCast(Ops[1],
- llvm::VectorType::get(DoubleTy, 2));
- Ops.push_back(EmitScalarExpr(E->getArg(2)));
- return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vset_lane");
- case NEON::BI__builtin_neon_vget_lane_i8:
- case NEON::BI__builtin_neon_vdupb_lane_i8:
- Ops[0] = Builder.CreateBitCast(Ops[0], llvm::VectorType::get(Int8Ty, 8));
- return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
- "vget_lane");
- case NEON::BI__builtin_neon_vgetq_lane_i8:
- case NEON::BI__builtin_neon_vdupb_laneq_i8:
- Ops[0] = Builder.CreateBitCast(Ops[0], llvm::VectorType::get(Int8Ty, 16));
- return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
- "vgetq_lane");
- case NEON::BI__builtin_neon_vget_lane_i16:
- case NEON::BI__builtin_neon_vduph_lane_i16:
- Ops[0] = Builder.CreateBitCast(Ops[0], llvm::VectorType::get(Int16Ty, 4));
- return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
- "vget_lane");
- case NEON::BI__builtin_neon_vgetq_lane_i16:
- case NEON::BI__builtin_neon_vduph_laneq_i16:
- Ops[0] = Builder.CreateBitCast(Ops[0], llvm::VectorType::get(Int16Ty, 8));
- return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
- "vgetq_lane");
- case NEON::BI__builtin_neon_vget_lane_i32:
- case NEON::BI__builtin_neon_vdups_lane_i32:
- Ops[0] = Builder.CreateBitCast(Ops[0], llvm::VectorType::get(Int32Ty, 2));
- return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
- "vget_lane");
- case NEON::BI__builtin_neon_vdups_lane_f32:
- Ops[0] = Builder.CreateBitCast(Ops[0],
- llvm::VectorType::get(FloatTy, 2));
- return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
- "vdups_lane");
- case NEON::BI__builtin_neon_vgetq_lane_i32:
- case NEON::BI__builtin_neon_vdups_laneq_i32:
- Ops[0] = Builder.CreateBitCast(Ops[0], llvm::VectorType::get(Int32Ty, 4));
- return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
- "vgetq_lane");
- case NEON::BI__builtin_neon_vget_lane_i64:
- case NEON::BI__builtin_neon_vdupd_lane_i64:
- Ops[0] = Builder.CreateBitCast(Ops[0], llvm::VectorType::get(Int64Ty, 1));
- return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
- "vget_lane");
- case NEON::BI__builtin_neon_vdupd_lane_f64:
- Ops[0] = Builder.CreateBitCast(Ops[0],
- llvm::VectorType::get(DoubleTy, 1));
- return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
- "vdupd_lane");
- case NEON::BI__builtin_neon_vgetq_lane_i64:
- case NEON::BI__builtin_neon_vdupd_laneq_i64:
- Ops[0] = Builder.CreateBitCast(Ops[0], llvm::VectorType::get(Int64Ty, 2));
- return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
- "vgetq_lane");
- case NEON::BI__builtin_neon_vget_lane_f32:
- Ops[0] = Builder.CreateBitCast(Ops[0],
- llvm::VectorType::get(FloatTy, 2));
- return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
- "vget_lane");
- case NEON::BI__builtin_neon_vget_lane_f64:
- Ops[0] = Builder.CreateBitCast(Ops[0],
- llvm::VectorType::get(DoubleTy, 1));
- return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
- "vget_lane");
- case NEON::BI__builtin_neon_vgetq_lane_f32:
- case NEON::BI__builtin_neon_vdups_laneq_f32:
- Ops[0] = Builder.CreateBitCast(Ops[0],
- llvm::VectorType::get(FloatTy, 4));
- return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
- "vgetq_lane");
- case NEON::BI__builtin_neon_vgetq_lane_f64:
- case NEON::BI__builtin_neon_vdupd_laneq_f64:
- Ops[0] = Builder.CreateBitCast(Ops[0],
- llvm::VectorType::get(DoubleTy, 2));
- return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
- "vgetq_lane");
- case NEON::BI__builtin_neon_vaddh_f16:
- Ops.push_back(EmitScalarExpr(E->getArg(1)));
- return Builder.CreateFAdd(Ops[0], Ops[1], "vaddh");
- case NEON::BI__builtin_neon_vsubh_f16:
- Ops.push_back(EmitScalarExpr(E->getArg(1)));
- return Builder.CreateFSub(Ops[0], Ops[1], "vsubh");
- case NEON::BI__builtin_neon_vmulh_f16:
- Ops.push_back(EmitScalarExpr(E->getArg(1)));
- return Builder.CreateFMul(Ops[0], Ops[1], "vmulh");
- case NEON::BI__builtin_neon_vdivh_f16:
- Ops.push_back(EmitScalarExpr(E->getArg(1)));
- return Builder.CreateFDiv(Ops[0], Ops[1], "vdivh");
- case NEON::BI__builtin_neon_vfmah_f16: {
- Function *F = CGM.getIntrinsic(Intrinsic::fma, HalfTy);
- // NEON intrinsic puts accumulator first, unlike the LLVM fma.
- return Builder.CreateCall(F,
- {EmitScalarExpr(E->getArg(1)), EmitScalarExpr(E->getArg(2)), Ops[0]});
- }
- case NEON::BI__builtin_neon_vfmsh_f16: {
- Function *F = CGM.getIntrinsic(Intrinsic::fma, HalfTy);
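- // vfms is emitted as an fma with the first multiplicand negated.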
- Value *Zero = llvm::ConstantFP::getZeroValueForNegation(HalfTy);
- Value* Sub = Builder.CreateFSub(Zero, EmitScalarExpr(E->getArg(1)), "vsubh");
- // NEON intrinsic puts accumulator first, unlike the LLVM fma.
- return Builder.CreateCall(F, {Sub, EmitScalarExpr(E->getArg(2)), Ops[0]});
- }
- case NEON::BI__builtin_neon_vaddd_s64:
- case NEON::BI__builtin_neon_vaddd_u64:
- return Builder.CreateAdd(Ops[0], EmitScalarExpr(E->getArg(1)), "vaddd");
- case NEON::BI__builtin_neon_vsubd_s64:
- case NEON::BI__builtin_neon_vsubd_u64:
- return Builder.CreateSub(Ops[0], EmitScalarExpr(E->getArg(1)), "vsubd");
- case NEON::BI__builtin_neon_vqdmlalh_s16:
- case NEON::BI__builtin_neon_vqdmlslh_s16: {
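- // There is no scalar form of sqdmull for i16, so wrap the operands in
- // v4i16 vectors, multiply, and extract lane 0 of the v4i32 product
- // before accumulating with a scalar saturating add/sub.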
- SmallVector<Value *, 2> ProductOps;
- ProductOps.push_back(vectorWrapScalar16(Ops[1]));
- ProductOps.push_back(vectorWrapScalar16(EmitScalarExpr(E->getArg(2))));
- llvm::Type *VTy = llvm::VectorType::get(Int32Ty, 4);
- Ops[1] = EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqdmull, VTy),
- ProductOps, "vqdmlXl");
- Constant *CI = ConstantInt::get(SizeTy, 0);
- Ops[1] = Builder.CreateExtractElement(Ops[1], CI, "lane0");
- unsigned AccumInt = BuiltinID == NEON::BI__builtin_neon_vqdmlalh_s16
- ? Intrinsic::aarch64_neon_sqadd
- : Intrinsic::aarch64_neon_sqsub;
- return EmitNeonCall(CGM.getIntrinsic(AccumInt, Int32Ty), Ops, "vqdmlXl");
- }
- case NEON::BI__builtin_neon_vqshlud_n_s64: {
- Ops.push_back(EmitScalarExpr(E->getArg(1)));
- Ops[1] = Builder.CreateZExt(Ops[1], Int64Ty);
- return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqshlu, Int64Ty),
- Ops, "vqshlu_n");
- }
- case NEON::BI__builtin_neon_vqshld_n_u64:
- case NEON::BI__builtin_neon_vqshld_n_s64: {
- unsigned Int = BuiltinID == NEON::BI__builtin_neon_vqshld_n_u64
- ? Intrinsic::aarch64_neon_uqshl
- : Intrinsic::aarch64_neon_sqshl;
- Ops.push_back(EmitScalarExpr(E->getArg(1)));
- Ops[1] = Builder.CreateZExt(Ops[1], Int64Ty);
- return EmitNeonCall(CGM.getIntrinsic(Int, Int64Ty), Ops, "vqshl_n");
- }
- case NEON::BI__builtin_neon_vrshrd_n_u64:
- case NEON::BI__builtin_neon_vrshrd_n_s64: {
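- // There is no rounding shift-right intrinsic, so emit a rounding shift
- // left by the negated amount instead.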
- unsigned Int = BuiltinID == NEON::BI__builtin_neon_vrshrd_n_u64
- ? Intrinsic::aarch64_neon_urshl
- : Intrinsic::aarch64_neon_srshl;
- Ops.push_back(EmitScalarExpr(E->getArg(1)));
- int SV = cast<ConstantInt>(Ops[1])->getSExtValue();
- Ops[1] = ConstantInt::get(Int64Ty, -SV);
- return EmitNeonCall(CGM.getIntrinsic(Int, Int64Ty), Ops, "vrshr_n");
- }
- case NEON::BI__builtin_neon_vrsrad_n_u64:
- case NEON::BI__builtin_neon_vrsrad_n_s64: {
- unsigned Int = BuiltinID == NEON::BI__builtin_neon_vrsrad_n_u64
- ? Intrinsic::aarch64_neon_urshl
- : Intrinsic::aarch64_neon_srshl;
- Ops[1] = Builder.CreateBitCast(Ops[1], Int64Ty);
- Ops.push_back(Builder.CreateNeg(EmitScalarExpr(E->getArg(2))));
- Ops[1] = Builder.CreateCall(CGM.getIntrinsic(Int, Int64Ty),
- {Ops[1], Builder.CreateSExt(Ops[2], Int64Ty)});
- return Builder.CreateAdd(Ops[0], Builder.CreateBitCast(Ops[1], Int64Ty));
- }
- case NEON::BI__builtin_neon_vshld_n_s64:
- case NEON::BI__builtin_neon_vshld_n_u64: {
- llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(1)));
- return Builder.CreateShl(
- Ops[0], ConstantInt::get(Int64Ty, Amt->getZExtValue()), "shld_n");
- }
- case NEON::BI__builtin_neon_vshrd_n_s64: {
- llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(1)));
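- // Shifting right by the full bit width would be poison in IR; clamping
- // the amount to 63 is equivalent, as an arithmetic shift replicates the
- // sign bit.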
- return Builder.CreateAShr(
- Ops[0], ConstantInt::get(Int64Ty, std::min(static_cast<uint64_t>(63),
- Amt->getZExtValue())),
- "shrd_n");
- }
- case NEON::BI__builtin_neon_vshrd_n_u64: {
- llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(1)));
- uint64_t ShiftAmt = Amt->getZExtValue();
- // Right-shifting an unsigned value by its size yields 0.
- if (ShiftAmt == 64)
- return ConstantInt::get(Int64Ty, 0);
- return Builder.CreateLShr(Ops[0], ConstantInt::get(Int64Ty, ShiftAmt),
- "shrd_n");
- }
- case NEON::BI__builtin_neon_vsrad_n_s64: {
- llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(2)));
- Ops[1] = Builder.CreateAShr(
- Ops[1], ConstantInt::get(Int64Ty, std::min(static_cast<uint64_t>(63),
- Amt->getZExtValue())),
- "shrd_n");
- return Builder.CreateAdd(Ops[0], Ops[1]);
- }
- case NEON::BI__builtin_neon_vsrad_n_u64: {
- llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(2)));
- uint64_t ShiftAmt = Amt->getZExtValue();
- // Right-shifting an unsigned value by its size yields 0.
- // As Op + 0 = Op, return Ops[0] directly.
- if (ShiftAmt == 64)
- return Ops[0];
- Ops[1] = Builder.CreateLShr(Ops[1], ConstantInt::get(Int64Ty, ShiftAmt),
- "shrd_n");
- return Builder.CreateAdd(Ops[0], Ops[1]);
- }
- case NEON::BI__builtin_neon_vqdmlalh_lane_s16:
- case NEON::BI__builtin_neon_vqdmlalh_laneq_s16:
- case NEON::BI__builtin_neon_vqdmlslh_lane_s16:
- case NEON::BI__builtin_neon_vqdmlslh_laneq_s16: {
- Ops[2] = Builder.CreateExtractElement(Ops[2], EmitScalarExpr(E->getArg(3)),
- "lane");
- SmallVector<Value *, 2> ProductOps;
- ProductOps.push_back(vectorWrapScalar16(Ops[1]));
- ProductOps.push_back(vectorWrapScalar16(Ops[2]));
- llvm::Type *VTy = llvm::VectorType::get(Int32Ty, 4);
- Ops[1] = EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqdmull, VTy),
- ProductOps, "vqdmlXl");
- Constant *CI = ConstantInt::get(SizeTy, 0);
- Ops[1] = Builder.CreateExtractElement(Ops[1], CI, "lane0");
- Ops.pop_back();
- unsigned AccInt = (BuiltinID == NEON::BI__builtin_neon_vqdmlalh_lane_s16 ||
- BuiltinID == NEON::BI__builtin_neon_vqdmlalh_laneq_s16)
- ? Intrinsic::aarch64_neon_sqadd
- : Intrinsic::aarch64_neon_sqsub;
- return EmitNeonCall(CGM.getIntrinsic(AccInt, Int32Ty), Ops, "vqdmlXl");
- }
- case NEON::BI__builtin_neon_vqdmlals_s32:
- case NEON::BI__builtin_neon_vqdmlsls_s32: {
- SmallVector<Value *, 2> ProductOps;
- ProductOps.push_back(Ops[1]);
- ProductOps.push_back(EmitScalarExpr(E->getArg(2)));
- Ops[1] =
- EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqdmulls_scalar),
- ProductOps, "vqdmlXl");
- unsigned AccumInt = BuiltinID == NEON::BI__builtin_neon_vqdmlals_s32
- ? Intrinsic::aarch64_neon_sqadd
- : Intrinsic::aarch64_neon_sqsub;
- return EmitNeonCall(CGM.getIntrinsic(AccumInt, Int64Ty), Ops, "vqdmlXl");
- }
- case NEON::BI__builtin_neon_vqdmlals_lane_s32:
- case NEON::BI__builtin_neon_vqdmlals_laneq_s32:
- case NEON::BI__builtin_neon_vqdmlsls_lane_s32:
- case NEON::BI__builtin_neon_vqdmlsls_laneq_s32: {
- Ops[2] = Builder.CreateExtractElement(Ops[2], EmitScalarExpr(E->getArg(3)),
- "lane");
- SmallVector<Value *, 2> ProductOps;
- ProductOps.push_back(Ops[1]);
- ProductOps.push_back(Ops[2]);
- Ops[1] =
- EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqdmulls_scalar),
- ProductOps, "vqdmlXl");
- Ops.pop_back();
- unsigned AccInt = (BuiltinID == NEON::BI__builtin_neon_vqdmlals_lane_s32 ||
- BuiltinID == NEON::BI__builtin_neon_vqdmlals_laneq_s32)
- ? Intrinsic::aarch64_neon_sqadd
- : Intrinsic::aarch64_neon_sqsub;
- return EmitNeonCall(CGM.getIntrinsic(AccInt, Int64Ty), Ops, "vqdmlXl");
- }
- case NEON::BI__builtin_neon_vduph_lane_f16: {
- return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
- "vget_lane");
- }
- case NEON::BI__builtin_neon_vduph_laneq_f16: {
- return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
- "vgetq_lane");
- }
- }
- llvm::VectorType *VTy = GetNeonType(this, Type);
- llvm::Type *Ty = VTy;
- if (!Ty)
- return nullptr;
- // Not all intrinsics handled by the common case work for AArch64 yet, so only
- // defer to common code if it's been added to our special map.
- Builtin = findNeonIntrinsicInMap(AArch64SIMDIntrinsicMap, BuiltinID,
- AArch64SIMDIntrinsicsProvenSorted);
- if (Builtin)
- return EmitCommonNeonBuiltinExpr(
- Builtin->BuiltinID, Builtin->LLVMIntrinsic, Builtin->AltLLVMIntrinsic,
- Builtin->NameHint, Builtin->TypeModifier, E, Ops,
- /*never use addresses*/ Address::invalid(), Address::invalid(), Arch);
- if (Value *V = EmitAArch64TblBuiltinExpr(*this, BuiltinID, E, Ops, Arch))
- return V;
- unsigned Int;
- switch (BuiltinID) {
- default: return nullptr;
- case NEON::BI__builtin_neon_vbsl_v:
- case NEON::BI__builtin_neon_vbslq_v: {
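- // Emit the bitwise select as (mask & a) | (~mask & b) on the integer
- // form of the vector type.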
- llvm::Type *BitTy = llvm::VectorType::getInteger(VTy);
- Ops[0] = Builder.CreateBitCast(Ops[0], BitTy, "vbsl");
- Ops[1] = Builder.CreateBitCast(Ops[1], BitTy, "vbsl");
- Ops[2] = Builder.CreateBitCast(Ops[2], BitTy, "vbsl");
- Ops[1] = Builder.CreateAnd(Ops[0], Ops[1], "vbsl");
- Ops[2] = Builder.CreateAnd(Builder.CreateNot(Ops[0]), Ops[2], "vbsl");
- Ops[0] = Builder.CreateOr(Ops[1], Ops[2], "vbsl");
- return Builder.CreateBitCast(Ops[0], Ty);
- }
- case NEON::BI__builtin_neon_vfma_lane_v:
- case NEON::BI__builtin_neon_vfmaq_lane_v: { // Only used for FP types
- // The ARM builtins (and instructions) have the addend as the first
- // operand, but the 'fma' intrinsics have it last. Swap it around here.
- Value *Addend = Ops[0];
- Value *Multiplicand = Ops[1];
- Value *LaneSource = Ops[2];
- Ops[0] = Multiplicand;
- Ops[1] = LaneSource;
- Ops[2] = Addend;
- // Now adjust things to handle the lane access.
- llvm::Type *SourceTy = BuiltinID == NEON::BI__builtin_neon_vfmaq_lane_v ?
- llvm::VectorType::get(VTy->getElementType(), VTy->getNumElements() / 2) :
- VTy;
- llvm::Constant *cst = cast<Constant>(Ops[3]);
- Value *SV = llvm::ConstantVector::getSplat(VTy->getNumElements(), cst);
- Ops[1] = Builder.CreateBitCast(Ops[1], SourceTy);
- Ops[1] = Builder.CreateShuffleVector(Ops[1], Ops[1], SV, "lane");
- Ops.pop_back();
- Int = Intrinsic::fma;
- return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "fmla");
- }
- case NEON::BI__builtin_neon_vfma_laneq_v: {
- llvm::VectorType *VTy = cast<llvm::VectorType>(Ty);
- // v1f64 fma should be mapped to Neon scalar f64 fma
- if (VTy && VTy->getElementType() == DoubleTy) {
- Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy);
- Ops[1] = Builder.CreateBitCast(Ops[1], DoubleTy);
- llvm::Type *VTy = GetNeonType(this,
- NeonTypeFlags(NeonTypeFlags::Float64, false, true));
- Ops[2] = Builder.CreateBitCast(Ops[2], VTy);
- Ops[2] = Builder.CreateExtractElement(Ops[2], Ops[3], "extract");
- Function *F = CGM.getIntrinsic(Intrinsic::fma, DoubleTy);
- Value *Result = Builder.CreateCall(F, {Ops[1], Ops[2], Ops[0]});
- return Builder.CreateBitCast(Result, Ty);
- }
- Function *F = CGM.getIntrinsic(Intrinsic::fma, Ty);
- Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
- Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
- llvm::Type *STy = llvm::VectorType::get(VTy->getElementType(),
- VTy->getNumElements() * 2);
- Ops[2] = Builder.CreateBitCast(Ops[2], STy);
- Value* SV = llvm::ConstantVector::getSplat(VTy->getNumElements(),
- cast<ConstantInt>(Ops[3]));
- Ops[2] = Builder.CreateShuffleVector(Ops[2], Ops[2], SV, "lane");
- return Builder.CreateCall(F, {Ops[2], Ops[1], Ops[0]});
- }
- case NEON::BI__builtin_neon_vfmaq_laneq_v: {
- Function *F = CGM.getIntrinsic(Intrinsic::fma, Ty);
- Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
- Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
- Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
- Ops[2] = EmitNeonSplat(Ops[2], cast<ConstantInt>(Ops[3]));
- return Builder.CreateCall(F, {Ops[2], Ops[1], Ops[0]});
- }
- case NEON::BI__builtin_neon_vfmah_lane_f16:
- case NEON::BI__builtin_neon_vfmas_lane_f32:
- case NEON::BI__builtin_neon_vfmah_laneq_f16:
- case NEON::BI__builtin_neon_vfmas_laneq_f32:
- case NEON::BI__builtin_neon_vfmad_lane_f64:
- case NEON::BI__builtin_neon_vfmad_laneq_f64: {
- Ops.push_back(EmitScalarExpr(E->getArg(3)));
- llvm::Type *Ty = ConvertType(E->getCallReturnType(getContext()));
- Function *F = CGM.getIntrinsic(Intrinsic::fma, Ty);
- Ops[2] = Builder.CreateExtractElement(Ops[2], Ops[3], "extract");
- return Builder.CreateCall(F, {Ops[1], Ops[2], Ops[0]});
- }
- case NEON::BI__builtin_neon_vmull_v:
- // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
- Int = usgn ? Intrinsic::aarch64_neon_umull : Intrinsic::aarch64_neon_smull;
- if (Type.isPoly()) Int = Intrinsic::aarch64_neon_pmull;
- return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmull");
- case NEON::BI__builtin_neon_vmax_v:
- case NEON::BI__builtin_neon_vmaxq_v:
- // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
- Int = usgn ? Intrinsic::aarch64_neon_umax : Intrinsic::aarch64_neon_smax;
- if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fmax;
- return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmax");
- case NEON::BI__builtin_neon_vmaxh_f16: {
- Ops.push_back(EmitScalarExpr(E->getArg(1)));
- Int = Intrinsic::aarch64_neon_fmax;
- return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vmax");
- }
- case NEON::BI__builtin_neon_vmin_v:
- case NEON::BI__builtin_neon_vminq_v:
- // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
- Int = usgn ? Intrinsic::aarch64_neon_umin : Intrinsic::aarch64_neon_smin;
- if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fmin;
- return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmin");
- case NEON::BI__builtin_neon_vminh_f16: {
- Ops.push_back(EmitScalarExpr(E->getArg(1)));
- Int = Intrinsic::aarch64_neon_fmin;
- return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vmin");
- }
- case NEON::BI__builtin_neon_vabd_v:
- case NEON::BI__builtin_neon_vabdq_v:
- // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
- Int = usgn ? Intrinsic::aarch64_neon_uabd : Intrinsic::aarch64_neon_sabd;
- if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fabd;
- return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vabd");
- case NEON::BI__builtin_neon_vpadal_v:
- case NEON::BI__builtin_neon_vpadalq_v: {
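- // vpadal is a pairwise widening add (uaddlp/saddlp) of the second
- // operand followed by a plain add of the accumulator.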
- unsigned ArgElts = VTy->getNumElements();
- llvm::IntegerType *EltTy = cast<IntegerType>(VTy->getElementType());
- unsigned BitWidth = EltTy->getBitWidth();
- llvm::Type *ArgTy = llvm::VectorType::get(
- llvm::IntegerType::get(getLLVMContext(), BitWidth/2), 2*ArgElts);
- llvm::Type* Tys[2] = { VTy, ArgTy };
- Int = usgn ? Intrinsic::aarch64_neon_uaddlp : Intrinsic::aarch64_neon_saddlp;
- SmallVector<llvm::Value*, 1> TmpOps;
- TmpOps.push_back(Ops[1]);
- Function *F = CGM.getIntrinsic(Int, Tys);
- llvm::Value *tmp = EmitNeonCall(F, TmpOps, "vpadal");
- llvm::Value *addend = Builder.CreateBitCast(Ops[0], tmp->getType());
- return Builder.CreateAdd(tmp, addend);
- }
- case NEON::BI__builtin_neon_vpmin_v:
- case NEON::BI__builtin_neon_vpminq_v:
- // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
- Int = usgn ? Intrinsic::aarch64_neon_uminp : Intrinsic::aarch64_neon_sminp;
- if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fminp;
- return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpmin");
- case NEON::BI__builtin_neon_vpmax_v:
- case NEON::BI__builtin_neon_vpmaxq_v:
- // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
- Int = usgn ? Intrinsic::aarch64_neon_umaxp : Intrinsic::aarch64_neon_smaxp;
- if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fmaxp;
- return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpmax");
- case NEON::BI__builtin_neon_vminnm_v:
- case NEON::BI__builtin_neon_vminnmq_v:
- Int = Intrinsic::aarch64_neon_fminnm;
- return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vminnm");
- case NEON::BI__builtin_neon_vminnmh_f16:
- Ops.push_back(EmitScalarExpr(E->getArg(1)));
- Int = Intrinsic::aarch64_neon_fminnm;
- return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vminnm");
- case NEON::BI__builtin_neon_vmaxnm_v:
- case NEON::BI__builtin_neon_vmaxnmq_v:
- Int = Intrinsic::aarch64_neon_fmaxnm;
- return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmaxnm");
- case NEON::BI__builtin_neon_vmaxnmh_f16:
- Ops.push_back(EmitScalarExpr(E->getArg(1)));
- Int = Intrinsic::aarch64_neon_fmaxnm;
- return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vmaxnm");
- case NEON::BI__builtin_neon_vrecpss_f32: {
- Ops.push_back(EmitScalarExpr(E->getArg(1)));
- return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_frecps, FloatTy),
- Ops, "vrecps");
- }
- case NEON::BI__builtin_neon_vrecpsd_f64:
- Ops.push_back(EmitScalarExpr(E->getArg(1)));
- return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_frecps, DoubleTy),
- Ops, "vrecps");
- case NEON::BI__builtin_neon_vrecpsh_f16:
- Ops.push_back(EmitScalarExpr(E->getArg(1)));
- return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_frecps, HalfTy),
- Ops, "vrecps");
- case NEON::BI__builtin_neon_vqshrun_n_v:
- Int = Intrinsic::aarch64_neon_sqshrun;
- return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshrun_n");
- case NEON::BI__builtin_neon_vqrshrun_n_v:
- Int = Intrinsic::aarch64_neon_sqrshrun;
- return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqrshrun_n");
- case NEON::BI__builtin_neon_vqshrn_n_v:
- Int = usgn ? Intrinsic::aarch64_neon_uqshrn : Intrinsic::aarch64_neon_sqshrn;
- return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshrn_n");
- case NEON::BI__builtin_neon_vrshrn_n_v:
- Int = Intrinsic::aarch64_neon_rshrn;
- return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrshrn_n");
- case NEON::BI__builtin_neon_vqrshrn_n_v:
- Int = usgn ? Intrinsic::aarch64_neon_uqrshrn : Intrinsic::aarch64_neon_sqrshrn;
- return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqrshrn_n");
- case NEON::BI__builtin_neon_vrndah_f16: {
- Ops.push_back(EmitScalarExpr(E->getArg(0)));
- Int = Intrinsic::round;
- return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrnda");
- }
- case NEON::BI__builtin_neon_vrnda_v:
- case NEON::BI__builtin_neon_vrndaq_v: {
- Int = Intrinsic::round;
- return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrnda");
- }
- case NEON::BI__builtin_neon_vrndih_f16: {
- Ops.push_back(EmitScalarExpr(E->getArg(0)));
- Int = Intrinsic::nearbyint;
- return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndi");
- }
- case NEON::BI__builtin_neon_vrndmh_f16: {
- Ops.push_back(EmitScalarExpr(E->getArg(0)));
- Int = Intrinsic::floor;
- return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndm");
- }
- case NEON::BI__builtin_neon_vrndm_v:
- case NEON::BI__builtin_neon_vrndmq_v: {
- Int = Intrinsic::floor;
- return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndm");
- }
- case NEON::BI__builtin_neon_vrndnh_f16: {
- Ops.push_back(EmitScalarExpr(E->getArg(0)));
- Int = Intrinsic::aarch64_neon_frintn;
- return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndn");
- }
- case NEON::BI__builtin_neon_vrndn_v:
- case NEON::BI__builtin_neon_vrndnq_v: {
- Int = Intrinsic::aarch64_neon_frintn;
- return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndn");
- }
- case NEON::BI__builtin_neon_vrndns_f32: {
- Ops.push_back(EmitScalarExpr(E->getArg(0)));
- Int = Intrinsic::aarch64_neon_frintn;
- return EmitNeonCall(CGM.getIntrinsic(Int, FloatTy), Ops, "vrndn");
- }
- case NEON::BI__builtin_neon_vrndph_f16: {
- Ops.push_back(EmitScalarExpr(E->getArg(0)));
- Int = Intrinsic::ceil;
- return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndp");
- }
- case NEON::BI__builtin_neon_vrndp_v:
- case NEON::BI__builtin_neon_vrndpq_v: {
- Int = Intrinsic::ceil;
- return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndp");
- }
- case NEON::BI__builtin_neon_vrndxh_f16: {
- Ops.push_back(EmitScalarExpr(E->getArg(0)));
- Int = Intrinsic::rint;
- return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndx");
- }
- case NEON::BI__builtin_neon_vrndx_v:
- case NEON::BI__builtin_neon_vrndxq_v: {
- Int = Intrinsic::rint;
- return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndx");
- }
- case NEON::BI__builtin_neon_vrndh_f16: {
- Ops.push_back(EmitScalarExpr(E->getArg(0)));
- Int = Intrinsic::trunc;
- return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndz");
- }
- case NEON::BI__builtin_neon_vrnd_v:
- case NEON::BI__builtin_neon_vrndq_v: {
- Int = Intrinsic::trunc;
- return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndz");
- }
- case NEON::BI__builtin_neon_vcvt_f64_v:
- case NEON::BI__builtin_neon_vcvtq_f64_v:
- Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
- Ty = GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float64, false, quad));
- return usgn ? Builder.CreateUIToFP(Ops[0], Ty, "vcvt")
- : Builder.CreateSIToFP(Ops[0], Ty, "vcvt");
- case NEON::BI__builtin_neon_vcvt_f64_f32: {
- assert(Type.getEltType() == NeonTypeFlags::Float64 && quad &&
- "unexpected vcvt_f64_f32 builtin");
- NeonTypeFlags SrcFlag = NeonTypeFlags(NeonTypeFlags::Float32, false, false);
- Ops[0] = Builder.CreateBitCast(Ops[0], GetNeonType(this, SrcFlag));
- return Builder.CreateFPExt(Ops[0], Ty, "vcvt");
- }
- case NEON::BI__builtin_neon_vcvt_f32_f64: {
- assert(Type.getEltType() == NeonTypeFlags::Float32 &&
- "unexpected vcvt_f32_f64 builtin");
- NeonTypeFlags SrcFlag = NeonTypeFlags(NeonTypeFlags::Float64, false, true);
- Ops[0] = Builder.CreateBitCast(Ops[0], GetNeonType(this, SrcFlag));
- return Builder.CreateFPTrunc(Ops[0], Ty, "vcvt");
- }
- case NEON::BI__builtin_neon_vcvt_s32_v:
- case NEON::BI__builtin_neon_vcvt_u32_v:
- case NEON::BI__builtin_neon_vcvt_s64_v:
- case NEON::BI__builtin_neon_vcvt_u64_v:
- case NEON::BI__builtin_neon_vcvt_s16_v:
- case NEON::BI__builtin_neon_vcvt_u16_v:
- case NEON::BI__builtin_neon_vcvtq_s32_v:
- case NEON::BI__builtin_neon_vcvtq_u32_v:
- case NEON::BI__builtin_neon_vcvtq_s64_v:
- case NEON::BI__builtin_neon_vcvtq_u64_v:
- case NEON::BI__builtin_neon_vcvtq_s16_v:
- case NEON::BI__builtin_neon_vcvtq_u16_v: {
- Ops[0] = Builder.CreateBitCast(Ops[0], GetFloatNeonType(this, Type));
- if (usgn)
- return Builder.CreateFPToUI(Ops[0], Ty);
- return Builder.CreateFPToSI(Ops[0], Ty);
- }
- case NEON::BI__builtin_neon_vcvta_s16_v:
- case NEON::BI__builtin_neon_vcvta_u16_v:
- case NEON::BI__builtin_neon_vcvta_s32_v:
- case NEON::BI__builtin_neon_vcvtaq_s16_v:
- case NEON::BI__builtin_neon_vcvtaq_s32_v:
- case NEON::BI__builtin_neon_vcvta_u32_v:
- case NEON::BI__builtin_neon_vcvtaq_u16_v:
- case NEON::BI__builtin_neon_vcvtaq_u32_v:
- case NEON::BI__builtin_neon_vcvta_s64_v:
- case NEON::BI__builtin_neon_vcvtaq_s64_v:
- case NEON::BI__builtin_neon_vcvta_u64_v:
- case NEON::BI__builtin_neon_vcvtaq_u64_v: {
- Int = usgn ? Intrinsic::aarch64_neon_fcvtau : Intrinsic::aarch64_neon_fcvtas;
- llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) };
- return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvta");
- }
- case NEON::BI__builtin_neon_vcvtm_s16_v:
- case NEON::BI__builtin_neon_vcvtm_s32_v:
- case NEON::BI__builtin_neon_vcvtmq_s16_v:
- case NEON::BI__builtin_neon_vcvtmq_s32_v:
- case NEON::BI__builtin_neon_vcvtm_u16_v:
- case NEON::BI__builtin_neon_vcvtm_u32_v:
- case NEON::BI__builtin_neon_vcvtmq_u16_v:
- case NEON::BI__builtin_neon_vcvtmq_u32_v:
- case NEON::BI__builtin_neon_vcvtm_s64_v:
- case NEON::BI__builtin_neon_vcvtmq_s64_v:
- case NEON::BI__builtin_neon_vcvtm_u64_v:
- case NEON::BI__builtin_neon_vcvtmq_u64_v: {
- Int = usgn ? Intrinsic::aarch64_neon_fcvtmu : Intrinsic::aarch64_neon_fcvtms;
- llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) };
- return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtm");
- }
- case NEON::BI__builtin_neon_vcvtn_s16_v:
- case NEON::BI__builtin_neon_vcvtn_s32_v:
- case NEON::BI__builtin_neon_vcvtnq_s16_v:
- case NEON::BI__builtin_neon_vcvtnq_s32_v:
- case NEON::BI__builtin_neon_vcvtn_u16_v:
- case NEON::BI__builtin_neon_vcvtn_u32_v:
- case NEON::BI__builtin_neon_vcvtnq_u16_v:
- case NEON::BI__builtin_neon_vcvtnq_u32_v:
- case NEON::BI__builtin_neon_vcvtn_s64_v:
- case NEON::BI__builtin_neon_vcvtnq_s64_v:
- case NEON::BI__builtin_neon_vcvtn_u64_v:
- case NEON::BI__builtin_neon_vcvtnq_u64_v: {
- Int = usgn ? Intrinsic::aarch64_neon_fcvtnu : Intrinsic::aarch64_neon_fcvtns;
- llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) };
- return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtn");
- }
- case NEON::BI__builtin_neon_vcvtp_s16_v:
- case NEON::BI__builtin_neon_vcvtp_s32_v:
- case NEON::BI__builtin_neon_vcvtpq_s16_v:
- case NEON::BI__builtin_neon_vcvtpq_s32_v:
- case NEON::BI__builtin_neon_vcvtp_u16_v:
- case NEON::BI__builtin_neon_vcvtp_u32_v:
- case NEON::BI__builtin_neon_vcvtpq_u16_v:
- case NEON::BI__builtin_neon_vcvtpq_u32_v:
- case NEON::BI__builtin_neon_vcvtp_s64_v:
- case NEON::BI__builtin_neon_vcvtpq_s64_v:
- case NEON::BI__builtin_neon_vcvtp_u64_v:
- case NEON::BI__builtin_neon_vcvtpq_u64_v: {
- Int = usgn ? Intrinsic::aarch64_neon_fcvtpu : Intrinsic::aarch64_neon_fcvtps;
- llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) };
- return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtp");
- }
- case NEON::BI__builtin_neon_vmulx_v:
- case NEON::BI__builtin_neon_vmulxq_v: {
- Int = Intrinsic::aarch64_neon_fmulx;
- return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmulx");
- }
- case NEON::BI__builtin_neon_vmulxh_lane_f16:
- case NEON::BI__builtin_neon_vmulxh_laneq_f16: {
- // vmulx_lane should be mapped to a Neon scalar mulx after
- // extracting the scalar element.
- Ops.push_back(EmitScalarExpr(E->getArg(2)));
- Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2], "extract");
- Ops.pop_back();
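- // Only the two scalar operands remain; the lane index was consumed by the
- // extract above.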
- Int = Intrinsic::aarch64_neon_fmulx;
- return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vmulx");
- }
- case NEON::BI__builtin_neon_vmul_lane_v:
- case NEON::BI__builtin_neon_vmul_laneq_v: {
- // v1f64 vmul_lane should be mapped to Neon scalar mul lane
- bool Quad = BuiltinID == NEON::BI__builtin_neon_vmul_laneq_v;
- Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy);
- llvm::Type *VTy = GetNeonType(this,
- NeonTypeFlags(NeonTypeFlags::Float64, false, Quad));
- Ops[1] = Builder.CreateBitCast(Ops[1], VTy);
- Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2], "extract");
- Value *Result = Builder.CreateFMul(Ops[0], Ops[1]);
- return Builder.CreateBitCast(Result, Ty);
- }
- case NEON::BI__builtin_neon_vnegd_s64:
- return Builder.CreateNeg(EmitScalarExpr(E->getArg(0)), "vnegd");
- case NEON::BI__builtin_neon_vnegh_f16:
- return Builder.CreateFNeg(EmitScalarExpr(E->getArg(0)), "vnegh");
- case NEON::BI__builtin_neon_vpmaxnm_v:
- case NEON::BI__builtin_neon_vpmaxnmq_v: {
- Int = Intrinsic::aarch64_neon_fmaxnmp;
- return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpmaxnm");
- }
- case NEON::BI__builtin_neon_vpminnm_v:
- case NEON::BI__builtin_neon_vpminnmq_v: {
- Int = Intrinsic::aarch64_neon_fminnmp;
- return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpminnm");
- }
- case NEON::BI__builtin_neon_vsqrth_f16: {
- Ops.push_back(EmitScalarExpr(E->getArg(0)));
- Int = Intrinsic::sqrt;
- return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vsqrt");
- }
- case NEON::BI__builtin_neon_vsqrt_v:
- case NEON::BI__builtin_neon_vsqrtq_v: {
- Int = Intrinsic::sqrt;
- Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
- return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vsqrt");
- }
- case NEON::BI__builtin_neon_vrbit_v:
- case NEON::BI__builtin_neon_vrbitq_v: {
- Int = Intrinsic::aarch64_neon_rbit;
- return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrbit");
- }
- case NEON::BI__builtin_neon_vaddv_u8:
- // FIXME: These are handled by the AArch64 scalar code.
- usgn = true;
- LLVM_FALLTHROUGH;
- case NEON::BI__builtin_neon_vaddv_s8: {
- Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv;
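- // The across-vector intrinsic is emitted with an i32 result; the value is
- // truncated back to the element width below.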
- Ty = Int32Ty;
- VTy = llvm::VectorType::get(Int8Ty, 8);
- llvm::Type *Tys[2] = { Ty, VTy };
- Ops.push_back(EmitScalarExpr(E->getArg(0)));
- Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddv");
- return Builder.CreateTrunc(Ops[0], Int8Ty);
- }
- case NEON::BI__builtin_neon_vaddv_u16:
- usgn = true;
- LLVM_FALLTHROUGH;
- case NEON::BI__builtin_neon_vaddv_s16: {
- Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv;
- Ty = Int32Ty;
- VTy = llvm::VectorType::get(Int16Ty, 4);
- llvm::Type *Tys[2] = { Ty, VTy };
- Ops.push_back(EmitScalarExpr(E->getArg(0)));
- Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddv");
- return Builder.CreateTrunc(Ops[0], Int16Ty);
- }
- case NEON::BI__builtin_neon_vaddvq_u8:
- usgn = true;
- LLVM_FALLTHROUGH;
- case NEON::BI__builtin_neon_vaddvq_s8: {
- Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv;
- Ty = Int32Ty;
- VTy = llvm::VectorType::get(Int8Ty, 16);
- llvm::Type *Tys[2] = { Ty, VTy };
- Ops.push_back(EmitScalarExpr(E->getArg(0)));
- Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddv");
- return Builder.CreateTrunc(Ops[0], Int8Ty);
- }
- case NEON::BI__builtin_neon_vaddvq_u16:
- usgn = true;
- LLVM_FALLTHROUGH;
- case NEON::BI__builtin_neon_vaddvq_s16: {
- Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv;
- Ty = Int32Ty;
- VTy = llvm::VectorType::get(Int16Ty, 8);
- llvm::Type *Tys[2] = { Ty, VTy };
- Ops.push_back(EmitScalarExpr(E->getArg(0)));
- Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddv");
- return Builder.CreateTrunc(Ops[0], Int16Ty);
- }
- case NEON::BI__builtin_neon_vmaxv_u8: {
- Int = Intrinsic::aarch64_neon_umaxv;
- Ty = Int32Ty;
- VTy = llvm::VectorType::get(Int8Ty, 8);
- llvm::Type *Tys[2] = { Ty, VTy };
- Ops.push_back(EmitScalarExpr(E->getArg(0)));
- Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
- return Builder.CreateTrunc(Ops[0], Int8Ty);
- }
- case NEON::BI__builtin_neon_vmaxv_u16: {
- Int = Intrinsic::aarch64_neon_umaxv;
- Ty = Int32Ty;
- VTy = llvm::VectorType::get(Int16Ty, 4);
- llvm::Type *Tys[2] = { Ty, VTy };
- Ops.push_back(EmitScalarExpr(E->getArg(0)));
- Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
- return Builder.CreateTrunc(Ops[0], Int16Ty);
- }
- case NEON::BI__builtin_neon_vmaxvq_u8: {
- Int = Intrinsic::aarch64_neon_umaxv;
- Ty = Int32Ty;
- VTy = llvm::VectorType::get(Int8Ty, 16);
- llvm::Type *Tys[2] = { Ty, VTy };
- Ops.push_back(EmitScalarExpr(E->getArg(0)));
- Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
- return Builder.CreateTrunc(Ops[0], Int8Ty);
- }
- case NEON::BI__builtin_neon_vmaxvq_u16: {
- Int = Intrinsic::aarch64_neon_umaxv;
- Ty = Int32Ty;
- VTy = llvm::VectorType::get(Int16Ty, 8);
- llvm::Type *Tys[2] = { Ty, VTy };
- Ops.push_back(EmitScalarExpr(E->getArg(0)));
- Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
- return Builder.CreateTrunc(Ops[0], Int16Ty);
- }
- case NEON::BI__builtin_neon_vmaxv_s8: {
- Int = Intrinsic::aarch64_neon_smaxv;
- Ty = Int32Ty;
- VTy = llvm::VectorType::get(Int8Ty, 8);
- llvm::Type *Tys[2] = { Ty, VTy };
- Ops.push_back(EmitScalarExpr(E->getArg(0)));
- Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
- return Builder.CreateTrunc(Ops[0], Int8Ty);
- }
- case NEON::BI__builtin_neon_vmaxv_s16: {
- Int = Intrinsic::aarch64_neon_smaxv;
- Ty = Int32Ty;
- VTy = llvm::VectorType::get(Int16Ty, 4);
- llvm::Type *Tys[2] = { Ty, VTy };
- Ops.push_back(EmitScalarExpr(E->getArg(0)));
- Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
- return Builder.CreateTrunc(Ops[0], Int16Ty);
- }
- case NEON::BI__builtin_neon_vmaxvq_s8: {
- Int = Intrinsic::aarch64_neon_smaxv;
- Ty = Int32Ty;
- VTy = llvm::VectorType::get(Int8Ty, 16);
- llvm::Type *Tys[2] = { Ty, VTy };
- Ops.push_back(EmitScalarExpr(E->getArg(0)));
- Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
- return Builder.CreateTrunc(Ops[0], Int8Ty);
- }
- case NEON::BI__builtin_neon_vmaxvq_s16: {
- Int = Intrinsic::aarch64_neon_smaxv;
- Ty = Int32Ty;
- VTy = llvm::VectorType::get(Int16Ty, 8);
- llvm::Type *Tys[2] = { Ty, VTy };
- Ops.push_back(EmitScalarExpr(E->getArg(0)));
- Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
- return Builder.CreateTrunc(Ops[0], Int16Ty);
- }
- case NEON::BI__builtin_neon_vmaxv_f16: {
- Int = Intrinsic::aarch64_neon_fmaxv;
- Ty = HalfTy;
- VTy = llvm::VectorType::get(HalfTy, 4);
- llvm::Type *Tys[2] = { Ty, VTy };
- Ops.push_back(EmitScalarExpr(E->getArg(0)));
- Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
- return Builder.CreateTrunc(Ops[0], HalfTy);
- }
- case NEON::BI__builtin_neon_vmaxvq_f16: {
- Int = Intrinsic::aarch64_neon_fmaxv;
- Ty = HalfTy;
- VTy = llvm::VectorType::get(HalfTy, 8);
- llvm::Type *Tys[2] = { Ty, VTy };
- Ops.push_back(EmitScalarExpr(E->getArg(0)));
- Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
- return Builder.CreateTrunc(Ops[0], HalfTy);
- }
- case NEON::BI__builtin_neon_vminv_u8: {
- Int = Intrinsic::aarch64_neon_uminv;
- Ty = Int32Ty;
- VTy = llvm::VectorType::get(Int8Ty, 8);
- llvm::Type *Tys[2] = { Ty, VTy };
- Ops.push_back(EmitScalarExpr(E->getArg(0)));
- Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
- return Builder.CreateTrunc(Ops[0], Int8Ty);
- }
- case NEON::BI__builtin_neon_vminv_u16: {
- Int = Intrinsic::aarch64_neon_uminv;
- Ty = Int32Ty;
- VTy = llvm::VectorType::get(Int16Ty, 4);
- llvm::Type *Tys[2] = { Ty, VTy };
- Ops.push_back(EmitScalarExpr(E->getArg(0)));
- Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
- return Builder.CreateTrunc(Ops[0], Int16Ty);
- }
- case NEON::BI__builtin_neon_vminvq_u8: {
- Int = Intrinsic::aarch64_neon_uminv;
- Ty = Int32Ty;
- VTy = llvm::VectorType::get(Int8Ty, 16);
- llvm::Type *Tys[2] = { Ty, VTy };
- Ops.push_back(EmitScalarExpr(E->getArg(0)));
- Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
- return Builder.CreateTrunc(Ops[0], Int8Ty);
- }
- case NEON::BI__builtin_neon_vminvq_u16: {
- Int = Intrinsic::aarch64_neon_uminv;
- Ty = Int32Ty;
- VTy = llvm::VectorType::get(Int16Ty, 8);
- llvm::Type *Tys[2] = { Ty, VTy };
- Ops.push_back(EmitScalarExpr(E->getArg(0)));
- Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
- return Builder.CreateTrunc(Ops[0], Int16Ty);
- }
- case NEON::BI__builtin_neon_vminv_s8: {
- Int = Intrinsic::aarch64_neon_sminv;
- Ty = Int32Ty;
- VTy = llvm::VectorType::get(Int8Ty, 8);
- llvm::Type *Tys[2] = { Ty, VTy };
- Ops.push_back(EmitScalarExpr(E->getArg(0)));
- Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
- return Builder.CreateTrunc(Ops[0], Int8Ty);
- }
- case NEON::BI__builtin_neon_vminv_s16: {
- Int = Intrinsic::aarch64_neon_sminv;
- Ty = Int32Ty;
- VTy = llvm::VectorType::get(Int16Ty, 4);
- llvm::Type *Tys[2] = { Ty, VTy };
- Ops.push_back(EmitScalarExpr(E->getArg(0)));
- Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
- return Builder.CreateTrunc(Ops[0], Int16Ty);
- }
- case NEON::BI__builtin_neon_vminvq_s8: {
- Int = Intrinsic::aarch64_neon_sminv;
- Ty = Int32Ty;
- VTy = llvm::VectorType::get(Int8Ty, 16);
- llvm::Type *Tys[2] = { Ty, VTy };
- Ops.push_back(EmitScalarExpr(E->getArg(0)));
- Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
- return Builder.CreateTrunc(Ops[0], Int8Ty);
- }
- case NEON::BI__builtin_neon_vminvq_s16: {
- Int = Intrinsic::aarch64_neon_sminv;
- Ty = Int32Ty;
- VTy = llvm::VectorType::get(Int16Ty, 8);
- llvm::Type *Tys[2] = { Ty, VTy };
- Ops.push_back(EmitScalarExpr(E->getArg(0)));
- Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
- return Builder.CreateTrunc(Ops[0], Int16Ty);
- }
- case NEON::BI__builtin_neon_vminv_f16: {
- Int = Intrinsic::aarch64_neon_fminv;
- Ty = HalfTy;
- VTy = llvm::VectorType::get(HalfTy, 4);
- llvm::Type *Tys[2] = { Ty, VTy };
- Ops.push_back(EmitScalarExpr(E->getArg(0)));
- Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
- return Builder.CreateTrunc(Ops[0], HalfTy);
- }
- case NEON::BI__builtin_neon_vminvq_f16: {
- Int = Intrinsic::aarch64_neon_fminv;
- Ty = HalfTy;
- VTy = llvm::VectorType::get(HalfTy, 8);
- llvm::Type *Tys[2] = { Ty, VTy };
- Ops.push_back(EmitScalarExpr(E->getArg(0)));
- Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
- return Builder.CreateTrunc(Ops[0], HalfTy);
- }
- case NEON::BI__builtin_neon_vmaxnmv_f16: {
- Int = Intrinsic::aarch64_neon_fmaxnmv;
- Ty = HalfTy;
- VTy = llvm::VectorType::get(HalfTy, 4);
- llvm::Type *Tys[2] = { Ty, VTy };
- Ops.push_back(EmitScalarExpr(E->getArg(0)));
- Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxnmv");
- return Builder.CreateTrunc(Ops[0], HalfTy);
- }
- case NEON::BI__builtin_neon_vmaxnmvq_f16: {
- Int = Intrinsic::aarch64_neon_fmaxnmv;
- Ty = HalfTy;
- VTy = llvm::VectorType::get(HalfTy, 8);
- llvm::Type *Tys[2] = { Ty, VTy };
- Ops.push_back(EmitScalarExpr(E->getArg(0)));
- Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxnmv");
- return Builder.CreateTrunc(Ops[0], HalfTy);
- }
- case NEON::BI__builtin_neon_vminnmv_f16: {
- Int = Intrinsic::aarch64_neon_fminnmv;
- Ty = HalfTy;
- VTy = llvm::VectorType::get(HalfTy, 4);
- llvm::Type *Tys[2] = { Ty, VTy };
- Ops.push_back(EmitScalarExpr(E->getArg(0)));
- Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminnmv");
- return Builder.CreateTrunc(Ops[0], HalfTy);
- }
- case NEON::BI__builtin_neon_vminnmvq_f16: {
- Int = Intrinsic::aarch64_neon_fminnmv;
- Ty = HalfTy;
- VTy = llvm::VectorType::get(HalfTy, 8);
- llvm::Type *Tys[2] = { Ty, VTy };
- Ops.push_back(EmitScalarExpr(E->getArg(0)));
- Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminnmv");
- return Builder.CreateTrunc(Ops[0], HalfTy);
- }
- case NEON::BI__builtin_neon_vmul_n_f64: {
- Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy);
- Value *RHS = Builder.CreateBitCast(EmitScalarExpr(E->getArg(1)), DoubleTy);
- return Builder.CreateFMul(Ops[0], RHS);
- }
- case NEON::BI__builtin_neon_vaddlv_u8: {
- Int = Intrinsic::aarch64_neon_uaddlv;
- Ty = Int32Ty;
- VTy = llvm::VectorType::get(Int8Ty, 8);
- llvm::Type *Tys[2] = { Ty, VTy };
- Ops.push_back(EmitScalarExpr(E->getArg(0)));
- Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
- return Builder.CreateTrunc(Ops[0], Int16Ty);
- }
- case NEON::BI__builtin_neon_vaddlv_u16: {
- Int = Intrinsic::aarch64_neon_uaddlv;
- Ty = Int32Ty;
- VTy = llvm::VectorType::get(Int16Ty, 4);
- llvm::Type *Tys[2] = { Ty, VTy };
- Ops.push_back(EmitScalarExpr(E->getArg(0)));
- return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
- }
- case NEON::BI__builtin_neon_vaddlvq_u8: {
- Int = Intrinsic::aarch64_neon_uaddlv;
- Ty = Int32Ty;
- VTy = llvm::VectorType::get(Int8Ty, 16);
- llvm::Type *Tys[2] = { Ty, VTy };
- Ops.push_back(EmitScalarExpr(E->getArg(0)));
- Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
- return Builder.CreateTrunc(Ops[0], Int16Ty);
- }
- case NEON::BI__builtin_neon_vaddlvq_u16: {
- Int = Intrinsic::aarch64_neon_uaddlv;
- Ty = Int32Ty;
- VTy = llvm::VectorType::get(Int16Ty, 8);
- llvm::Type *Tys[2] = { Ty, VTy };
- Ops.push_back(EmitScalarExpr(E->getArg(0)));
- return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
- }
- case NEON::BI__builtin_neon_vaddlv_s8: {
- Int = Intrinsic::aarch64_neon_saddlv;
- Ty = Int32Ty;
- VTy = llvm::VectorType::get(Int8Ty, 8);
- llvm::Type *Tys[2] = { Ty, VTy };
- Ops.push_back(EmitScalarExpr(E->getArg(0)));
- Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
- return Builder.CreateTrunc(Ops[0], Int16Ty);
- }
- case NEON::BI__builtin_neon_vaddlv_s16: {
- Int = Intrinsic::aarch64_neon_saddlv;
- Ty = Int32Ty;
- VTy = llvm::VectorType::get(Int16Ty, 4);
- llvm::Type *Tys[2] = { Ty, VTy };
- Ops.push_back(EmitScalarExpr(E->getArg(0)));
- return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
- }
- case NEON::BI__builtin_neon_vaddlvq_s8: {
- Int = Intrinsic::aarch64_neon_saddlv;
- Ty = Int32Ty;
- VTy = llvm::VectorType::get(Int8Ty, 16);
- llvm::Type *Tys[2] = { Ty, VTy };
- Ops.push_back(EmitScalarExpr(E->getArg(0)));
- Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
- return Builder.CreateTrunc(Ops[0], Int16Ty);
- }
- case NEON::BI__builtin_neon_vaddlvq_s16: {
- Int = Intrinsic::aarch64_neon_saddlv;
- Ty = Int32Ty;
- VTy = llvm::VectorType::get(Int16Ty, 8);
- llvm::Type *Tys[2] = { Ty, VTy };
- Ops.push_back(EmitScalarExpr(E->getArg(0)));
- return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
- }
- case NEON::BI__builtin_neon_vsri_n_v:
- case NEON::BI__builtin_neon_vsriq_n_v: {
- Int = Intrinsic::aarch64_neon_vsri;
- llvm::Function *Intrin = CGM.getIntrinsic(Int, Ty);
- return EmitNeonCall(Intrin, Ops, "vsri_n");
- }
- case NEON::BI__builtin_neon_vsli_n_v:
- case NEON::BI__builtin_neon_vsliq_n_v: {
- Int = Intrinsic::aarch64_neon_vsli;
- llvm::Function *Intrin = CGM.getIntrinsic(Int, Ty);
- return EmitNeonCall(Intrin, Ops, "vsli_n");
- }
- case NEON::BI__builtin_neon_vsra_n_v:
- case NEON::BI__builtin_neon_vsraq_n_v:
- Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
- Ops[1] = EmitNeonRShiftImm(Ops[1], Ops[2], Ty, usgn, "vsra_n");
- return Builder.CreateAdd(Ops[0], Ops[1]);
- case NEON::BI__builtin_neon_vrsra_n_v:
- case NEON::BI__builtin_neon_vrsraq_n_v: {
- Int = usgn ? Intrinsic::aarch64_neon_urshl : Intrinsic::aarch64_neon_srshl;
- SmallVector<llvm::Value*,2> TmpOps;
- TmpOps.push_back(Ops[1]);
- TmpOps.push_back(Ops[2]);
- Function* F = CGM.getIntrinsic(Int, Ty);
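- // The trailing (1, true) arguments ask EmitNeonCall to negate operand 1,
- // so the rounding shift-left intrinsic performs a right shift by Ops[2].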
- llvm::Value *tmp = EmitNeonCall(F, TmpOps, "vrshr_n", 1, true);
- Ops[0] = Builder.CreateBitCast(Ops[0], VTy);
- return Builder.CreateAdd(Ops[0], tmp);
- }
- case NEON::BI__builtin_neon_vld1_v:
- case NEON::BI__builtin_neon_vld1q_v: {
- Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(VTy));
- auto Alignment = CharUnits::fromQuantity(
- BuiltinID == NEON::BI__builtin_neon_vld1_v ? 8 : 16);
- return Builder.CreateAlignedLoad(VTy, Ops[0], Alignment);
- }
- case NEON::BI__builtin_neon_vst1_v:
- case NEON::BI__builtin_neon_vst1q_v:
- Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(VTy));
- Ops[1] = Builder.CreateBitCast(Ops[1], VTy);
- return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
- case NEON::BI__builtin_neon_vld1_lane_v:
- case NEON::BI__builtin_neon_vld1q_lane_v: {
- Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
- Ty = llvm::PointerType::getUnqual(VTy->getElementType());
- Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
- auto Alignment = CharUnits::fromQuantity(
- BuiltinID == NEON::BI__builtin_neon_vld1_lane_v ? 8 : 16);
- Ops[0] =
- Builder.CreateAlignedLoad(VTy->getElementType(), Ops[0], Alignment);
- return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vld1_lane");
- }
- case NEON::BI__builtin_neon_vld1_dup_v:
- case NEON::BI__builtin_neon_vld1q_dup_v: {
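- // Load a single element, insert it at lane 0 of an undef vector, then
- // splat lane 0 across all lanes.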
- Value *V = UndefValue::get(Ty);
- Ty = llvm::PointerType::getUnqual(VTy->getElementType());
- Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
- auto Alignment = CharUnits::fromQuantity(
- BuiltinID == NEON::BI__builtin_neon_vld1_dup_v ? 8 : 16);
- Ops[0] =
- Builder.CreateAlignedLoad(VTy->getElementType(), Ops[0], Alignment);
- llvm::Constant *CI = ConstantInt::get(Int32Ty, 0);
- Ops[0] = Builder.CreateInsertElement(V, Ops[0], CI);
- return EmitNeonSplat(Ops[0], CI);
- }
- case NEON::BI__builtin_neon_vst1_lane_v:
- case NEON::BI__builtin_neon_vst1q_lane_v:
- Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
- Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2]);
- Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
- return Builder.CreateDefaultAlignedStore(Ops[1],
- Builder.CreateBitCast(Ops[0], Ty));
- case NEON::BI__builtin_neon_vld2_v:
- case NEON::BI__builtin_neon_vld2q_v: {
- llvm::Type *PTy = llvm::PointerType::getUnqual(VTy);
- Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
- llvm::Type *Tys[2] = { VTy, PTy };
- Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld2, Tys);
- Ops[1] = Builder.CreateCall(F, Ops[1], "vld2");
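- // ld2 returns a two-vector struct; store the whole aggregate through the
- // result pointer in Ops[0].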
- Ops[0] = Builder.CreateBitCast(Ops[0],
- llvm::PointerType::getUnqual(Ops[1]->getType()));
- return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
- }
- case NEON::BI__builtin_neon_vld3_v:
- case NEON::BI__builtin_neon_vld3q_v: {
- llvm::Type *PTy = llvm::PointerType::getUnqual(VTy);
- Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
- llvm::Type *Tys[2] = { VTy, PTy };
- Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld3, Tys);
- Ops[1] = Builder.CreateCall(F, Ops[1], "vld3");
- Ops[0] = Builder.CreateBitCast(Ops[0],
- llvm::PointerType::getUnqual(Ops[1]->getType()));
- return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
- }
- case NEON::BI__builtin_neon_vld4_v:
- case NEON::BI__builtin_neon_vld4q_v: {
- llvm::Type *PTy = llvm::PointerType::getUnqual(VTy);
- Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
- llvm::Type *Tys[2] = { VTy, PTy };
- Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld4, Tys);
- Ops[1] = Builder.CreateCall(F, Ops[1], "vld4");
- Ops[0] = Builder.CreateBitCast(Ops[0],
- llvm::PointerType::getUnqual(Ops[1]->getType()));
- return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
- }
- case NEON::BI__builtin_neon_vld2_dup_v:
- case NEON::BI__builtin_neon_vld2q_dup_v: {
- llvm::Type *PTy =
- llvm::PointerType::getUnqual(VTy->getElementType());
- Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
- llvm::Type *Tys[2] = { VTy, PTy };
- Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld2r, Tys);
- Ops[1] = Builder.CreateCall(F, Ops[1], "vld2");
- Ops[0] = Builder.CreateBitCast(Ops[0],
- llvm::PointerType::getUnqual(Ops[1]->getType()));
- return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
- }
- case NEON::BI__builtin_neon_vld3_dup_v:
- case NEON::BI__builtin_neon_vld3q_dup_v: {
- llvm::Type *PTy =
- llvm::PointerType::getUnqual(VTy->getElementType());
- Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
- llvm::Type *Tys[2] = { VTy, PTy };
- Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld3r, Tys);
- Ops[1] = Builder.CreateCall(F, Ops[1], "vld3");
- Ops[0] = Builder.CreateBitCast(Ops[0],
- llvm::PointerType::getUnqual(Ops[1]->getType()));
- return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
- }
- case NEON::BI__builtin_neon_vld4_dup_v:
- case NEON::BI__builtin_neon_vld4q_dup_v: {
- llvm::Type *PTy =
- llvm::PointerType::getUnqual(VTy->getElementType());
- Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
- llvm::Type *Tys[2] = { VTy, PTy };
- Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld4r, Tys);
- Ops[1] = Builder.CreateCall(F, Ops[1], "vld4");
- Ops[0] = Builder.CreateBitCast(Ops[0],
- llvm::PointerType::getUnqual(Ops[1]->getType()));
- return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
- }
- case NEON::BI__builtin_neon_vld2_lane_v:
- case NEON::BI__builtin_neon_vld2q_lane_v: {
- llvm::Type *Tys[2] = { VTy, Ops[1]->getType() };
- Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld2lane, Tys);
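- // Rotate the source pointer (Ops[1]) to the end of the operand list: the
- // ld2lane intrinsic takes (vec, vec, lane index, pointer).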
- Ops.push_back(Ops[1]);
- Ops.erase(Ops.begin()+1);
- Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
- Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
- Ops[3] = Builder.CreateZExt(Ops[3], Int64Ty);
- Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), "vld2_lane");
- Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
- Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
- return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
- }
- case NEON::BI__builtin_neon_vld3_lane_v:
- case NEON::BI__builtin_neon_vld3q_lane_v: {
- llvm::Type *Tys[2] = { VTy, Ops[1]->getType() };
- Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld3lane, Tys);
- Ops.push_back(Ops[1]);
- Ops.erase(Ops.begin()+1);
- Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
- Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
- Ops[3] = Builder.CreateBitCast(Ops[3], Ty);
- Ops[4] = Builder.CreateZExt(Ops[4], Int64Ty);
- Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), "vld3_lane");
- Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
- Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
- return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
- }
- case NEON::BI__builtin_neon_vld4_lane_v:
- case NEON::BI__builtin_neon_vld4q_lane_v: {
- llvm::Type *Tys[2] = { VTy, Ops[1]->getType() };
- Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld4lane, Tys);
- Ops.push_back(Ops[1]);
- Ops.erase(Ops.begin()+1);
- Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
- Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
- Ops[3] = Builder.CreateBitCast(Ops[3], Ty);
- Ops[4] = Builder.CreateBitCast(Ops[4], Ty);
- Ops[5] = Builder.CreateZExt(Ops[5], Int64Ty);
- Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), "vld4_lane");
- Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
- Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
- return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
- }
- case NEON::BI__builtin_neon_vst2_v:
- case NEON::BI__builtin_neon_vst2q_v: {
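- // Rotate the destination pointer (Ops[0]) to the end: the st2 intrinsic
- // takes (vec, vec, pointer).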
- Ops.push_back(Ops[0]);
- Ops.erase(Ops.begin());
- llvm::Type *Tys[2] = { VTy, Ops[2]->getType() };
- return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st2, Tys),
- Ops, "");
- }
- case NEON::BI__builtin_neon_vst2_lane_v:
- case NEON::BI__builtin_neon_vst2q_lane_v: {
- Ops.push_back(Ops[0]);
- Ops.erase(Ops.begin());
- Ops[2] = Builder.CreateZExt(Ops[2], Int64Ty);
- llvm::Type *Tys[2] = { VTy, Ops[3]->getType() };
- return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st2lane, Tys),
- Ops, "");
- }
- case NEON::BI__builtin_neon_vst3_v:
- case NEON::BI__builtin_neon_vst3q_v: {
- Ops.push_back(Ops[0]);
- Ops.erase(Ops.begin());
- llvm::Type *Tys[2] = { VTy, Ops[3]->getType() };
- return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st3, Tys),
- Ops, "");
- }
- case NEON::BI__builtin_neon_vst3_lane_v:
- case NEON::BI__builtin_neon_vst3q_lane_v: {
- Ops.push_back(Ops[0]);
- Ops.erase(Ops.begin());
- Ops[3] = Builder.CreateZExt(Ops[3], Int64Ty);
- llvm::Type *Tys[2] = { VTy, Ops[4]->getType() };
- return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st3lane, Tys),
- Ops, "");
- }
- case NEON::BI__builtin_neon_vst4_v:
- case NEON::BI__builtin_neon_vst4q_v: {
- Ops.push_back(Ops[0]);
- Ops.erase(Ops.begin());
- llvm::Type *Tys[2] = { VTy, Ops[4]->getType() };
- return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st4, Tys),
- Ops, "");
- }
- case NEON::BI__builtin_neon_vst4_lane_v:
- case NEON::BI__builtin_neon_vst4q_lane_v: {
- Ops.push_back(Ops[0]);
- Ops.erase(Ops.begin());
- Ops[4] = Builder.CreateZExt(Ops[4], Int64Ty);
- llvm::Type *Tys[2] = { VTy, Ops[5]->getType() };
- return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st4lane, Tys),
- Ops, "");
- }
- case NEON::BI__builtin_neon_vtrn_v:
- case NEON::BI__builtin_neon_vtrnq_v: {
- Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
- Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
- Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
- Value *SV = nullptr;
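- // vtrn produces two result vectors; each shuffled half is stored at a
- // consecutive vector slot behind the result pointer.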
- for (unsigned vi = 0; vi != 2; ++vi) {
- SmallVector<uint32_t, 16> Indices;
- for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) {
- Indices.push_back(i+vi);
- Indices.push_back(i+e+vi);
- }
- Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
- SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vtrn");
- SV = Builder.CreateDefaultAlignedStore(SV, Addr);
- }
- return SV;
- }
- case NEON::BI__builtin_neon_vuzp_v:
- case NEON::BI__builtin_neon_vuzpq_v: {
- Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
- Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
- Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
- Value *SV = nullptr;
- for (unsigned vi = 0; vi != 2; ++vi) {
- SmallVector<uint32_t, 16> Indices;
- for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i)
- Indices.push_back(2*i+vi);
- Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
- SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vuzp");
- SV = Builder.CreateDefaultAlignedStore(SV, Addr);
- }
- return SV;
- }
- case NEON::BI__builtin_neon_vzip_v:
- case NEON::BI__builtin_neon_vzipq_v: {
- Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
- Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
- Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
- Value *SV = nullptr;
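- // vzip interleaves the low halves of the inputs on the first pass
- // (vi == 0) and the high halves on the second (vi == 1).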
- for (unsigned vi = 0; vi != 2; ++vi) {
- SmallVector<uint32_t, 16> Indices;
- for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) {
- Indices.push_back((i + vi*e) >> 1);
- Indices.push_back(((i + vi*e) >> 1)+e);
- }
- Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
- SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vzip");
- SV = Builder.CreateDefaultAlignedStore(SV, Addr);
- }
- return SV;
- }
- case NEON::BI__builtin_neon_vqtbl1q_v: {
- return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbl1, Ty),
- Ops, "vtbl1");
- }
- case NEON::BI__builtin_neon_vqtbl2q_v: {
- return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbl2, Ty),
- Ops, "vtbl2");
- }
- case NEON::BI__builtin_neon_vqtbl3q_v: {
- return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbl3, Ty),
- Ops, "vtbl3");
- }
- case NEON::BI__builtin_neon_vqtbl4q_v: {
- return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbl4, Ty),
- Ops, "vtbl4");
- }
- case NEON::BI__builtin_neon_vqtbx1q_v: {
- return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbx1, Ty),
- Ops, "vtbx1");
- }
- case NEON::BI__builtin_neon_vqtbx2q_v: {
- return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbx2, Ty),
- Ops, "vtbx2");
- }
- case NEON::BI__builtin_neon_vqtbx3q_v: {
- return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbx3, Ty),
- Ops, "vtbx3");
- }
- case NEON::BI__builtin_neon_vqtbx4q_v: {
- return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbx4, Ty),
- Ops, "vtbx4");
- }
- case NEON::BI__builtin_neon_vsqadd_v:
- case NEON::BI__builtin_neon_vsqaddq_v: {
- Int = Intrinsic::aarch64_neon_usqadd;
- return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vsqadd");
- }
- case NEON::BI__builtin_neon_vuqadd_v:
- case NEON::BI__builtin_neon_vuqaddq_v: {
- Int = Intrinsic::aarch64_neon_suqadd;
- return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vuqadd");
- }
- case AArch64::BI_BitScanForward:
- case AArch64::BI_BitScanForward64:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_BitScanForward, E);
- case AArch64::BI_BitScanReverse:
- case AArch64::BI_BitScanReverse64:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_BitScanReverse, E);
- case AArch64::BI_InterlockedAnd64:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedAnd, E);
- case AArch64::BI_InterlockedExchange64:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchange, E);
- case AArch64::BI_InterlockedExchangeAdd64:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeAdd, E);
- case AArch64::BI_InterlockedExchangeSub64:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeSub, E);
- case AArch64::BI_InterlockedOr64:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedOr, E);
- case AArch64::BI_InterlockedXor64:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedXor, E);
- case AArch64::BI_InterlockedDecrement64:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedDecrement, E);
- case AArch64::BI_InterlockedIncrement64:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedIncrement, E);
- case AArch64::BI_InterlockedExchangeAdd8_acq:
- case AArch64::BI_InterlockedExchangeAdd16_acq:
- case AArch64::BI_InterlockedExchangeAdd_acq:
- case AArch64::BI_InterlockedExchangeAdd64_acq:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeAdd_acq, E);
- case AArch64::BI_InterlockedExchangeAdd8_rel:
- case AArch64::BI_InterlockedExchangeAdd16_rel:
- case AArch64::BI_InterlockedExchangeAdd_rel:
- case AArch64::BI_InterlockedExchangeAdd64_rel:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeAdd_rel, E);
- case AArch64::BI_InterlockedExchangeAdd8_nf:
- case AArch64::BI_InterlockedExchangeAdd16_nf:
- case AArch64::BI_InterlockedExchangeAdd_nf:
- case AArch64::BI_InterlockedExchangeAdd64_nf:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeAdd_nf, E);
- case AArch64::BI_InterlockedExchange8_acq:
- case AArch64::BI_InterlockedExchange16_acq:
- case AArch64::BI_InterlockedExchange_acq:
- case AArch64::BI_InterlockedExchange64_acq:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchange_acq, E);
- case AArch64::BI_InterlockedExchange8_rel:
- case AArch64::BI_InterlockedExchange16_rel:
- case AArch64::BI_InterlockedExchange_rel:
- case AArch64::BI_InterlockedExchange64_rel:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchange_rel, E);
- case AArch64::BI_InterlockedExchange8_nf:
- case AArch64::BI_InterlockedExchange16_nf:
- case AArch64::BI_InterlockedExchange_nf:
- case AArch64::BI_InterlockedExchange64_nf:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchange_nf, E);
- case AArch64::BI_InterlockedCompareExchange8_acq:
- case AArch64::BI_InterlockedCompareExchange16_acq:
- case AArch64::BI_InterlockedCompareExchange_acq:
- case AArch64::BI_InterlockedCompareExchange64_acq:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedCompareExchange_acq, E);
- case AArch64::BI_InterlockedCompareExchange8_rel:
- case AArch64::BI_InterlockedCompareExchange16_rel:
- case AArch64::BI_InterlockedCompareExchange_rel:
- case AArch64::BI_InterlockedCompareExchange64_rel:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedCompareExchange_rel, E);
- case AArch64::BI_InterlockedCompareExchange8_nf:
- case AArch64::BI_InterlockedCompareExchange16_nf:
- case AArch64::BI_InterlockedCompareExchange_nf:
- case AArch64::BI_InterlockedCompareExchange64_nf:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedCompareExchange_nf, E);
- case AArch64::BI_InterlockedOr8_acq:
- case AArch64::BI_InterlockedOr16_acq:
- case AArch64::BI_InterlockedOr_acq:
- case AArch64::BI_InterlockedOr64_acq:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedOr_acq, E);
- case AArch64::BI_InterlockedOr8_rel:
- case AArch64::BI_InterlockedOr16_rel:
- case AArch64::BI_InterlockedOr_rel:
- case AArch64::BI_InterlockedOr64_rel:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedOr_rel, E);
- case AArch64::BI_InterlockedOr8_nf:
- case AArch64::BI_InterlockedOr16_nf:
- case AArch64::BI_InterlockedOr_nf:
- case AArch64::BI_InterlockedOr64_nf:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedOr_nf, E);
- case AArch64::BI_InterlockedXor8_acq:
- case AArch64::BI_InterlockedXor16_acq:
- case AArch64::BI_InterlockedXor_acq:
- case AArch64::BI_InterlockedXor64_acq:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedXor_acq, E);
- case AArch64::BI_InterlockedXor8_rel:
- case AArch64::BI_InterlockedXor16_rel:
- case AArch64::BI_InterlockedXor_rel:
- case AArch64::BI_InterlockedXor64_rel:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedXor_rel, E);
- case AArch64::BI_InterlockedXor8_nf:
- case AArch64::BI_InterlockedXor16_nf:
- case AArch64::BI_InterlockedXor_nf:
- case AArch64::BI_InterlockedXor64_nf:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedXor_nf, E);
- case AArch64::BI_InterlockedAnd8_acq:
- case AArch64::BI_InterlockedAnd16_acq:
- case AArch64::BI_InterlockedAnd_acq:
- case AArch64::BI_InterlockedAnd64_acq:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedAnd_acq, E);
- case AArch64::BI_InterlockedAnd8_rel:
- case AArch64::BI_InterlockedAnd16_rel:
- case AArch64::BI_InterlockedAnd_rel:
- case AArch64::BI_InterlockedAnd64_rel:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedAnd_rel, E);
- case AArch64::BI_InterlockedAnd8_nf:
- case AArch64::BI_InterlockedAnd16_nf:
- case AArch64::BI_InterlockedAnd_nf:
- case AArch64::BI_InterlockedAnd64_nf:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedAnd_nf, E);
- case AArch64::BI_InterlockedIncrement16_acq:
- case AArch64::BI_InterlockedIncrement_acq:
- case AArch64::BI_InterlockedIncrement64_acq:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedIncrement_acq, E);
- case AArch64::BI_InterlockedIncrement16_rel:
- case AArch64::BI_InterlockedIncrement_rel:
- case AArch64::BI_InterlockedIncrement64_rel:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedIncrement_rel, E);
- case AArch64::BI_InterlockedIncrement16_nf:
- case AArch64::BI_InterlockedIncrement_nf:
- case AArch64::BI_InterlockedIncrement64_nf:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedIncrement_nf, E);
- case AArch64::BI_InterlockedDecrement16_acq:
- case AArch64::BI_InterlockedDecrement_acq:
- case AArch64::BI_InterlockedDecrement64_acq:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedDecrement_acq, E);
- case AArch64::BI_InterlockedDecrement16_rel:
- case AArch64::BI_InterlockedDecrement_rel:
- case AArch64::BI_InterlockedDecrement64_rel:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedDecrement_rel, E);
- case AArch64::BI_InterlockedDecrement16_nf:
- case AArch64::BI_InterlockedDecrement_nf:
- case AArch64::BI_InterlockedDecrement64_nf:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedDecrement_nf, E);
- case AArch64::BI_InterlockedAdd: {
- Value *Arg0 = EmitScalarExpr(E->getArg(0));
- Value *Arg1 = EmitScalarExpr(E->getArg(1));
- AtomicRMWInst *RMWI = Builder.CreateAtomicRMW(
- AtomicRMWInst::Add, Arg0, Arg1,
- llvm::AtomicOrdering::SequentiallyConsistent);
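- // atomicrmw returns the value before the addition; add Arg1 again so the
- // builtin yields the post-add value, as _InterlockedAdd requires.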
- return Builder.CreateAdd(RMWI, Arg1);
- }
- }
- }
- llvm::Value *CodeGenFunction::BuildVector(ArrayRef<llvm::Value*> Ops) {
- assert((Ops.size() & (Ops.size() - 1)) == 0 &&
- "Not a power-of-two sized vector!");
- bool AllConstants = true;
- for (unsigned i = 0, e = Ops.size(); i != e && AllConstants; ++i)
- AllConstants &= isa<Constant>(Ops[i]);
- // If this is a constant vector, create a ConstantVector.
- if (AllConstants) {
- SmallVector<llvm::Constant*, 16> CstOps;
- for (unsigned i = 0, e = Ops.size(); i != e; ++i)
- CstOps.push_back(cast<Constant>(Ops[i]));
- return llvm::ConstantVector::get(CstOps);
- }
- // Otherwise, insertelement the values to build the vector.
- Value *Result =
- llvm::UndefValue::get(llvm::VectorType::get(Ops[0]->getType(), Ops.size()));
- for (unsigned i = 0, e = Ops.size(); i != e; ++i)
- Result = Builder.CreateInsertElement(Result, Ops[i], Builder.getInt32(i));
- return Result;
- }
- // Convert the mask from an integer type to a vector of i1.
- static Value *getMaskVecValue(CodeGenFunction &CGF, Value *Mask,
- unsigned NumElts) {
- llvm::VectorType *MaskTy = llvm::VectorType::get(CGF.Builder.getInt1Ty(),
- cast<IntegerType>(Mask->getType())->getBitWidth());
- Value *MaskVec = CGF.Builder.CreateBitCast(Mask, MaskTy);
- // If we have fewer than 8 elements, the incoming mask was an i8, so we
- // need to extract down to the right number of elements.
- if (NumElts < 8) {
- uint32_t Indices[4];
- for (unsigned i = 0; i != NumElts; ++i)
- Indices[i] = i;
- MaskVec = CGF.Builder.CreateShuffleVector(MaskVec, MaskVec,
- makeArrayRef(Indices, NumElts),
- "extract");
- }
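- // Illustrative example: an i8 mask 0b00000101 bitcasts to the <8 x i1>
- // vector {1,0,1,0,0,0,0,0} (lane 0 = bit 0); with NumElts == 4 the
- // shuffle keeps the low four lanes, giving <4 x i1> {1,0,1,0}.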
- return MaskVec;
- }
- static Value *EmitX86MaskedStore(CodeGenFunction &CGF,
- ArrayRef<Value *> Ops,
- unsigned Align) {
- // Cast the pointer to the right type.
- Value *Ptr = CGF.Builder.CreateBitCast(Ops[0],
- llvm::PointerType::getUnqual(Ops[1]->getType()));
- Value *MaskVec = getMaskVecValue(CGF, Ops[2],
- Ops[1]->getType()->getVectorNumElements());
- return CGF.Builder.CreateMaskedStore(Ops[1], Ptr, Align, MaskVec);
- }
- static Value *EmitX86MaskedLoad(CodeGenFunction &CGF,
- ArrayRef<Value *> Ops, unsigned Align) {
- // Cast the pointer to the right type.
- Value *Ptr = CGF.Builder.CreateBitCast(Ops[0],
- llvm::PointerType::getUnqual(Ops[1]->getType()));
- Value *MaskVec = getMaskVecValue(CGF, Ops[2],
- Ops[1]->getType()->getVectorNumElements());
- return CGF.Builder.CreateMaskedLoad(Ptr, Align, MaskVec, Ops[1]);
- }
- static Value *EmitX86ExpandLoad(CodeGenFunction &CGF,
- ArrayRef<Value *> Ops) {
- llvm::Type *ResultTy = Ops[1]->getType();
- llvm::Type *PtrTy = ResultTy->getVectorElementType();
- // Cast the pointer to the element type.
- Value *Ptr = CGF.Builder.CreateBitCast(Ops[0],
- llvm::PointerType::getUnqual(PtrTy));
- Value *MaskVec = getMaskVecValue(CGF, Ops[2],
- ResultTy->getVectorNumElements());
- llvm::Function *F = CGF.CGM.getIntrinsic(Intrinsic::masked_expandload,
- ResultTy);
- return CGF.Builder.CreateCall(F, { Ptr, MaskVec, Ops[1] });
- }
- static Value *EmitX86CompressExpand(CodeGenFunction &CGF,
- ArrayRef<Value *> Ops,
- bool IsCompress) {
- llvm::Type *ResultTy = Ops[1]->getType();
- Value *MaskVec = getMaskVecValue(CGF, Ops[2],
- ResultTy->getVectorNumElements());
- Intrinsic::ID IID = IsCompress ? Intrinsic::x86_avx512_mask_compress
- : Intrinsic::x86_avx512_mask_expand;
- llvm::Function *F = CGF.CGM.getIntrinsic(IID, ResultTy);
- return CGF.Builder.CreateCall(F, { Ops[0], Ops[1], MaskVec });
- }
- static Value *EmitX86CompressStore(CodeGenFunction &CGF,
- ArrayRef<Value *> Ops) {
- llvm::Type *ResultTy = Ops[1]->getType();
- llvm::Type *PtrTy = ResultTy->getVectorElementType();
- // Cast the pointer to the element type.
- Value *Ptr = CGF.Builder.CreateBitCast(Ops[0],
- llvm::PointerType::getUnqual(PtrTy));
- Value *MaskVec = getMaskVecValue(CGF, Ops[2],
- ResultTy->getVectorNumElements());
- llvm::Function *F = CGF.CGM.getIntrinsic(Intrinsic::masked_compressstore,
- ResultTy);
- return CGF.Builder.CreateCall(F, { Ops[1], Ptr, MaskVec });
- }
- static Value *EmitX86MaskLogic(CodeGenFunction &CGF, Instruction::BinaryOps Opc,
- ArrayRef<Value *> Ops,
- bool InvertLHS = false) {
- unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth();
- Value *LHS = getMaskVecValue(CGF, Ops[0], NumElts);
- Value *RHS = getMaskVecValue(CGF, Ops[1], NumElts);
- if (InvertLHS)
- LHS = CGF.Builder.CreateNot(LHS);
- return CGF.Builder.CreateBitCast(CGF.Builder.CreateBinOp(Opc, LHS, RHS),
- Ops[0]->getType());
- }
- static Value *EmitX86FunnelShift(CodeGenFunction &CGF, Value *Op0, Value *Op1,
- Value *Amt, bool IsRight) {
- llvm::Type *Ty = Op0->getType();
- // The amount may be a scalar immediate, in which case create a splat vector.
- // Funnel shift amounts are taken modulo the bit width, and the types are all
- // powers of two, so only the lowest log2(width) bits matter anyway.
- if (Amt->getType() != Ty) {
- unsigned NumElts = Ty->getVectorNumElements();
- Amt = CGF.Builder.CreateIntCast(Amt, Ty->getScalarType(), false);
- Amt = CGF.Builder.CreateVectorSplat(NumElts, Amt);
- }
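- // llvm.fshl(a, b, n) shifts the double-width concatenation a:b left by
- // n (mod bit width) and returns the high half; fshr is the right-shift
- // analogue.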
- unsigned IID = IsRight ? Intrinsic::fshr : Intrinsic::fshl;
- Function *F = CGF.CGM.getIntrinsic(IID, Ty);
- return CGF.Builder.CreateCall(F, {Op0, Op1, Amt});
- }
- static Value *EmitX86vpcom(CodeGenFunction &CGF, ArrayRef<Value *> Ops,
- bool IsSigned) {
- Value *Op0 = Ops[0];
- Value *Op1 = Ops[1];
- llvm::Type *Ty = Op0->getType();
- uint64_t Imm = cast<llvm::ConstantInt>(Ops[2])->getZExtValue() & 0x7;
- CmpInst::Predicate Pred;
- switch (Imm) {
- case 0x0:
- Pred = IsSigned ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT;
- break;
- case 0x1:
- Pred = IsSigned ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE;
- break;
- case 0x2:
- Pred = IsSigned ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT;
- break;
- case 0x3:
- Pred = IsSigned ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE;
- break;
- case 0x4:
- Pred = ICmpInst::ICMP_EQ;
- break;
- case 0x5:
- Pred = ICmpInst::ICMP_NE;
- break;
- case 0x6:
- return llvm::Constant::getNullValue(Ty); // FALSE
- case 0x7:
- return llvm::Constant::getAllOnesValue(Ty); // TRUE
- default:
- llvm_unreachable("Unexpected XOP vpcom/vpcomu predicate");
- }
- Value *Cmp = CGF.Builder.CreateICmp(Pred, Op0, Op1);
- Value *Res = CGF.Builder.CreateSExt(Cmp, Ty);
- return Res;
- }
- static Value *EmitX86Select(CodeGenFunction &CGF,
- Value *Mask, Value *Op0, Value *Op1) {
- // If the mask is all ones, just return the first argument.
- if (const auto *C = dyn_cast<Constant>(Mask))
- if (C->isAllOnesValue())
- return Op0;
- Mask = getMaskVecValue(CGF, Mask, Op0->getType()->getVectorNumElements());
- return CGF.Builder.CreateSelect(Mask, Op0, Op1);
- }
- static Value *EmitX86ScalarSelect(CodeGenFunction &CGF,
- Value *Mask, Value *Op0, Value *Op1) {
- // If the mask is all ones, just return the first argument.
- if (const auto *C = dyn_cast<Constant>(Mask))
- if (C->isAllOnesValue())
- return Op0;
- llvm::VectorType *MaskTy =
- llvm::VectorType::get(CGF.Builder.getInt1Ty(),
- Mask->getType()->getIntegerBitWidth());
- Mask = CGF.Builder.CreateBitCast(Mask, MaskTy);
- Mask = CGF.Builder.CreateExtractElement(Mask, (uint64_t)0);
- return CGF.Builder.CreateSelect(Mask, Op0, Op1);
- }
- static Value *EmitX86MaskedCompareResult(CodeGenFunction &CGF, Value *Cmp,
- unsigned NumElts, Value *MaskIn) {
- if (MaskIn) {
- const auto *C = dyn_cast<Constant>(MaskIn);
- if (!C || !C->isAllOnesValue())
- Cmp = CGF.Builder.CreateAnd(Cmp, getMaskVecValue(CGF, MaskIn, NumElts));
- }
- if (NumElts < 8) {
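- // Pad the compare result out to 8 lanes, taking zeros for the extra
- // lanes from the null vector, so it can be bitcast to at least an i8.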
- uint32_t Indices[8];
- for (unsigned i = 0; i != NumElts; ++i)
- Indices[i] = i;
- for (unsigned i = NumElts; i != 8; ++i)
- Indices[i] = i % NumElts + NumElts;
- Cmp = CGF.Builder.CreateShuffleVector(
- Cmp, llvm::Constant::getNullValue(Cmp->getType()), Indices);
- }
- return CGF.Builder.CreateBitCast(Cmp,
- IntegerType::get(CGF.getLLVMContext(),
- std::max(NumElts, 8U)));
- }
- static Value *EmitX86MaskedCompare(CodeGenFunction &CGF, unsigned CC,
- bool Signed, ArrayRef<Value *> Ops) {
- assert((Ops.size() == 2 || Ops.size() == 4) &&
- "Unexpected number of arguments");
- unsigned NumElts = Ops[0]->getType()->getVectorNumElements();
- Value *Cmp;
- if (CC == 3) {
- Cmp = Constant::getNullValue(
- llvm::VectorType::get(CGF.Builder.getInt1Ty(), NumElts));
- } else if (CC == 7) {
- Cmp = Constant::getAllOnesValue(
- llvm::VectorType::get(CGF.Builder.getInt1Ty(), NumElts));
- } else {
- ICmpInst::Predicate Pred;
- switch (CC) {
- default: llvm_unreachable("Unknown condition code");
- case 0: Pred = ICmpInst::ICMP_EQ; break;
- case 1: Pred = Signed ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT; break;
- case 2: Pred = Signed ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE; break;
- case 4: Pred = ICmpInst::ICMP_NE; break;
- case 5: Pred = Signed ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE; break;
- case 6: Pred = Signed ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT; break;
- }
- Cmp = CGF.Builder.CreateICmp(Pred, Ops[0], Ops[1]);
- }
- Value *MaskIn = nullptr;
- if (Ops.size() == 4)
- MaskIn = Ops[3];
- return EmitX86MaskedCompareResult(CGF, Cmp, NumElts, MaskIn);
- }
- static Value *EmitX86ConvertToMask(CodeGenFunction &CGF, Value *In) {
- Value *Zero = Constant::getNullValue(In->getType());
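- // CC 1 with Signed == true is a signed less-than, so this computes
- // In < 0, i.e. it extracts the sign bit of each element as the mask.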
- return EmitX86MaskedCompare(CGF, 1, true, { In, Zero });
- }
- static Value *EmitX86ConvertIntToFp(CodeGenFunction &CGF,
- ArrayRef<Value *> Ops, bool IsSigned) {
- unsigned Rnd = cast<llvm::ConstantInt>(Ops[3])->getZExtValue();
- llvm::Type *Ty = Ops[1]->getType();
- Value *Res;
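- // Rounding mode 4 is _MM_FROUND_CUR_DIRECTION (no explicit rounding);
- // anything else requires the explicit-rounding intrinsic.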
- if (Rnd != 4) {
- Intrinsic::ID IID = IsSigned ? Intrinsic::x86_avx512_sitofp_round
- : Intrinsic::x86_avx512_uitofp_round;
- Function *F = CGF.CGM.getIntrinsic(IID, { Ty, Ops[0]->getType() });
- Res = CGF.Builder.CreateCall(F, { Ops[0], Ops[3] });
- } else {
- Res = IsSigned ? CGF.Builder.CreateSIToFP(Ops[0], Ty)
- : CGF.Builder.CreateUIToFP(Ops[0], Ty);
- }
- return EmitX86Select(CGF, Ops[2], Res, Ops[1]);
- }
- static Value *EmitX86Abs(CodeGenFunction &CGF, ArrayRef<Value *> Ops) {
- llvm::Type *Ty = Ops[0]->getType();
- Value *Zero = llvm::Constant::getNullValue(Ty);
- Value *Sub = CGF.Builder.CreateSub(Zero, Ops[0]);
- Value *Cmp = CGF.Builder.CreateICmp(ICmpInst::ICMP_SGT, Ops[0], Zero);
- Value *Res = CGF.Builder.CreateSelect(Cmp, Ops[0], Sub);
- return Res;
- }
- static Value *EmitX86MinMax(CodeGenFunction &CGF, ICmpInst::Predicate Pred,
- ArrayRef<Value *> Ops) {
- assert(Ops.size() == 2 && "Expected exactly two operands");
- Value *Cmp = CGF.Builder.CreateICmp(Pred, Ops[0], Ops[1]);
- Value *Res = CGF.Builder.CreateSelect(Cmp, Ops[0], Ops[1]);
- return Res;
- }
- // Lowers X86 FMA intrinsics to IR.
- static Value *EmitX86FMAExpr(CodeGenFunction &CGF, ArrayRef<Value *> Ops,
- unsigned BuiltinID, bool IsAddSub) {
- bool Subtract = false;
- Intrinsic::ID IID = Intrinsic::not_intrinsic;
- switch (BuiltinID) {
- default: break;
- case clang::X86::BI__builtin_ia32_vfmsubps512_mask3:
- Subtract = true;
- LLVM_FALLTHROUGH;
- case clang::X86::BI__builtin_ia32_vfmaddps512_mask:
- case clang::X86::BI__builtin_ia32_vfmaddps512_maskz:
- case clang::X86::BI__builtin_ia32_vfmaddps512_mask3:
- IID = llvm::Intrinsic::x86_avx512_vfmadd_ps_512; break;
- case clang::X86::BI__builtin_ia32_vfmsubpd512_mask3:
- Subtract = true;
- LLVM_FALLTHROUGH;
- case clang::X86::BI__builtin_ia32_vfmaddpd512_mask:
- case clang::X86::BI__builtin_ia32_vfmaddpd512_maskz:
- case clang::X86::BI__builtin_ia32_vfmaddpd512_mask3:
- IID = llvm::Intrinsic::x86_avx512_vfmadd_pd_512; break;
- case clang::X86::BI__builtin_ia32_vfmsubaddps512_mask3:
- Subtract = true;
- LLVM_FALLTHROUGH;
- case clang::X86::BI__builtin_ia32_vfmaddsubps512_mask:
- case clang::X86::BI__builtin_ia32_vfmaddsubps512_maskz:
- case clang::X86::BI__builtin_ia32_vfmaddsubps512_mask3:
- IID = llvm::Intrinsic::x86_avx512_vfmaddsub_ps_512;
- break;
- case clang::X86::BI__builtin_ia32_vfmsubaddpd512_mask3:
- Subtract = true;
- LLVM_FALLTHROUGH;
- case clang::X86::BI__builtin_ia32_vfmaddsubpd512_mask:
- case clang::X86::BI__builtin_ia32_vfmaddsubpd512_maskz:
- case clang::X86::BI__builtin_ia32_vfmaddsubpd512_mask3:
- IID = llvm::Intrinsic::x86_avx512_vfmaddsub_pd_512;
- break;
- }
- Value *A = Ops[0];
- Value *B = Ops[1];
- Value *C = Ops[2];
- if (Subtract)
- C = CGF.Builder.CreateFNeg(C);
- Value *Res;
- // Use the rounding-mode intrinsic only when something other than
- // _MM_FROUND_CUR_DIRECTION (4, i.e. no explicit rounding) is requested;
- // otherwise lower to a plain llvm.fma.
- if (IID != Intrinsic::not_intrinsic &&
- cast<llvm::ConstantInt>(Ops.back())->getZExtValue() != (uint64_t)4) {
- Function *Intr = CGF.CGM.getIntrinsic(IID);
- Res = CGF.Builder.CreateCall(Intr, {A, B, C, Ops.back() });
- } else {
- llvm::Type *Ty = A->getType();
- Function *FMA = CGF.CGM.getIntrinsic(Intrinsic::fma, Ty);
- Res = CGF.Builder.CreateCall(FMA, {A, B, C} );
- if (IsAddSub) {
- // For the *addsub variants, also compute a*b-c by negating C, then
- // interleave: even lanes take the subtract result, odd lanes the add.
- unsigned NumElts = Ty->getVectorNumElements();
- SmallVector<uint32_t, 16> Indices(NumElts);
- for (unsigned i = 0; i != NumElts; ++i)
- Indices[i] = i + (i % 2) * NumElts;
- Value *NegC = CGF.Builder.CreateFNeg(C);
- Value *FMSub = CGF.Builder.CreateCall(FMA, {A, B, NegC} );
- Res = CGF.Builder.CreateShuffleVector(FMSub, Res, Indices);
- }
- }
- // Handle any required masking.
- Value *MaskFalseVal = nullptr;
- switch (BuiltinID) {
- case clang::X86::BI__builtin_ia32_vfmaddps512_mask:
- case clang::X86::BI__builtin_ia32_vfmaddpd512_mask:
- case clang::X86::BI__builtin_ia32_vfmaddsubps512_mask:
- case clang::X86::BI__builtin_ia32_vfmaddsubpd512_mask:
- MaskFalseVal = Ops[0];
- break;
- case clang::X86::BI__builtin_ia32_vfmaddps512_maskz:
- case clang::X86::BI__builtin_ia32_vfmaddpd512_maskz:
- case clang::X86::BI__builtin_ia32_vfmaddsubps512_maskz:
- case clang::X86::BI__builtin_ia32_vfmaddsubpd512_maskz:
- MaskFalseVal = Constant::getNullValue(Ops[0]->getType());
- break;
- case clang::X86::BI__builtin_ia32_vfmsubps512_mask3:
- case clang::X86::BI__builtin_ia32_vfmaddps512_mask3:
- case clang::X86::BI__builtin_ia32_vfmsubpd512_mask3:
- case clang::X86::BI__builtin_ia32_vfmaddpd512_mask3:
- case clang::X86::BI__builtin_ia32_vfmsubaddps512_mask3:
- case clang::X86::BI__builtin_ia32_vfmaddsubps512_mask3:
- case clang::X86::BI__builtin_ia32_vfmsubaddpd512_mask3:
- case clang::X86::BI__builtin_ia32_vfmaddsubpd512_mask3:
- MaskFalseVal = Ops[2];
- break;
- }
- if (MaskFalseVal)
- return EmitX86Select(CGF, Ops[3], Res, MaskFalseVal);
- return Res;
- }
- static Value *
- EmitScalarFMAExpr(CodeGenFunction &CGF, MutableArrayRef<Value *> Ops,
- Value *Upper, bool ZeroMask = false, unsigned PTIdx = 0,
- bool NegAcc = false) {
- unsigned Rnd = 4;
- if (Ops.size() > 4)
- Rnd = cast<llvm::ConstantInt>(Ops[4])->getZExtValue();
- if (NegAcc)
- Ops[2] = CGF.Builder.CreateFNeg(Ops[2]);
- Ops[0] = CGF.Builder.CreateExtractElement(Ops[0], (uint64_t)0);
- Ops[1] = CGF.Builder.CreateExtractElement(Ops[1], (uint64_t)0);
- Ops[2] = CGF.Builder.CreateExtractElement(Ops[2], (uint64_t)0);
- Value *Res;
- if (Rnd != 4) {
- Intrinsic::ID IID = Ops[0]->getType()->getPrimitiveSizeInBits() == 32 ?
- Intrinsic::x86_avx512_vfmadd_f32 :
- Intrinsic::x86_avx512_vfmadd_f64;
- Res = CGF.Builder.CreateCall(CGF.CGM.getIntrinsic(IID),
- {Ops[0], Ops[1], Ops[2], Ops[4]});
- } else {
- Function *FMA = CGF.CGM.getIntrinsic(Intrinsic::fma, Ops[0]->getType());
- Res = CGF.Builder.CreateCall(FMA, Ops.slice(0, 3));
- }
- // If we have more than 3 arguments, we need to do masking.
- if (Ops.size() > 3) {
- Value *PassThru = ZeroMask ? Constant::getNullValue(Res->getType())
- : Ops[PTIdx];
- // If we negated the accumulator and it is also the PassThru value, we need
- // to bypass the negate. Conveniently, Upper holds the same value in that
- // case.
- if (NegAcc && PTIdx == 2)
- PassThru = CGF.Builder.CreateExtractElement(Upper, (uint64_t)0);
- Res = EmitX86ScalarSelect(CGF, Ops[3], Res, PassThru);
- }
- return CGF.Builder.CreateInsertElement(Upper, Res, (uint64_t)0);
- }
- static Value *EmitX86Muldq(CodeGenFunction &CGF, bool IsSigned,
- ArrayRef<Value *> Ops) {
- llvm::Type *Ty = Ops[0]->getType();
- // Arguments have a vXi32 type so cast to vXi64.
- Ty = llvm::VectorType::get(CGF.Int64Ty,
- Ty->getPrimitiveSizeInBits() / 64);
- Value *LHS = CGF.Builder.CreateBitCast(Ops[0], Ty);
- Value *RHS = CGF.Builder.CreateBitCast(Ops[1], Ty);
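- // After the bitcast each i64 lane holds a pair of i32 elements; only the
- // even-indexed (low) element of each pair participates in the multiply.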
- if (IsSigned) {
- // Shift left then arithmetic shift right.
- Constant *ShiftAmt = ConstantInt::get(Ty, 32);
- LHS = CGF.Builder.CreateShl(LHS, ShiftAmt);
- LHS = CGF.Builder.CreateAShr(LHS, ShiftAmt);
- RHS = CGF.Builder.CreateShl(RHS, ShiftAmt);
- RHS = CGF.Builder.CreateAShr(RHS, ShiftAmt);
- } else {
- // Clear the upper bits.
- Constant *Mask = ConstantInt::get(Ty, 0xffffffff);
- LHS = CGF.Builder.CreateAnd(LHS, Mask);
- RHS = CGF.Builder.CreateAnd(RHS, Mask);
- }
- return CGF.Builder.CreateMul(LHS, RHS);
- }
- // Emit a masked pternlog intrinsic. This only exists because the header has to
- // use a macro and we aren't able to pass the input argument to a pternlog
- // builtin and a select builtin without evaluating it twice.
- static Value *EmitX86Ternlog(CodeGenFunction &CGF, bool ZeroMask,
- ArrayRef<Value *> Ops) {
- llvm::Type *Ty = Ops[0]->getType();
- unsigned VecWidth = Ty->getPrimitiveSizeInBits();
- unsigned EltWidth = Ty->getScalarSizeInBits();
- Intrinsic::ID IID;
- if (VecWidth == 128 && EltWidth == 32)
- IID = Intrinsic::x86_avx512_pternlog_d_128;
- else if (VecWidth == 256 && EltWidth == 32)
- IID = Intrinsic::x86_avx512_pternlog_d_256;
- else if (VecWidth == 512 && EltWidth == 32)
- IID = Intrinsic::x86_avx512_pternlog_d_512;
- else if (VecWidth == 128 && EltWidth == 64)
- IID = Intrinsic::x86_avx512_pternlog_q_128;
- else if (VecWidth == 256 && EltWidth == 64)
- IID = Intrinsic::x86_avx512_pternlog_q_256;
- else if (VecWidth == 512 && EltWidth == 64)
- IID = Intrinsic::x86_avx512_pternlog_q_512;
- else
- llvm_unreachable("Unexpected intrinsic");
- Value *Ternlog = CGF.Builder.CreateCall(CGF.CGM.getIntrinsic(IID),
- Ops.drop_back());
- Value *PassThru = ZeroMask ? ConstantAggregateZero::get(Ty) : Ops[0];
- return EmitX86Select(CGF, Ops[4], Ternlog, PassThru);
- }
- static Value *EmitX86SExtMask(CodeGenFunction &CGF, Value *Op,
- llvm::Type *DstTy) {
- unsigned NumberOfElements = DstTy->getVectorNumElements();
- Value *Mask = getMaskVecValue(CGF, Op, NumberOfElements);
- return CGF.Builder.CreateSExt(Mask, DstTy, "vpmovm2");
- }
- // Emit addition or subtraction with signed/unsigned saturation.
- static Value *EmitX86AddSubSatExpr(CodeGenFunction &CGF,
- ArrayRef<Value *> Ops, bool IsSigned,
- bool IsAddition) {
- Intrinsic::ID IID =
- IsSigned ? (IsAddition ? Intrinsic::sadd_sat : Intrinsic::ssub_sat)
- : (IsAddition ? Intrinsic::uadd_sat : Intrinsic::usub_sat);
- llvm::Function *F = CGF.CGM.getIntrinsic(IID, Ops[0]->getType());
- return CGF.Builder.CreateCall(F, {Ops[0], Ops[1]});
- }
- Value *CodeGenFunction::EmitX86CpuIs(const CallExpr *E) {
- const Expr *CPUExpr = E->getArg(0)->IgnoreParenCasts();
- StringRef CPUStr = cast<clang::StringLiteral>(CPUExpr)->getString();
- return EmitX86CpuIs(CPUStr);
- }
- // Convert a BF16 to a float.
- static Value *EmitX86CvtBF16ToFloatExpr(CodeGenFunction &CGF,
- const CallExpr *E,
- ArrayRef<Value *> Ops) {
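- // A bfloat16 is the upper 16 bits of an IEEE-754 binary32: zero-extend,
- // shift left by 16, and bitcast to reconstruct the float (the low
- // mantissa bits become zero).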
- llvm::Type *Int32Ty = CGF.Builder.getInt32Ty();
- Value *ZeroExt = CGF.Builder.CreateZExt(Ops[0], Int32Ty);
- Value *Shl = CGF.Builder.CreateShl(ZeroExt, 16);
- llvm::Type *ResultType = CGF.ConvertType(E->getType());
- Value *BitCast = CGF.Builder.CreateBitCast(Shl, ResultType);
- return BitCast;
- }
- Value *CodeGenFunction::EmitX86CpuIs(StringRef CPUStr) {
- llvm::Type *Int32Ty = Builder.getInt32Ty();
- // Matching the struct layout from the compiler-rt/libgcc structure that is
- // filled in:
- // unsigned int __cpu_vendor;
- // unsigned int __cpu_type;
- // unsigned int __cpu_subtype;
- // unsigned int __cpu_features[1];
- llvm::Type *STy = llvm::StructType::get(Int32Ty, Int32Ty, Int32Ty,
- llvm::ArrayType::get(Int32Ty, 1));
- // Grab the global __cpu_model.
- llvm::Constant *CpuModel = CGM.CreateRuntimeVariable(STy, "__cpu_model");
- cast<llvm::GlobalValue>(CpuModel)->setDSOLocal(true);
- // Calculate the index of the __cpu_model field to compare against, and the
- // expected value, from the CPU string.
- unsigned Index;
- unsigned Value;
- std::tie(Index, Value) = StringSwitch<std::pair<unsigned, unsigned>>(CPUStr)
- #define X86_VENDOR(ENUM, STRING) \
- .Case(STRING, {0u, static_cast<unsigned>(llvm::X86::ENUM)})
- #define X86_CPU_TYPE_COMPAT_WITH_ALIAS(ARCHNAME, ENUM, STR, ALIAS) \
- .Cases(STR, ALIAS, {1u, static_cast<unsigned>(llvm::X86::ENUM)})
- #define X86_CPU_TYPE_COMPAT(ARCHNAME, ENUM, STR) \
- .Case(STR, {1u, static_cast<unsigned>(llvm::X86::ENUM)})
- #define X86_CPU_SUBTYPE_COMPAT(ARCHNAME, ENUM, STR) \
- .Case(STR, {2u, static_cast<unsigned>(llvm::X86::ENUM)})
- #include "llvm/Support/X86TargetParser.def"
- .Default({0, 0});
- assert(Value != 0 && "Invalid CPUStr passed to CpuIs");
- // Grab the appropriate field from __cpu_model.
- llvm::Value *Idxs[] = {ConstantInt::get(Int32Ty, 0),
- ConstantInt::get(Int32Ty, Index)};
- llvm::Value *CpuValue = Builder.CreateGEP(STy, CpuModel, Idxs);
- CpuValue = Builder.CreateAlignedLoad(CpuValue, CharUnits::fromQuantity(4));
- // Check the value of the field against the requested value.
- return Builder.CreateICmpEQ(CpuValue,
- llvm::ConstantInt::get(Int32Ty, Value));
- }
- Value *CodeGenFunction::EmitX86CpuSupports(const CallExpr *E) {
- const Expr *FeatureExpr = E->getArg(0)->IgnoreParenCasts();
- StringRef FeatureStr = cast<StringLiteral>(FeatureExpr)->getString();
- return EmitX86CpuSupports(FeatureStr);
- }
- uint64_t
- CodeGenFunction::GetX86CpuSupportsMask(ArrayRef<StringRef> FeatureStrs) {
- // Processor features and mapping to processor feature value.
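- // Illustrative example (the concrete bit number is an assumption; actual
- // positions come from X86TargetParser.def and must match libgcc): if, say,
- // "sse4.2" maps to bit 8, then GetX86CpuSupportsMask({"sse4.2"}) is
- // 1ULL << 8.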
- uint64_t FeaturesMask = 0;
- for (const StringRef &FeatureStr : FeatureStrs) {
- unsigned Feature =
- StringSwitch<unsigned>(FeatureStr)
- #define X86_FEATURE_COMPAT(VAL, ENUM, STR) .Case(STR, VAL)
- #include "llvm/Support/X86TargetParser.def"
- ;
- FeaturesMask |= (1ULL << Feature);
- }
- return FeaturesMask;
- }
- Value *CodeGenFunction::EmitX86CpuSupports(ArrayRef<StringRef> FeatureStrs) {
- return EmitX86CpuSupports(GetX86CpuSupportsMask(FeatureStrs));
- }
- llvm::Value *CodeGenFunction::EmitX86CpuSupports(uint64_t FeaturesMask) {
- uint32_t Features1 = Lo_32(FeaturesMask);
- uint32_t Features2 = Hi_32(FeaturesMask);
- Value *Result = Builder.getTrue();
- if (Features1 != 0) {
- // Match the layout of the __cpu_model structure that compiler-rt/libgcc
- // fills in:
- // unsigned int __cpu_vendor;
- // unsigned int __cpu_type;
- // unsigned int __cpu_subtype;
- // unsigned int __cpu_features[1];
- llvm::Type *STy = llvm::StructType::get(Int32Ty, Int32Ty, Int32Ty,
- llvm::ArrayType::get(Int32Ty, 1));
- // Grab the global __cpu_model.
- llvm::Constant *CpuModel = CGM.CreateRuntimeVariable(STy, "__cpu_model");
- cast<llvm::GlobalValue>(CpuModel)->setDSOLocal(true);
- // Grab element 0 of the __cpu_features array field of the __cpu_model
- // global (struct type STy).
- Value *Idxs[] = {Builder.getInt32(0), Builder.getInt32(3),
- Builder.getInt32(0)};
- Value *CpuFeatures = Builder.CreateGEP(STy, CpuModel, Idxs);
- Value *Features =
- Builder.CreateAlignedLoad(CpuFeatures, CharUnits::fromQuantity(4));
- // Check the value of the bit corresponding to the feature requested.
- Value *Mask = Builder.getInt32(Features1);
- Value *Bitset = Builder.CreateAnd(Features, Mask);
- Value *Cmp = Builder.CreateICmpEQ(Bitset, Mask);
- Result = Builder.CreateAnd(Result, Cmp);
- }
- if (Features2 != 0) {
- llvm::Constant *CpuFeatures2 = CGM.CreateRuntimeVariable(Int32Ty,
- "__cpu_features2");
- cast<llvm::GlobalValue>(CpuFeatures2)->setDSOLocal(true);
- Value *Features =
- Builder.CreateAlignedLoad(CpuFeatures2, CharUnits::fromQuantity(4));
- // Check the value of the bit corresponding to the feature requested.
- Value *Mask = Builder.getInt32(Features2);
- Value *Bitset = Builder.CreateAnd(Features, Mask);
- Value *Cmp = Builder.CreateICmpEQ(Bitset, Mask);
- Result = Builder.CreateAnd(Result, Cmp);
- }
- return Result;
- }
- Value *CodeGenFunction::EmitX86CpuInit() {
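- // __cpu_indicator_init is the compiler-rt/libgcc routine that populates
- // __cpu_model and __cpu_features2 via CPUID; __builtin_cpu_init lowers to
- // a plain call to it.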
- llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy,
- /*Variadic*/ false);
- llvm::FunctionCallee Func =
- CGM.CreateRuntimeFunction(FTy, "__cpu_indicator_init");
- cast<llvm::GlobalValue>(Func.getCallee())->setDSOLocal(true);
- cast<llvm::GlobalValue>(Func.getCallee())
- ->setDLLStorageClass(llvm::GlobalValue::DefaultStorageClass);
- return Builder.CreateCall(Func);
- }
- Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
- const CallExpr *E) {
- if (BuiltinID == X86::BI__builtin_cpu_is)
- return EmitX86CpuIs(E);
- if (BuiltinID == X86::BI__builtin_cpu_supports)
- return EmitX86CpuSupports(E);
- if (BuiltinID == X86::BI__builtin_cpu_init)
- return EmitX86CpuInit();
- SmallVector<Value*, 4> Ops;
- // Find out if any arguments are required to be integer constant expressions.
- unsigned ICEArguments = 0;
- ASTContext::GetBuiltinTypeError Error;
- getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
- assert(Error == ASTContext::GE_None && "Should not codegen an error");
- for (unsigned i = 0, e = E->getNumArgs(); i != e; i++) {
- // If this is a normal argument, just emit it as a scalar.
- if ((ICEArguments & (1 << i)) == 0) {
- Ops.push_back(EmitScalarExpr(E->getArg(i)));
- continue;
- }
- // If this is required to be a constant, constant fold it so that we know
- // that the generated intrinsic gets a ConstantInt.
- llvm::APSInt Result;
- bool IsConst = E->getArg(i)->isIntegerConstantExpr(Result, getContext());
- assert(IsConst && "Constant arg isn't actually constant?"); (void)IsConst;
- Ops.push_back(llvm::ConstantInt::get(getLLVMContext(), Result));
- }
- // These exist so that the builtin that takes an immediate can be bounds
- // checked by clang to avoid passing bad immediates to the backend. Since
- // AVX has a larger immediate than SSE, we would need separate builtins to
- // do the different bounds checking. Rather than create a clang-specific
- // SSE-only builtin, this implements eight separate builtins to match the
- // gcc implementation.
- auto getCmpIntrinsicCall = [this, &Ops](Intrinsic::ID ID, unsigned Imm) {
- Ops.push_back(llvm::ConstantInt::get(Int8Ty, Imm));
- llvm::Function *F = CGM.getIntrinsic(ID);
- return Builder.CreateCall(F, Ops);
- };
- // For the vector forms of FP comparisons, translate the builtins directly to
- // IR.
- // TODO: The builtins could be removed if the SSE header files used vector
- // extension comparisons directly (vector ordered/unordered may need
- // additional support via __builtin_isnan()).
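- // Illustrative shape of the emitted IR (not from the original source):
- //   %cmp  = fcmp olt <4 x float> %a, %b
- //   %sext = sext <4 x i1> %cmp to <4 x i32>
- //   %res  = bitcast <4 x i32> %sext to <4 x float>
- // so each true lane becomes the all-ones bit pattern, as the SSE/AVX
- // compare instructions produce.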
- auto getVectorFCmpIR = [this, &Ops](CmpInst::Predicate Pred) {
- Value *Cmp = Builder.CreateFCmp(Pred, Ops[0], Ops[1]);
- llvm::VectorType *FPVecTy = cast<llvm::VectorType>(Ops[0]->getType());
- llvm::VectorType *IntVecTy = llvm::VectorType::getInteger(FPVecTy);
- Value *Sext = Builder.CreateSExt(Cmp, IntVecTy);
- return Builder.CreateBitCast(Sext, FPVecTy);
- };
- switch (BuiltinID) {
- default: return nullptr;
- case X86::BI_mm_prefetch: {
- Value *Address = Ops[0];
- ConstantInt *C = cast<ConstantInt>(Ops[1]);
- Value *RW = ConstantInt::get(Int32Ty, (C->getZExtValue() >> 2) & 0x1);
- Value *Locality = ConstantInt::get(Int32Ty, C->getZExtValue() & 0x3);
- Value *Data = ConstantInt::get(Int32Ty, 1);
- Function *F = CGM.getIntrinsic(Intrinsic::prefetch);
- return Builder.CreateCall(F, {Address, RW, Locality, Data});
- }
- case X86::BI_mm_clflush: {
- return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse2_clflush),
- Ops[0]);
- }
- case X86::BI_mm_lfence: {
- return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse2_lfence));
- }
- case X86::BI_mm_mfence: {
- return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse2_mfence));
- }
- case X86::BI_mm_sfence: {
- return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_sfence));
- }
- case X86::BI_mm_pause: {
- return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse2_pause));
- }
- case X86::BI__rdtsc: {
- return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_rdtsc));
- }
- case X86::BI__builtin_ia32_rdtscp: {
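- // The llvm.x86.rdtscp intrinsic returns a {i64, i32} pair: element 0 is
- // the TSC value and element 1 is IA32_TSC_AUX, which is stored through the
- // pointer argument.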
- Value *Call = Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_rdtscp));
- Builder.CreateDefaultAlignedStore(Builder.CreateExtractValue(Call, 1),
- Ops[0]);
- return Builder.CreateExtractValue(Call, 0);
- }
- case X86::BI__builtin_ia32_lzcnt_u16:
- case X86::BI__builtin_ia32_lzcnt_u32:
- case X86::BI__builtin_ia32_lzcnt_u64: {
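- // Pass 'false' for the is-zero-undef flag so ctlz(0) is defined as the
- // operand width, matching the LZCNT instruction (and likewise TZCNT in the
- // cttz cases below).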
- Function *F = CGM.getIntrinsic(Intrinsic::ctlz, Ops[0]->getType());
- return Builder.CreateCall(F, {Ops[0], Builder.getInt1(false)});
- }
- case X86::BI__builtin_ia32_tzcnt_u16:
- case X86::BI__builtin_ia32_tzcnt_u32:
- case X86::BI__builtin_ia32_tzcnt_u64: {
- Function *F = CGM.getIntrinsic(Intrinsic::cttz, Ops[0]->getType());
- return Builder.CreateCall(F, {Ops[0], Builder.getInt1(false)});
- }
- case X86::BI__builtin_ia32_undef128:
- case X86::BI__builtin_ia32_undef256:
- case X86::BI__builtin_ia32_undef512:
- // The x86 definition of "undef" is not the same as the LLVM definition
- // (PR32176). We leave optimizing away an unnecessary zero constant to the
- // IR optimizer and backend.
- // TODO: If we had a "freeze" IR instruction to generate a fixed undef
- // value, we should use that here instead of a zero.
- return llvm::Constant::getNullValue(ConvertType(E->getType()));
- case X86::BI__builtin_ia32_vec_init_v8qi:
- case X86::BI__builtin_ia32_vec_init_v4hi:
- case X86::BI__builtin_ia32_vec_init_v2si:
- return Builder.CreateBitCast(BuildVector(Ops),
- llvm::Type::getX86_MMXTy(getLLVMContext()));
- case X86::BI__builtin_ia32_vec_ext_v2si:
- case X86::BI__builtin_ia32_vec_ext_v16qi:
- case X86::BI__builtin_ia32_vec_ext_v8hi:
- case X86::BI__builtin_ia32_vec_ext_v4si:
- case X86::BI__builtin_ia32_vec_ext_v4sf:
- case X86::BI__builtin_ia32_vec_ext_v2di:
- case X86::BI__builtin_ia32_vec_ext_v32qi:
- case X86::BI__builtin_ia32_vec_ext_v16hi:
- case X86::BI__builtin_ia32_vec_ext_v8si:
- case X86::BI__builtin_ia32_vec_ext_v4di: {
- unsigned NumElts = Ops[0]->getType()->getVectorNumElements();
- uint64_t Index = cast<ConstantInt>(Ops[1])->getZExtValue();
- Index &= NumElts - 1;
- // These builtins exist so we can ensure the index is an ICE and in range.
- // Otherwise we could just do this in the header file.
- return Builder.CreateExtractElement(Ops[0], Index);
- }
- case X86::BI__builtin_ia32_vec_set_v16qi:
- case X86::BI__builtin_ia32_vec_set_v8hi:
- case X86::BI__builtin_ia32_vec_set_v4si:
- case X86::BI__builtin_ia32_vec_set_v2di:
- case X86::BI__builtin_ia32_vec_set_v32qi:
- case X86::BI__builtin_ia32_vec_set_v16hi:
- case X86::BI__builtin_ia32_vec_set_v8si:
- case X86::BI__builtin_ia32_vec_set_v4di: {
- unsigned NumElts = Ops[0]->getType()->getVectorNumElements();
- unsigned Index = cast<ConstantInt>(Ops[2])->getZExtValue();
- Index &= NumElts - 1;
- // These builtins exist so we can ensure the index is an ICE and in range.
- // Otherwise we could just do this in the header file.
- return Builder.CreateInsertElement(Ops[0], Ops[1], Index);
- }
- case X86::BI_mm_setcsr:
- case X86::BI__builtin_ia32_ldmxcsr: {
- Address Tmp = CreateMemTemp(E->getArg(0)->getType());
- Builder.CreateStore(Ops[0], Tmp);
- return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_ldmxcsr),
- Builder.CreateBitCast(Tmp.getPointer(), Int8PtrTy));
- }
- case X86::BI_mm_getcsr:
- case X86::BI__builtin_ia32_stmxcsr: {
- Address Tmp = CreateMemTemp(E->getType());
- Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_stmxcsr),
- Builder.CreateBitCast(Tmp.getPointer(), Int8PtrTy));
- return Builder.CreateLoad(Tmp, "stmxcsr");
- }
- case X86::BI__builtin_ia32_xsave:
- case X86::BI__builtin_ia32_xsave64:
- case X86::BI__builtin_ia32_xrstor:
- case X86::BI__builtin_ia32_xrstor64:
- case X86::BI__builtin_ia32_xsaveopt:
- case X86::BI__builtin_ia32_xsaveopt64:
- case X86::BI__builtin_ia32_xrstors:
- case X86::BI__builtin_ia32_xrstors64:
- case X86::BI__builtin_ia32_xsavec:
- case X86::BI__builtin_ia32_xsavec64:
- case X86::BI__builtin_ia32_xsaves:
- case X86::BI__builtin_ia32_xsaves64:
- case X86::BI__builtin_ia32_xsetbv:
- case X86::BI_xsetbv: {
- Intrinsic::ID ID;
- #define INTRINSIC_X86_XSAVE_ID(NAME) \
- case X86::BI__builtin_ia32_##NAME: \
- ID = Intrinsic::x86_##NAME; \
- break
- switch (BuiltinID) {
- default: llvm_unreachable("Unsupported intrinsic!");
- INTRINSIC_X86_XSAVE_ID(xsave);
- INTRINSIC_X86_XSAVE_ID(xsave64);
- INTRINSIC_X86_XSAVE_ID(xrstor);
- INTRINSIC_X86_XSAVE_ID(xrstor64);
- INTRINSIC_X86_XSAVE_ID(xsaveopt);
- INTRINSIC_X86_XSAVE_ID(xsaveopt64);
- INTRINSIC_X86_XSAVE_ID(xrstors);
- INTRINSIC_X86_XSAVE_ID(xrstors64);
- INTRINSIC_X86_XSAVE_ID(xsavec);
- INTRINSIC_X86_XSAVE_ID(xsavec64);
- INTRINSIC_X86_XSAVE_ID(xsaves);
- INTRINSIC_X86_XSAVE_ID(xsaves64);
- INTRINSIC_X86_XSAVE_ID(xsetbv);
- case X86::BI_xsetbv:
- ID = Intrinsic::x86_xsetbv;
- break;
- }
- #undef INTRINSIC_X86_XSAVE_ID
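- // Split the 64-bit mask/value in Ops[1] into its 32-bit halves; the x86
- // intrinsics take it as two operands (EDX:EAX in the instruction encoding).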
- Value *Mhi = Builder.CreateTrunc(
- Builder.CreateLShr(Ops[1], ConstantInt::get(Int64Ty, 32)), Int32Ty);
- Value *Mlo = Builder.CreateTrunc(Ops[1], Int32Ty);
- Ops[1] = Mhi;
- Ops.push_back(Mlo);
- return Builder.CreateCall(CGM.getIntrinsic(ID), Ops);
- }
- case X86::BI__builtin_ia32_xgetbv:
- case X86::BI_xgetbv:
- return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_xgetbv), Ops);
- case X86::BI__builtin_ia32_storedqudi128_mask:
- case X86::BI__builtin_ia32_storedqusi128_mask:
- case X86::BI__builtin_ia32_storedquhi128_mask:
- case X86::BI__builtin_ia32_storedquqi128_mask:
- case X86::BI__builtin_ia32_storeupd128_mask:
- case X86::BI__builtin_ia32_storeups128_mask:
- case X86::BI__builtin_ia32_storedqudi256_mask:
- case X86::BI__builtin_ia32_storedqusi256_mask:
- case X86::BI__builtin_ia32_storedquhi256_mask:
- case X86::BI__builtin_ia32_storedquqi256_mask:
- case X86::BI__builtin_ia32_storeupd256_mask:
- case X86::BI__builtin_ia32_storeups256_mask:
- case X86::BI__builtin_ia32_storedqudi512_mask:
- case X86::BI__builtin_ia32_storedqusi512_mask:
- case X86::BI__builtin_ia32_storedquhi512_mask:
- case X86::BI__builtin_ia32_storedquqi512_mask:
- case X86::BI__builtin_ia32_storeupd512_mask:
- case X86::BI__builtin_ia32_storeups512_mask:
- return EmitX86MaskedStore(*this, Ops, 1);
- case X86::BI__builtin_ia32_storess128_mask:
- case X86::BI__builtin_ia32_storesd128_mask: {
- return EmitX86MaskedStore(*this, Ops, 1);
- }
- case X86::BI__builtin_ia32_vpopcntb_128:
- case X86::BI__builtin_ia32_vpopcntd_128:
- case X86::BI__builtin_ia32_vpopcntq_128:
- case X86::BI__builtin_ia32_vpopcntw_128:
- case X86::BI__builtin_ia32_vpopcntb_256:
- case X86::BI__builtin_ia32_vpopcntd_256:
- case X86::BI__builtin_ia32_vpopcntq_256:
- case X86::BI__builtin_ia32_vpopcntw_256:
- case X86::BI__builtin_ia32_vpopcntb_512:
- case X86::BI__builtin_ia32_vpopcntd_512:
- case X86::BI__builtin_ia32_vpopcntq_512:
- case X86::BI__builtin_ia32_vpopcntw_512: {
- llvm::Type *ResultType = ConvertType(E->getType());
- llvm::Function *F = CGM.getIntrinsic(Intrinsic::ctpop, ResultType);
- return Builder.CreateCall(F, Ops);
- }
- case X86::BI__builtin_ia32_cvtmask2b128:
- case X86::BI__builtin_ia32_cvtmask2b256:
- case X86::BI__builtin_ia32_cvtmask2b512:
- case X86::BI__builtin_ia32_cvtmask2w128:
- case X86::BI__builtin_ia32_cvtmask2w256:
- case X86::BI__builtin_ia32_cvtmask2w512:
- case X86::BI__builtin_ia32_cvtmask2d128:
- case X86::BI__builtin_ia32_cvtmask2d256:
- case X86::BI__builtin_ia32_cvtmask2d512:
- case X86::BI__builtin_ia32_cvtmask2q128:
- case X86::BI__builtin_ia32_cvtmask2q256:
- case X86::BI__builtin_ia32_cvtmask2q512:
- return EmitX86SExtMask(*this, Ops[0], ConvertType(E->getType()));
- case X86::BI__builtin_ia32_cvtb2mask128:
- case X86::BI__builtin_ia32_cvtb2mask256:
- case X86::BI__builtin_ia32_cvtb2mask512:
- case X86::BI__builtin_ia32_cvtw2mask128:
- case X86::BI__builtin_ia32_cvtw2mask256:
- case X86::BI__builtin_ia32_cvtw2mask512:
- case X86::BI__builtin_ia32_cvtd2mask128:
- case X86::BI__builtin_ia32_cvtd2mask256:
- case X86::BI__builtin_ia32_cvtd2mask512:
- case X86::BI__builtin_ia32_cvtq2mask128:
- case X86::BI__builtin_ia32_cvtq2mask256:
- case X86::BI__builtin_ia32_cvtq2mask512:
- return EmitX86ConvertToMask(*this, Ops[0]);
- case X86::BI__builtin_ia32_cvtdq2ps512_mask:
- case X86::BI__builtin_ia32_cvtqq2ps512_mask:
- case X86::BI__builtin_ia32_cvtqq2pd512_mask:
- return EmitX86ConvertIntToFp(*this, Ops, /*IsSigned*/true);
- case X86::BI__builtin_ia32_cvtudq2ps512_mask:
- case X86::BI__builtin_ia32_cvtuqq2ps512_mask:
- case X86::BI__builtin_ia32_cvtuqq2pd512_mask:
- return EmitX86ConvertIntToFp(*this, Ops, /*IsSigned*/false);
- case X86::BI__builtin_ia32_vfmaddss3:
- case X86::BI__builtin_ia32_vfmaddsd3:
- case X86::BI__builtin_ia32_vfmaddss3_mask:
- case X86::BI__builtin_ia32_vfmaddsd3_mask:
- return EmitScalarFMAExpr(*this, Ops, Ops[0]);
- case X86::BI__builtin_ia32_vfmaddss:
- case X86::BI__builtin_ia32_vfmaddsd:
- return EmitScalarFMAExpr(*this, Ops,
- Constant::getNullValue(Ops[0]->getType()));
- case X86::BI__builtin_ia32_vfmaddss3_maskz:
- case X86::BI__builtin_ia32_vfmaddsd3_maskz:
- return EmitScalarFMAExpr(*this, Ops, Ops[0], /*ZeroMask*/true);
- case X86::BI__builtin_ia32_vfmaddss3_mask3:
- case X86::BI__builtin_ia32_vfmaddsd3_mask3:
- return EmitScalarFMAExpr(*this, Ops, Ops[2], /*ZeroMask*/false, 2);
- case X86::BI__builtin_ia32_vfmsubss3_mask3:
- case X86::BI__builtin_ia32_vfmsubsd3_mask3:
- return EmitScalarFMAExpr(*this, Ops, Ops[2], /*ZeroMask*/false, 2,
- /*NegAcc*/true);
- case X86::BI__builtin_ia32_vfmaddps:
- case X86::BI__builtin_ia32_vfmaddpd:
- case X86::BI__builtin_ia32_vfmaddps256:
- case X86::BI__builtin_ia32_vfmaddpd256:
- case X86::BI__builtin_ia32_vfmaddps512_mask:
- case X86::BI__builtin_ia32_vfmaddps512_maskz:
- case X86::BI__builtin_ia32_vfmaddps512_mask3:
- case X86::BI__builtin_ia32_vfmsubps512_mask3:
- case X86::BI__builtin_ia32_vfmaddpd512_mask:
- case X86::BI__builtin_ia32_vfmaddpd512_maskz:
- case X86::BI__builtin_ia32_vfmaddpd512_mask3:
- case X86::BI__builtin_ia32_vfmsubpd512_mask3:
- return EmitX86FMAExpr(*this, Ops, BuiltinID, /*IsAddSub*/false);
- case X86::BI__builtin_ia32_vfmaddsubps:
- case X86::BI__builtin_ia32_vfmaddsubpd:
- case X86::BI__builtin_ia32_vfmaddsubps256:
- case X86::BI__builtin_ia32_vfmaddsubpd256:
- case X86::BI__builtin_ia32_vfmaddsubps512_mask:
- case X86::BI__builtin_ia32_vfmaddsubps512_maskz:
- case X86::BI__builtin_ia32_vfmaddsubps512_mask3:
- case X86::BI__builtin_ia32_vfmsubaddps512_mask3:
- case X86::BI__builtin_ia32_vfmaddsubpd512_mask:
- case X86::BI__builtin_ia32_vfmaddsubpd512_maskz:
- case X86::BI__builtin_ia32_vfmaddsubpd512_mask3:
- case X86::BI__builtin_ia32_vfmsubaddpd512_mask3:
- return EmitX86FMAExpr(*this, Ops, BuiltinID, /*IsAddSub*/true);
- case X86::BI__builtin_ia32_movdqa32store128_mask:
- case X86::BI__builtin_ia32_movdqa64store128_mask:
- case X86::BI__builtin_ia32_storeaps128_mask:
- case X86::BI__builtin_ia32_storeapd128_mask:
- case X86::BI__builtin_ia32_movdqa32store256_mask:
- case X86::BI__builtin_ia32_movdqa64store256_mask:
- case X86::BI__builtin_ia32_storeaps256_mask:
- case X86::BI__builtin_ia32_storeapd256_mask:
- case X86::BI__builtin_ia32_movdqa32store512_mask:
- case X86::BI__builtin_ia32_movdqa64store512_mask:
- case X86::BI__builtin_ia32_storeaps512_mask:
- case X86::BI__builtin_ia32_storeapd512_mask: {
- unsigned Align =
- getContext().getTypeAlignInChars(E->getArg(1)->getType()).getQuantity();
- return EmitX86MaskedStore(*this, Ops, Align);
- }
- case X86::BI__builtin_ia32_loadups128_mask:
- case X86::BI__builtin_ia32_loadups256_mask:
- case X86::BI__builtin_ia32_loadups512_mask:
- case X86::BI__builtin_ia32_loadupd128_mask:
- case X86::BI__builtin_ia32_loadupd256_mask:
- case X86::BI__builtin_ia32_loadupd512_mask:
- case X86::BI__builtin_ia32_loaddquqi128_mask:
- case X86::BI__builtin_ia32_loaddquqi256_mask:
- case X86::BI__builtin_ia32_loaddquqi512_mask:
- case X86::BI__builtin_ia32_loaddquhi128_mask:
- case X86::BI__builtin_ia32_loaddquhi256_mask:
- case X86::BI__builtin_ia32_loaddquhi512_mask:
- case X86::BI__builtin_ia32_loaddqusi128_mask:
- case X86::BI__builtin_ia32_loaddqusi256_mask:
- case X86::BI__builtin_ia32_loaddqusi512_mask:
- case X86::BI__builtin_ia32_loaddqudi128_mask:
- case X86::BI__builtin_ia32_loaddqudi256_mask:
- case X86::BI__builtin_ia32_loaddqudi512_mask:
- return EmitX86MaskedLoad(*this, Ops, 1);
- case X86::BI__builtin_ia32_loadss128_mask:
- case X86::BI__builtin_ia32_loadsd128_mask:
- return EmitX86MaskedLoad(*this, Ops, 1);
- case X86::BI__builtin_ia32_loadaps128_mask:
- case X86::BI__builtin_ia32_loadaps256_mask:
- case X86::BI__builtin_ia32_loadaps512_mask:
- case X86::BI__builtin_ia32_loadapd128_mask:
- case X86::BI__builtin_ia32_loadapd256_mask:
- case X86::BI__builtin_ia32_loadapd512_mask:
- case X86::BI__builtin_ia32_movdqa32load128_mask:
- case X86::BI__builtin_ia32_movdqa32load256_mask:
- case X86::BI__builtin_ia32_movdqa32load512_mask:
- case X86::BI__builtin_ia32_movdqa64load128_mask:
- case X86::BI__builtin_ia32_movdqa64load256_mask:
- case X86::BI__builtin_ia32_movdqa64load512_mask: {
- unsigned Align =
- getContext().getTypeAlignInChars(E->getArg(1)->getType()).getQuantity();
- return EmitX86MaskedLoad(*this, Ops, Align);
- }
- case X86::BI__builtin_ia32_expandloaddf128_mask:
- case X86::BI__builtin_ia32_expandloaddf256_mask:
- case X86::BI__builtin_ia32_expandloaddf512_mask:
- case X86::BI__builtin_ia32_expandloadsf128_mask:
- case X86::BI__builtin_ia32_expandloadsf256_mask:
- case X86::BI__builtin_ia32_expandloadsf512_mask:
- case X86::BI__builtin_ia32_expandloaddi128_mask:
- case X86::BI__builtin_ia32_expandloaddi256_mask:
- case X86::BI__builtin_ia32_expandloaddi512_mask:
- case X86::BI__builtin_ia32_expandloadsi128_mask:
- case X86::BI__builtin_ia32_expandloadsi256_mask:
- case X86::BI__builtin_ia32_expandloadsi512_mask:
- case X86::BI__builtin_ia32_expandloadhi128_mask:
- case X86::BI__builtin_ia32_expandloadhi256_mask:
- case X86::BI__builtin_ia32_expandloadhi512_mask:
- case X86::BI__builtin_ia32_expandloadqi128_mask:
- case X86::BI__builtin_ia32_expandloadqi256_mask:
- case X86::BI__builtin_ia32_expandloadqi512_mask:
- return EmitX86ExpandLoad(*this, Ops);
- case X86::BI__builtin_ia32_compressstoredf128_mask:
- case X86::BI__builtin_ia32_compressstoredf256_mask:
- case X86::BI__builtin_ia32_compressstoredf512_mask:
- case X86::BI__builtin_ia32_compressstoresf128_mask:
- case X86::BI__builtin_ia32_compressstoresf256_mask:
- case X86::BI__builtin_ia32_compressstoresf512_mask:
- case X86::BI__builtin_ia32_compressstoredi128_mask:
- case X86::BI__builtin_ia32_compressstoredi256_mask:
- case X86::BI__builtin_ia32_compressstoredi512_mask:
- case X86::BI__builtin_ia32_compressstoresi128_mask:
- case X86::BI__builtin_ia32_compressstoresi256_mask:
- case X86::BI__builtin_ia32_compressstoresi512_mask:
- case X86::BI__builtin_ia32_compressstorehi128_mask:
- case X86::BI__builtin_ia32_compressstorehi256_mask:
- case X86::BI__builtin_ia32_compressstorehi512_mask:
- case X86::BI__builtin_ia32_compressstoreqi128_mask:
- case X86::BI__builtin_ia32_compressstoreqi256_mask:
- case X86::BI__builtin_ia32_compressstoreqi512_mask:
- return EmitX86CompressStore(*this, Ops);
- case X86::BI__builtin_ia32_expanddf128_mask:
- case X86::BI__builtin_ia32_expanddf256_mask:
- case X86::BI__builtin_ia32_expanddf512_mask:
- case X86::BI__builtin_ia32_expandsf128_mask:
- case X86::BI__builtin_ia32_expandsf256_mask:
- case X86::BI__builtin_ia32_expandsf512_mask:
- case X86::BI__builtin_ia32_expanddi128_mask:
- case X86::BI__builtin_ia32_expanddi256_mask:
- case X86::BI__builtin_ia32_expanddi512_mask:
- case X86::BI__builtin_ia32_expandsi128_mask:
- case X86::BI__builtin_ia32_expandsi256_mask:
- case X86::BI__builtin_ia32_expandsi512_mask:
- case X86::BI__builtin_ia32_expandhi128_mask:
- case X86::BI__builtin_ia32_expandhi256_mask:
- case X86::BI__builtin_ia32_expandhi512_mask:
- case X86::BI__builtin_ia32_expandqi128_mask:
- case X86::BI__builtin_ia32_expandqi256_mask:
- case X86::BI__builtin_ia32_expandqi512_mask:
- return EmitX86CompressExpand(*this, Ops, /*IsCompress*/false);
- case X86::BI__builtin_ia32_compressdf128_mask:
- case X86::BI__builtin_ia32_compressdf256_mask:
- case X86::BI__builtin_ia32_compressdf512_mask:
- case X86::BI__builtin_ia32_compresssf128_mask:
- case X86::BI__builtin_ia32_compresssf256_mask:
- case X86::BI__builtin_ia32_compresssf512_mask:
- case X86::BI__builtin_ia32_compressdi128_mask:
- case X86::BI__builtin_ia32_compressdi256_mask:
- case X86::BI__builtin_ia32_compressdi512_mask:
- case X86::BI__builtin_ia32_compresssi128_mask:
- case X86::BI__builtin_ia32_compresssi256_mask:
- case X86::BI__builtin_ia32_compresssi512_mask:
- case X86::BI__builtin_ia32_compresshi128_mask:
- case X86::BI__builtin_ia32_compresshi256_mask:
- case X86::BI__builtin_ia32_compresshi512_mask:
- case X86::BI__builtin_ia32_compressqi128_mask:
- case X86::BI__builtin_ia32_compressqi256_mask:
- case X86::BI__builtin_ia32_compressqi512_mask:
- return EmitX86CompressExpand(*this, Ops, /*IsCompress*/true);
- case X86::BI__builtin_ia32_gather3div2df:
- case X86::BI__builtin_ia32_gather3div2di:
- case X86::BI__builtin_ia32_gather3div4df:
- case X86::BI__builtin_ia32_gather3div4di:
- case X86::BI__builtin_ia32_gather3div4sf:
- case X86::BI__builtin_ia32_gather3div4si:
- case X86::BI__builtin_ia32_gather3div8sf:
- case X86::BI__builtin_ia32_gather3div8si:
- case X86::BI__builtin_ia32_gather3siv2df:
- case X86::BI__builtin_ia32_gather3siv2di:
- case X86::BI__builtin_ia32_gather3siv4df:
- case X86::BI__builtin_ia32_gather3siv4di:
- case X86::BI__builtin_ia32_gather3siv4sf:
- case X86::BI__builtin_ia32_gather3siv4si:
- case X86::BI__builtin_ia32_gather3siv8sf:
- case X86::BI__builtin_ia32_gather3siv8si:
- case X86::BI__builtin_ia32_gathersiv8df:
- case X86::BI__builtin_ia32_gathersiv16sf:
- case X86::BI__builtin_ia32_gatherdiv8df:
- case X86::BI__builtin_ia32_gatherdiv16sf:
- case X86::BI__builtin_ia32_gathersiv8di:
- case X86::BI__builtin_ia32_gathersiv16si:
- case X86::BI__builtin_ia32_gatherdiv8di:
- case X86::BI__builtin_ia32_gatherdiv16si: {
- Intrinsic::ID IID;
- switch (BuiltinID) {
- default: llvm_unreachable("Unexpected builtin");
- case X86::BI__builtin_ia32_gather3div2df:
- IID = Intrinsic::x86_avx512_mask_gather3div2_df;
- break;
- case X86::BI__builtin_ia32_gather3div2di:
- IID = Intrinsic::x86_avx512_mask_gather3div2_di;
- break;
- case X86::BI__builtin_ia32_gather3div4df:
- IID = Intrinsic::x86_avx512_mask_gather3div4_df;
- break;
- case X86::BI__builtin_ia32_gather3div4di:
- IID = Intrinsic::x86_avx512_mask_gather3div4_di;
- break;
- case X86::BI__builtin_ia32_gather3div4sf:
- IID = Intrinsic::x86_avx512_mask_gather3div4_sf;
- break;
- case X86::BI__builtin_ia32_gather3div4si:
- IID = Intrinsic::x86_avx512_mask_gather3div4_si;
- break;
- case X86::BI__builtin_ia32_gather3div8sf:
- IID = Intrinsic::x86_avx512_mask_gather3div8_sf;
- break;
- case X86::BI__builtin_ia32_gather3div8si:
- IID = Intrinsic::x86_avx512_mask_gather3div8_si;
- break;
- case X86::BI__builtin_ia32_gather3siv2df:
- IID = Intrinsic::x86_avx512_mask_gather3siv2_df;
- break;
- case X86::BI__builtin_ia32_gather3siv2di:
- IID = Intrinsic::x86_avx512_mask_gather3siv2_di;
- break;
- case X86::BI__builtin_ia32_gather3siv4df:
- IID = Intrinsic::x86_avx512_mask_gather3siv4_df;
- break;
- case X86::BI__builtin_ia32_gather3siv4di:
- IID = Intrinsic::x86_avx512_mask_gather3siv4_di;
- break;
- case X86::BI__builtin_ia32_gather3siv4sf:
- IID = Intrinsic::x86_avx512_mask_gather3siv4_sf;
- break;
- case X86::BI__builtin_ia32_gather3siv4si:
- IID = Intrinsic::x86_avx512_mask_gather3siv4_si;
- break;
- case X86::BI__builtin_ia32_gather3siv8sf:
- IID = Intrinsic::x86_avx512_mask_gather3siv8_sf;
- break;
- case X86::BI__builtin_ia32_gather3siv8si:
- IID = Intrinsic::x86_avx512_mask_gather3siv8_si;
- break;
- case X86::BI__builtin_ia32_gathersiv8df:
- IID = Intrinsic::x86_avx512_mask_gather_dpd_512;
- break;
- case X86::BI__builtin_ia32_gathersiv16sf:
- IID = Intrinsic::x86_avx512_mask_gather_dps_512;
- break;
- case X86::BI__builtin_ia32_gatherdiv8df:
- IID = Intrinsic::x86_avx512_mask_gather_qpd_512;
- break;
- case X86::BI__builtin_ia32_gatherdiv16sf:
- IID = Intrinsic::x86_avx512_mask_gather_qps_512;
- break;
- case X86::BI__builtin_ia32_gathersiv8di:
- IID = Intrinsic::x86_avx512_mask_gather_dpq_512;
- break;
- case X86::BI__builtin_ia32_gathersiv16si:
- IID = Intrinsic::x86_avx512_mask_gather_dpi_512;
- break;
- case X86::BI__builtin_ia32_gatherdiv8di:
- IID = Intrinsic::x86_avx512_mask_gather_qpq_512;
- break;
- case X86::BI__builtin_ia32_gatherdiv16si:
- IID = Intrinsic::x86_avx512_mask_gather_qpi_512;
- break;
- }
- unsigned MinElts = std::min(Ops[0]->getType()->getVectorNumElements(),
- Ops[2]->getType()->getVectorNumElements());
- Ops[3] = getMaskVecValue(*this, Ops[3], MinElts);
- Function *Intr = CGM.getIntrinsic(IID);
- return Builder.CreateCall(Intr, Ops);
- }
- case X86::BI__builtin_ia32_scattersiv8df:
- case X86::BI__builtin_ia32_scattersiv16sf:
- case X86::BI__builtin_ia32_scatterdiv8df:
- case X86::BI__builtin_ia32_scatterdiv16sf:
- case X86::BI__builtin_ia32_scattersiv8di:
- case X86::BI__builtin_ia32_scattersiv16si:
- case X86::BI__builtin_ia32_scatterdiv8di:
- case X86::BI__builtin_ia32_scatterdiv16si:
- case X86::BI__builtin_ia32_scatterdiv2df:
- case X86::BI__builtin_ia32_scatterdiv2di:
- case X86::BI__builtin_ia32_scatterdiv4df:
- case X86::BI__builtin_ia32_scatterdiv4di:
- case X86::BI__builtin_ia32_scatterdiv4sf:
- case X86::BI__builtin_ia32_scatterdiv4si:
- case X86::BI__builtin_ia32_scatterdiv8sf:
- case X86::BI__builtin_ia32_scatterdiv8si:
- case X86::BI__builtin_ia32_scattersiv2df:
- case X86::BI__builtin_ia32_scattersiv2di:
- case X86::BI__builtin_ia32_scattersiv4df:
- case X86::BI__builtin_ia32_scattersiv4di:
- case X86::BI__builtin_ia32_scattersiv4sf:
- case X86::BI__builtin_ia32_scattersiv4si:
- case X86::BI__builtin_ia32_scattersiv8sf:
- case X86::BI__builtin_ia32_scattersiv8si: {
- Intrinsic::ID IID;
- switch (BuiltinID) {
- default: llvm_unreachable("Unexpected builtin");
- case X86::BI__builtin_ia32_scattersiv8df:
- IID = Intrinsic::x86_avx512_mask_scatter_dpd_512;
- break;
- case X86::BI__builtin_ia32_scattersiv16sf:
- IID = Intrinsic::x86_avx512_mask_scatter_dps_512;
- break;
- case X86::BI__builtin_ia32_scatterdiv8df:
- IID = Intrinsic::x86_avx512_mask_scatter_qpd_512;
- break;
- case X86::BI__builtin_ia32_scatterdiv16sf:
- IID = Intrinsic::x86_avx512_mask_scatter_qps_512;
- break;
- case X86::BI__builtin_ia32_scattersiv8di:
- IID = Intrinsic::x86_avx512_mask_scatter_dpq_512;
- break;
- case X86::BI__builtin_ia32_scattersiv16si:
- IID = Intrinsic::x86_avx512_mask_scatter_dpi_512;
- break;
- case X86::BI__builtin_ia32_scatterdiv8di:
- IID = Intrinsic::x86_avx512_mask_scatter_qpq_512;
- break;
- case X86::BI__builtin_ia32_scatterdiv16si:
- IID = Intrinsic::x86_avx512_mask_scatter_qpi_512;
- break;
- case X86::BI__builtin_ia32_scatterdiv2df:
- IID = Intrinsic::x86_avx512_mask_scatterdiv2_df;
- break;
- case X86::BI__builtin_ia32_scatterdiv2di:
- IID = Intrinsic::x86_avx512_mask_scatterdiv2_di;
- break;
- case X86::BI__builtin_ia32_scatterdiv4df:
- IID = Intrinsic::x86_avx512_mask_scatterdiv4_df;
- break;
- case X86::BI__builtin_ia32_scatterdiv4di:
- IID = Intrinsic::x86_avx512_mask_scatterdiv4_di;
- break;
- case X86::BI__builtin_ia32_scatterdiv4sf:
- IID = Intrinsic::x86_avx512_mask_scatterdiv4_sf;
- break;
- case X86::BI__builtin_ia32_scatterdiv4si:
- IID = Intrinsic::x86_avx512_mask_scatterdiv4_si;
- break;
- case X86::BI__builtin_ia32_scatterdiv8sf:
- IID = Intrinsic::x86_avx512_mask_scatterdiv8_sf;
- break;
- case X86::BI__builtin_ia32_scatterdiv8si:
- IID = Intrinsic::x86_avx512_mask_scatterdiv8_si;
- break;
- case X86::BI__builtin_ia32_scattersiv2df:
- IID = Intrinsic::x86_avx512_mask_scattersiv2_df;
- break;
- case X86::BI__builtin_ia32_scattersiv2di:
- IID = Intrinsic::x86_avx512_mask_scattersiv2_di;
- break;
- case X86::BI__builtin_ia32_scattersiv4df:
- IID = Intrinsic::x86_avx512_mask_scattersiv4_df;
- break;
- case X86::BI__builtin_ia32_scattersiv4di:
- IID = Intrinsic::x86_avx512_mask_scattersiv4_di;
- break;
- case X86::BI__builtin_ia32_scattersiv4sf:
- IID = Intrinsic::x86_avx512_mask_scattersiv4_sf;
- break;
- case X86::BI__builtin_ia32_scattersiv4si:
- IID = Intrinsic::x86_avx512_mask_scattersiv4_si;
- break;
- case X86::BI__builtin_ia32_scattersiv8sf:
- IID = Intrinsic::x86_avx512_mask_scattersiv8_sf;
- break;
- case X86::BI__builtin_ia32_scattersiv8si:
- IID = Intrinsic::x86_avx512_mask_scattersiv8_si;
- break;
- }
- unsigned MinElts = std::min(Ops[2]->getType()->getVectorNumElements(),
- Ops[3]->getType()->getVectorNumElements());
- Ops[1] = getMaskVecValue(*this, Ops[1], MinElts);
- Function *Intr = CGM.getIntrinsic(IID);
- return Builder.CreateCall(Intr, Ops);
- }
- case X86::BI__builtin_ia32_vextractf128_pd256:
- case X86::BI__builtin_ia32_vextractf128_ps256:
- case X86::BI__builtin_ia32_vextractf128_si256:
- case X86::BI__builtin_ia32_extract128i256:
- case X86::BI__builtin_ia32_extractf64x4_mask:
- case X86::BI__builtin_ia32_extractf32x4_mask:
- case X86::BI__builtin_ia32_extracti64x4_mask:
- case X86::BI__builtin_ia32_extracti32x4_mask:
- case X86::BI__builtin_ia32_extractf32x8_mask:
- case X86::BI__builtin_ia32_extracti32x8_mask:
- case X86::BI__builtin_ia32_extractf32x4_256_mask:
- case X86::BI__builtin_ia32_extracti32x4_256_mask:
- case X86::BI__builtin_ia32_extractf64x2_256_mask:
- case X86::BI__builtin_ia32_extracti64x2_256_mask:
- case X86::BI__builtin_ia32_extractf64x2_512_mask:
- case X86::BI__builtin_ia32_extracti64x2_512_mask: {
- llvm::Type *DstTy = ConvertType(E->getType());
- unsigned NumElts = DstTy->getVectorNumElements();
- unsigned SrcNumElts = Ops[0]->getType()->getVectorNumElements();
- unsigned SubVectors = SrcNumElts / NumElts;
- unsigned Index = cast<ConstantInt>(Ops[1])->getZExtValue();
- assert(llvm::isPowerOf2_32(SubVectors) && "Expected power of 2 subvectors");
- Index &= SubVectors - 1; // Remove any extra bits.
- Index *= NumElts;
- uint32_t Indices[16];
- for (unsigned i = 0; i != NumElts; ++i)
- Indices[i] = i + Index;
- Value *Res = Builder.CreateShuffleVector(Ops[0],
- UndefValue::get(Ops[0]->getType()),
- makeArrayRef(Indices, NumElts),
- "extract");
- if (Ops.size() == 4)
- Res = EmitX86Select(*this, Ops[3], Res, Ops[2]);
- return Res;
- }
- case X86::BI__builtin_ia32_vinsertf128_pd256:
- case X86::BI__builtin_ia32_vinsertf128_ps256:
- case X86::BI__builtin_ia32_vinsertf128_si256:
- case X86::BI__builtin_ia32_insert128i256:
- case X86::BI__builtin_ia32_insertf64x4:
- case X86::BI__builtin_ia32_insertf32x4:
- case X86::BI__builtin_ia32_inserti64x4:
- case X86::BI__builtin_ia32_inserti32x4:
- case X86::BI__builtin_ia32_insertf32x8:
- case X86::BI__builtin_ia32_inserti32x8:
- case X86::BI__builtin_ia32_insertf32x4_256:
- case X86::BI__builtin_ia32_inserti32x4_256:
- case X86::BI__builtin_ia32_insertf64x2_256:
- case X86::BI__builtin_ia32_inserti64x2_256:
- case X86::BI__builtin_ia32_insertf64x2_512:
- case X86::BI__builtin_ia32_inserti64x2_512: {
- unsigned DstNumElts = Ops[0]->getType()->getVectorNumElements();
- unsigned SrcNumElts = Ops[1]->getType()->getVectorNumElements();
- unsigned SubVectors = DstNumElts / SrcNumElts;
- unsigned Index = cast<ConstantInt>(Ops[2])->getZExtValue();
- assert(llvm::isPowerOf2_32(SubVectors) && "Expected power of 2 subvectors");
- Index &= SubVectors - 1; // Remove any extra bits.
- Index *= SrcNumElts;
- uint32_t Indices[16];
- for (unsigned i = 0; i != DstNumElts; ++i)
- Indices[i] = (i >= SrcNumElts) ? SrcNumElts + (i % SrcNumElts) : i;
- Value *Op1 = Builder.CreateShuffleVector(Ops[1],
- UndefValue::get(Ops[1]->getType()),
- makeArrayRef(Indices, DstNumElts),
- "widen");
- for (unsigned i = 0; i != DstNumElts; ++i) {
- if (i >= Index && i < (Index + SrcNumElts))
- Indices[i] = (i - Index) + DstNumElts;
- else
- Indices[i] = i;
- }
- return Builder.CreateShuffleVector(Ops[0], Op1,
- makeArrayRef(Indices, DstNumElts),
- "insert");
- }
- case X86::BI__builtin_ia32_pmovqd512_mask:
- case X86::BI__builtin_ia32_pmovwb512_mask: {
- Value *Res = Builder.CreateTrunc(Ops[0], Ops[1]->getType());
- return EmitX86Select(*this, Ops[2], Res, Ops[1]);
- }
- case X86::BI__builtin_ia32_pmovdb512_mask:
- case X86::BI__builtin_ia32_pmovdw512_mask:
- case X86::BI__builtin_ia32_pmovqw512_mask: {
- if (const auto *C = dyn_cast<Constant>(Ops[2]))
- if (C->isAllOnesValue())
- return Builder.CreateTrunc(Ops[0], Ops[1]->getType());
- Intrinsic::ID IID;
- switch (BuiltinID) {
- default: llvm_unreachable("Unsupported intrinsic!");
- case X86::BI__builtin_ia32_pmovdb512_mask:
- IID = Intrinsic::x86_avx512_mask_pmov_db_512;
- break;
- case X86::BI__builtin_ia32_pmovdw512_mask:
- IID = Intrinsic::x86_avx512_mask_pmov_dw_512;
- break;
- case X86::BI__builtin_ia32_pmovqw512_mask:
- IID = Intrinsic::x86_avx512_mask_pmov_qw_512;
- break;
- }
- Function *Intr = CGM.getIntrinsic(IID);
- return Builder.CreateCall(Intr, Ops);
- }
- case X86::BI__builtin_ia32_pblendw128:
- case X86::BI__builtin_ia32_blendpd:
- case X86::BI__builtin_ia32_blendps:
- case X86::BI__builtin_ia32_blendpd256:
- case X86::BI__builtin_ia32_blendps256:
- case X86::BI__builtin_ia32_pblendw256:
- case X86::BI__builtin_ia32_pblendd128:
- case X86::BI__builtin_ia32_pblendd256: {
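- // Worked example (illustrative): blendps(a, b, 0b0011) selects element i
- // from b when immediate bit i is set, giving {b0, b1, a2, a3}.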
- unsigned NumElts = Ops[0]->getType()->getVectorNumElements();
- unsigned Imm = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();
- uint32_t Indices[16];
- // If there are more than 8 elements, the 8-bit immediate is reused for
- // the upper elements, so index the immediate with (i % 8).
- for (unsigned i = 0; i != NumElts; ++i)
- Indices[i] = ((Imm >> (i % 8)) & 0x1) ? NumElts + i : i;
- return Builder.CreateShuffleVector(Ops[0], Ops[1],
- makeArrayRef(Indices, NumElts),
- "blend");
- }
- case X86::BI__builtin_ia32_pshuflw:
- case X86::BI__builtin_ia32_pshuflw256:
- case X86::BI__builtin_ia32_pshuflw512: {
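- // Worked example (illustrative): with Imm = 0x1B the 2-bit fields decode
- // to 3,2,1,0, so the low four words of each 128-bit lane are reversed and
- // the high four words pass through unchanged.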
- uint32_t Imm = cast<llvm::ConstantInt>(Ops[1])->getZExtValue();
- llvm::Type *Ty = Ops[0]->getType();
- unsigned NumElts = Ty->getVectorNumElements();
- // Splat the 8-bit immediate into all four bytes so the per-element bit
- // extraction wraps correctly as the loop crosses 128-bit lanes.
- Imm = (Imm & 0xff) * 0x01010101;
- uint32_t Indices[32];
- for (unsigned l = 0; l != NumElts; l += 8) {
- for (unsigned i = 0; i != 4; ++i) {
- Indices[l + i] = l + (Imm & 3);
- Imm >>= 2;
- }
- for (unsigned i = 4; i != 8; ++i)
- Indices[l + i] = l + i;
- }
- return Builder.CreateShuffleVector(Ops[0], UndefValue::get(Ty),
- makeArrayRef(Indices, NumElts),
- "pshuflw");
- }
- case X86::BI__builtin_ia32_pshufhw:
- case X86::BI__builtin_ia32_pshufhw256:
- case X86::BI__builtin_ia32_pshufhw512: {
- uint32_t Imm = cast<llvm::ConstantInt>(Ops[1])->getZExtValue();
- llvm::Type *Ty = Ops[0]->getType();
- unsigned NumElts = Ty->getVectorNumElements();
- // Splat the 8-bit immediate into all four bytes so the per-element bit
- // extraction wraps correctly as the loop crosses 128-bit lanes.
- Imm = (Imm & 0xff) * 0x01010101;
- uint32_t Indices[32];
- for (unsigned l = 0; l != NumElts; l += 8) {
- for (unsigned i = 0; i != 4; ++i)
- Indices[l + i] = l + i;
- for (unsigned i = 4; i != 8; ++i) {
- Indices[l + i] = l + 4 + (Imm & 3);
- Imm >>= 2;
- }
- }
- return Builder.CreateShuffleVector(Ops[0], UndefValue::get(Ty),
- makeArrayRef(Indices, NumElts),
- "pshufhw");
- }
- case X86::BI__builtin_ia32_pshufd:
- case X86::BI__builtin_ia32_pshufd256:
- case X86::BI__builtin_ia32_pshufd512:
- case X86::BI__builtin_ia32_vpermilpd:
- case X86::BI__builtin_ia32_vpermilps:
- case X86::BI__builtin_ia32_vpermilpd256:
- case X86::BI__builtin_ia32_vpermilps256:
- case X86::BI__builtin_ia32_vpermilpd512:
- case X86::BI__builtin_ia32_vpermilps512: {
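- // Worked example (illustrative): pshufd with Imm = 0x1B decodes to the
- // per-lane order 3,2,1,0, reversing the four elements in each 128-bit
- // lane.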
- uint32_t Imm = cast<llvm::ConstantInt>(Ops[1])->getZExtValue();
- llvm::Type *Ty = Ops[0]->getType();
- unsigned NumElts = Ty->getVectorNumElements();
- unsigned NumLanes = Ty->getPrimitiveSizeInBits() / 128;
- unsigned NumLaneElts = NumElts / NumLanes;
- // Splat the 8-bit immediate into all four bytes so the per-element bit
- // extraction wraps correctly as the loop crosses 128-bit lanes.
- Imm = (Imm & 0xff) * 0x01010101;
- uint32_t Indices[16];
- for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
- for (unsigned i = 0; i != NumLaneElts; ++i) {
- Indices[i + l] = (Imm % NumLaneElts) + l;
- Imm /= NumLaneElts;
- }
- }
- return Builder.CreateShuffleVector(Ops[0], UndefValue::get(Ty),
- makeArrayRef(Indices, NumElts),
- "permil");
- }
- case X86::BI__builtin_ia32_shufpd:
- case X86::BI__builtin_ia32_shufpd256:
- case X86::BI__builtin_ia32_shufpd512:
- case X86::BI__builtin_ia32_shufps:
- case X86::BI__builtin_ia32_shufps256:
- case X86::BI__builtin_ia32_shufps512: {
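- // Worked example (illustrative): shufps(a, b, 0x44) decodes to indices
- // 0,1,0,1; the low half of each output lane reads from a and the high
- // half from b, giving {a0, a1, b0, b1}.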
- uint32_t Imm = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();
- llvm::Type *Ty = Ops[0]->getType();
- unsigned NumElts = Ty->getVectorNumElements();
- unsigned NumLanes = Ty->getPrimitiveSizeInBits() / 128;
- unsigned NumLaneElts = NumElts / NumLanes;
- // Splat the 8-bit immediate into all four bytes so the per-element bit
- // extraction wraps correctly as the loop crosses 128-bit lanes.
- Imm = (Imm & 0xff) * 0x01010101;
- uint32_t Indices[16];
- for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
- for (unsigned i = 0; i != NumLaneElts; ++i) {
- unsigned Index = Imm % NumLaneElts;
- Imm /= NumLaneElts;
- if (i >= (NumLaneElts / 2))
- Index += NumElts;
- Indices[l + i] = l + Index;
- }
- }
- return Builder.CreateShuffleVector(Ops[0], Ops[1],
- makeArrayRef(Indices, NumElts),
- "shufp");
- }
- case X86::BI__builtin_ia32_permdi256:
- case X86::BI__builtin_ia32_permdf256:
- case X86::BI__builtin_ia32_permdi512:
- case X86::BI__builtin_ia32_permdf512: {
- unsigned Imm = cast<llvm::ConstantInt>(Ops[1])->getZExtValue();
- llvm::Type *Ty = Ops[0]->getType();
- unsigned NumElts = Ty->getVectorNumElements();
- // These intrinsics operate on 256-bit lanes of four 64-bit elements.
- uint32_t Indices[8];
- for (unsigned l = 0; l != NumElts; l += 4)
- for (unsigned i = 0; i != 4; ++i)
- Indices[l + i] = l + ((Imm >> (2 * i)) & 0x3);
- return Builder.CreateShuffleVector(Ops[0], UndefValue::get(Ty),
- makeArrayRef(Indices, NumElts),
- "perm");
- }
- case X86::BI__builtin_ia32_palignr128:
- case X86::BI__builtin_ia32_palignr256:
- case X86::BI__builtin_ia32_palignr512: {
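- // Worked example (illustrative): _mm_alignr_epi8(a, b, 4) concatenates
- // {b, a} per 128-bit lane and shifts right by 4 bytes, producing
- // {b4..b15, a0..a3}.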
- unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[2])->getZExtValue() & 0xff;
- unsigned NumElts = Ops[0]->getType()->getVectorNumElements();
- assert(NumElts % 16 == 0);
- // If palignr is shifting the pair of vectors more than the size of two
- // lanes, emit zero.
- if (ShiftVal >= 32)
- return llvm::Constant::getNullValue(ConvertType(E->getType()));
- // If palignr is shifting the pair of input vectors more than one lane,
- // but less than two lanes, convert to shifting in zeroes.
- if (ShiftVal > 16) {
- ShiftVal -= 16;
- Ops[1] = Ops[0];
- Ops[0] = llvm::Constant::getNullValue(Ops[0]->getType());
- }
- uint32_t Indices[64];
- // 256/512-bit palignr operates on 128-bit lanes, so handle each lane
- // separately.
- for (unsigned l = 0; l != NumElts; l += 16) {
- for (unsigned i = 0; i != 16; ++i) {
- unsigned Idx = ShiftVal + i;
- if (Idx >= 16)
- Idx += NumElts - 16; // End of lane, switch operand.
- Indices[l + i] = Idx + l;
- }
- }
- return Builder.CreateShuffleVector(Ops[1], Ops[0],
- makeArrayRef(Indices, NumElts),
- "palignr");
- }
- case X86::BI__builtin_ia32_alignd128:
- case X86::BI__builtin_ia32_alignd256:
- case X86::BI__builtin_ia32_alignd512:
- case X86::BI__builtin_ia32_alignq128:
- case X86::BI__builtin_ia32_alignq256:
- case X86::BI__builtin_ia32_alignq512: {
- unsigned NumElts = Ops[0]->getType()->getVectorNumElements();
- unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[2])->getZExtValue() & 0xff;
- // Mask the shift amount to the width of two vectors.
- ShiftVal &= (2 * NumElts) - 1;
- uint32_t Indices[16];
- for (unsigned i = 0; i != NumElts; ++i)
- Indices[i] = i + ShiftVal;
- return Builder.CreateShuffleVector(Ops[1], Ops[0],
- makeArrayRef(Indices, NumElts),
- "valign");
- }
- case X86::BI__builtin_ia32_shuf_f32x4_256:
- case X86::BI__builtin_ia32_shuf_f64x2_256:
- case X86::BI__builtin_ia32_shuf_i32x4_256:
- case X86::BI__builtin_ia32_shuf_i64x2_256:
- case X86::BI__builtin_ia32_shuf_f32x4:
- case X86::BI__builtin_ia32_shuf_f64x2:
- case X86::BI__builtin_ia32_shuf_i32x4:
- case X86::BI__builtin_ia32_shuf_i64x2: {
- unsigned Imm = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();
- llvm::Type *Ty = Ops[0]->getType();
- unsigned NumElts = Ty->getVectorNumElements();
- unsigned NumLanes = Ty->getPrimitiveSizeInBits() == 512 ? 4 : 2;
- unsigned NumLaneElts = NumElts / NumLanes;
- uint32_t Indices[16];
- for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
- unsigned Index = (Imm % NumLanes) * NumLaneElts;
- Imm /= NumLanes; // Discard the bits we just used.
- if (l >= (NumElts / 2))
- Index += NumElts; // Switch to other source.
- for (unsigned i = 0; i != NumLaneElts; ++i) {
- Indices[l + i] = Index + i;
- }
- }
- return Builder.CreateShuffleVector(Ops[0], Ops[1],
- makeArrayRef(Indices, NumElts),
- "shuf");
- }
- case X86::BI__builtin_ia32_vperm2f128_pd256:
- case X86::BI__builtin_ia32_vperm2f128_ps256:
- case X86::BI__builtin_ia32_vperm2f128_si256:
- case X86::BI__builtin_ia32_permti256: {
- unsigned Imm = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();
- unsigned NumElts = Ops[0]->getType()->getVectorNumElements();
- // This takes a very simple approach since there are two lanes and a
- // shuffle can have 2 inputs. So we reserve the first input for the first
- // lane and the second input for the second lane. This may result in
- // duplicate sources, but this can be dealt with in the backend.
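- // Immediate decoding, for illustration: each lane's 4-bit field selects
- // among a.low, a.high, b.low, b.high (values 0-3), and bit 3 zeroes the
- // lane; e.g. _mm256_permute2f128_pd(a, b, 0x31) yields {a.high, b.high}.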
- Value *OutOps[2];
- uint32_t Indices[8];
- for (unsigned l = 0; l != 2; ++l) {
- // Determine the source for this lane.
- if (Imm & (1 << ((l * 4) + 3)))
- OutOps[l] = llvm::ConstantAggregateZero::get(Ops[0]->getType());
- else if (Imm & (1 << ((l * 4) + 1)))
- OutOps[l] = Ops[1];
- else
- OutOps[l] = Ops[0];
- for (unsigned i = 0; i != NumElts/2; ++i) {
- // Start with ith element of the source for this lane.
- unsigned Idx = (l * NumElts) + i;
- // If bit 0 of the immediate half is set, switch to the high half of
- // the source.
- if (Imm & (1 << (l * 4)))
- Idx += NumElts/2;
- Indices[(l * (NumElts/2)) + i] = Idx;
- }
- }
- return Builder.CreateShuffleVector(OutOps[0], OutOps[1],
- makeArrayRef(Indices, NumElts),
- "vperm");
- }
- case X86::BI__builtin_ia32_pslldqi128_byteshift:
- case X86::BI__builtin_ia32_pslldqi256_byteshift:
- case X86::BI__builtin_ia32_pslldqi512_byteshift: {
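- // Whole-vector view (illustrative): within each 128-bit lane, byte i of
- // the result is 0 when i < ShiftVal and src[i - ShiftVal] otherwise, i.e.
- // a left byte-shift that fills with zeros from the low end.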
- unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[1])->getZExtValue() & 0xff;
- llvm::Type *ResultType = Ops[0]->getType();
- // The builtin type is vXi64, so multiply by 8 to get the byte count.
- unsigned NumElts = ResultType->getVectorNumElements() * 8;
- // If pslldq is shifting the vector more than 15 bytes, emit zero.
- if (ShiftVal >= 16)
- return llvm::Constant::getNullValue(ResultType);
- uint32_t Indices[64];
- // 256/512-bit pslldq operates on 128-bit lanes, so handle each lane
- // separately.
- for (unsigned l = 0; l != NumElts; l += 16) {
- for (unsigned i = 0; i != 16; ++i) {
- unsigned Idx = NumElts + i - ShiftVal;
- if (Idx < NumElts) Idx -= NumElts - 16; // end of lane, switch operand.
- Indices[l + i] = Idx + l;
- }
- }
- llvm::Type *VecTy = llvm::VectorType::get(Int8Ty, NumElts);
- Value *Cast = Builder.CreateBitCast(Ops[0], VecTy, "cast");
- Value *Zero = llvm::Constant::getNullValue(VecTy);
- Value *SV = Builder.CreateShuffleVector(Zero, Cast,
- makeArrayRef(Indices, NumElts),
- "pslldq");
- return Builder.CreateBitCast(SV, Ops[0]->getType(), "cast");
- }
- case X86::BI__builtin_ia32_psrldqi128_byteshift:
- case X86::BI__builtin_ia32_psrldqi256_byteshift:
- case X86::BI__builtin_ia32_psrldqi512_byteshift: {
- unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[1])->getZExtValue() & 0xff;
- llvm::Type *ResultType = Ops[0]->getType();
- // The builtin type is vXi64, so multiply by 8 to get the byte count.
- unsigned NumElts = ResultType->getVectorNumElements() * 8;
- // If psrldq is shifting the vector more than 15 bytes, emit zero.
- if (ShiftVal >= 16)
- return llvm::Constant::getNullValue(ResultType);
- uint32_t Indices[64];
- // 256/512-bit psrldq operates on 128-bit lanes, so handle each lane
- // separately.
- for (unsigned l = 0; l != NumElts; l += 16) {
- for (unsigned i = 0; i != 16; ++i) {
- unsigned Idx = i + ShiftVal;
- if (Idx >= 16) Idx += NumElts - 16; // end of lane, switch operand.
- Indices[l + i] = Idx + l;
- }
- }
- llvm::Type *VecTy = llvm::VectorType::get(Int8Ty, NumElts);
- Value *Cast = Builder.CreateBitCast(Ops[0], VecTy, "cast");
- Value *Zero = llvm::Constant::getNullValue(VecTy);
- Value *SV = Builder.CreateShuffleVector(Cast, Zero,
- makeArrayRef(Indices, NumElts),
- "psrldq");
- return Builder.CreateBitCast(SV, ResultType, "cast");
- }
- case X86::BI__builtin_ia32_kshiftliqi:
- case X86::BI__builtin_ia32_kshiftlihi:
- case X86::BI__builtin_ia32_kshiftlisi:
- case X86::BI__builtin_ia32_kshiftlidi: {
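- // Mask bit i of the result comes from bit i - ShiftVal of the input (or 0
- // when i < ShiftVal), so e.g. kshiftl(m, 2) on a 16-bit mask equals
- // (m << 2) & 0xFFFF.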
- unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[1])->getZExtValue() & 0xff;
- unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth();
- if (ShiftVal >= NumElts)
- return llvm::Constant::getNullValue(Ops[0]->getType());
- Value *In = getMaskVecValue(*this, Ops[0], NumElts);
- uint32_t Indices[64];
- for (unsigned i = 0; i != NumElts; ++i)
- Indices[i] = NumElts + i - ShiftVal;
- Value *Zero = llvm::Constant::getNullValue(In->getType());
- Value *SV = Builder.CreateShuffleVector(Zero, In,
- makeArrayRef(Indices, NumElts),
- "kshiftl");
- return Builder.CreateBitCast(SV, Ops[0]->getType());
- }
- case X86::BI__builtin_ia32_kshiftriqi:
- case X86::BI__builtin_ia32_kshiftrihi:
- case X86::BI__builtin_ia32_kshiftrisi:
- case X86::BI__builtin_ia32_kshiftridi: {
- unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[1])->getZExtValue() & 0xff;
- unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth();
- if (ShiftVal >= NumElts)
- return llvm::Constant::getNullValue(Ops[0]->getType());
- Value *In = getMaskVecValue(*this, Ops[0], NumElts);
- uint32_t Indices[64];
- for (unsigned i = 0; i != NumElts; ++i)
- Indices[i] = i + ShiftVal;
- Value *Zero = llvm::Constant::getNullValue(In->getType());
- Value *SV = Builder.CreateShuffleVector(In, Zero,
- makeArrayRef(Indices, NumElts),
- "kshiftr");
- return Builder.CreateBitCast(SV, Ops[0]->getType());
- }
- case X86::BI__builtin_ia32_movnti:
- case X86::BI__builtin_ia32_movnti64:
- case X86::BI__builtin_ia32_movntsd:
- case X86::BI__builtin_ia32_movntss: {
- llvm::MDNode *Node = llvm::MDNode::get(
- getLLVMContext(), llvm::ConstantAsMetadata::get(Builder.getInt32(1)));
- Value *Ptr = Ops[0];
- Value *Src = Ops[1];
- // Extract the 0th element of the source vector.
- if (BuiltinID == X86::BI__builtin_ia32_movntsd ||
- BuiltinID == X86::BI__builtin_ia32_movntss)
- Src = Builder.CreateExtractElement(Src, (uint64_t)0, "extract");
- // Convert the type of the pointer to a pointer to the stored type.
- Value *BC = Builder.CreateBitCast(
- Ptr, llvm::PointerType::getUnqual(Src->getType()), "cast");
- // Unaligned nontemporal store of the scalar value.
- StoreInst *SI = Builder.CreateDefaultAlignedStore(Src, BC);
- SI->setMetadata(CGM.getModule().getMDKindID("nontemporal"), Node);
- SI->setAlignment(1);
- return SI;
- }
- // Rotate is a special case of funnel shift: the first two arguments are
- // the same.
- case X86::BI__builtin_ia32_vprotb:
- case X86::BI__builtin_ia32_vprotw:
- case X86::BI__builtin_ia32_vprotd:
- case X86::BI__builtin_ia32_vprotq:
- case X86::BI__builtin_ia32_vprotbi:
- case X86::BI__builtin_ia32_vprotwi:
- case X86::BI__builtin_ia32_vprotdi:
- case X86::BI__builtin_ia32_vprotqi:
- case X86::BI__builtin_ia32_prold128:
- case X86::BI__builtin_ia32_prold256:
- case X86::BI__builtin_ia32_prold512:
- case X86::BI__builtin_ia32_prolq128:
- case X86::BI__builtin_ia32_prolq256:
- case X86::BI__builtin_ia32_prolq512:
- case X86::BI__builtin_ia32_prolvd128:
- case X86::BI__builtin_ia32_prolvd256:
- case X86::BI__builtin_ia32_prolvd512:
- case X86::BI__builtin_ia32_prolvq128:
- case X86::BI__builtin_ia32_prolvq256:
- case X86::BI__builtin_ia32_prolvq512:
- return EmitX86FunnelShift(*this, Ops[0], Ops[0], Ops[1], false);
- case X86::BI__builtin_ia32_prord128:
- case X86::BI__builtin_ia32_prord256:
- case X86::BI__builtin_ia32_prord512:
- case X86::BI__builtin_ia32_prorq128:
- case X86::BI__builtin_ia32_prorq256:
- case X86::BI__builtin_ia32_prorq512:
- case X86::BI__builtin_ia32_prorvd128:
- case X86::BI__builtin_ia32_prorvd256:
- case X86::BI__builtin_ia32_prorvd512:
- case X86::BI__builtin_ia32_prorvq128:
- case X86::BI__builtin_ia32_prorvq256:
- case X86::BI__builtin_ia32_prorvq512:
- return EmitX86FunnelShift(*this, Ops[0], Ops[0], Ops[1], true);
- case X86::BI__builtin_ia32_selectb_128:
- case X86::BI__builtin_ia32_selectb_256:
- case X86::BI__builtin_ia32_selectb_512:
- case X86::BI__builtin_ia32_selectw_128:
- case X86::BI__builtin_ia32_selectw_256:
- case X86::BI__builtin_ia32_selectw_512:
- case X86::BI__builtin_ia32_selectd_128:
- case X86::BI__builtin_ia32_selectd_256:
- case X86::BI__builtin_ia32_selectd_512:
- case X86::BI__builtin_ia32_selectq_128:
- case X86::BI__builtin_ia32_selectq_256:
- case X86::BI__builtin_ia32_selectq_512:
- case X86::BI__builtin_ia32_selectps_128:
- case X86::BI__builtin_ia32_selectps_256:
- case X86::BI__builtin_ia32_selectps_512:
- case X86::BI__builtin_ia32_selectpd_128:
- case X86::BI__builtin_ia32_selectpd_256:
- case X86::BI__builtin_ia32_selectpd_512:
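- // Per-element select: result[i] = mask bit i ? Ops[1][i] : Ops[2][i].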
- return EmitX86Select(*this, Ops[0], Ops[1], Ops[2]);
- case X86::BI__builtin_ia32_selectss_128:
- case X86::BI__builtin_ia32_selectsd_128: {
- Value *A = Builder.CreateExtractElement(Ops[1], (uint64_t)0);
- Value *B = Builder.CreateExtractElement(Ops[2], (uint64_t)0);
- A = EmitX86ScalarSelect(*this, Ops[0], A, B);
- return Builder.CreateInsertElement(Ops[1], A, (uint64_t)0);
- }
- case X86::BI__builtin_ia32_cmpb128_mask:
- case X86::BI__builtin_ia32_cmpb256_mask:
- case X86::BI__builtin_ia32_cmpb512_mask:
- case X86::BI__builtin_ia32_cmpw128_mask:
- case X86::BI__builtin_ia32_cmpw256_mask:
- case X86::BI__builtin_ia32_cmpw512_mask:
- case X86::BI__builtin_ia32_cmpd128_mask:
- case X86::BI__builtin_ia32_cmpd256_mask:
- case X86::BI__builtin_ia32_cmpd512_mask:
- case X86::BI__builtin_ia32_cmpq128_mask:
- case X86::BI__builtin_ia32_cmpq256_mask:
- case X86::BI__builtin_ia32_cmpq512_mask: {
- unsigned CC = cast<llvm::ConstantInt>(Ops[2])->getZExtValue() & 0x7;
- return EmitX86MaskedCompare(*this, CC, true, Ops);
- }
- case X86::BI__builtin_ia32_ucmpb128_mask:
- case X86::BI__builtin_ia32_ucmpb256_mask:
- case X86::BI__builtin_ia32_ucmpb512_mask:
- case X86::BI__builtin_ia32_ucmpw128_mask:
- case X86::BI__builtin_ia32_ucmpw256_mask:
- case X86::BI__builtin_ia32_ucmpw512_mask:
- case X86::BI__builtin_ia32_ucmpd128_mask:
- case X86::BI__builtin_ia32_ucmpd256_mask:
- case X86::BI__builtin_ia32_ucmpd512_mask:
- case X86::BI__builtin_ia32_ucmpq128_mask:
- case X86::BI__builtin_ia32_ucmpq256_mask:
- case X86::BI__builtin_ia32_ucmpq512_mask: {
- unsigned CC = cast<llvm::ConstantInt>(Ops[2])->getZExtValue() & 0x7;
- return EmitX86MaskedCompare(*this, CC, false, Ops);
- }
- case X86::BI__builtin_ia32_vpcomb:
- case X86::BI__builtin_ia32_vpcomw:
- case X86::BI__builtin_ia32_vpcomd:
- case X86::BI__builtin_ia32_vpcomq:
- return EmitX86vpcom(*this, Ops, true);
- case X86::BI__builtin_ia32_vpcomub:
- case X86::BI__builtin_ia32_vpcomuw:
- case X86::BI__builtin_ia32_vpcomud:
- case X86::BI__builtin_ia32_vpcomuq:
- return EmitX86vpcom(*this, Ops, false);
- case X86::BI__builtin_ia32_kortestcqi:
- case X86::BI__builtin_ia32_kortestchi:
- case X86::BI__builtin_ia32_kortestcsi:
- case X86::BI__builtin_ia32_kortestcdi: {
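- // kortestc returns the carry flag of KORTEST: 1 iff (A | B) is all ones.
- // The kortestz cases below return the zero flag: 1 iff (A | B) is zero.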
- Value *Or = EmitX86MaskLogic(*this, Instruction::Or, Ops);
- Value *C = llvm::Constant::getAllOnesValue(Ops[0]->getType());
- Value *Cmp = Builder.CreateICmpEQ(Or, C);
- return Builder.CreateZExt(Cmp, ConvertType(E->getType()));
- }
- case X86::BI__builtin_ia32_kortestzqi:
- case X86::BI__builtin_ia32_kortestzhi:
- case X86::BI__builtin_ia32_kortestzsi:
- case X86::BI__builtin_ia32_kortestzdi: {
- Value *Or = EmitX86MaskLogic(*this, Instruction::Or, Ops);
- Value *C = llvm::Constant::getNullValue(Ops[0]->getType());
- Value *Cmp = Builder.CreateICmpEQ(Or, C);
- return Builder.CreateZExt(Cmp, ConvertType(E->getType()));
- }
- case X86::BI__builtin_ia32_ktestcqi:
- case X86::BI__builtin_ia32_ktestzqi:
- case X86::BI__builtin_ia32_ktestchi:
- case X86::BI__builtin_ia32_ktestzhi:
- case X86::BI__builtin_ia32_ktestcsi:
- case X86::BI__builtin_ia32_ktestzsi:
- case X86::BI__builtin_ia32_ktestcdi:
- case X86::BI__builtin_ia32_ktestzdi: {
- Intrinsic::ID IID;
- switch (BuiltinID) {
- default: llvm_unreachable("Unsupported intrinsic!");
- case X86::BI__builtin_ia32_ktestcqi:
- IID = Intrinsic::x86_avx512_ktestc_b;
- break;
- case X86::BI__builtin_ia32_ktestzqi:
- IID = Intrinsic::x86_avx512_ktestz_b;
- break;
- case X86::BI__builtin_ia32_ktestchi:
- IID = Intrinsic::x86_avx512_ktestc_w;
- break;
- case X86::BI__builtin_ia32_ktestzhi:
- IID = Intrinsic::x86_avx512_ktestz_w;
- break;
- case X86::BI__builtin_ia32_ktestcsi:
- IID = Intrinsic::x86_avx512_ktestc_d;
- break;
- case X86::BI__builtin_ia32_ktestzsi:
- IID = Intrinsic::x86_avx512_ktestz_d;
- break;
- case X86::BI__builtin_ia32_ktestcdi:
- IID = Intrinsic::x86_avx512_ktestc_q;
- break;
- case X86::BI__builtin_ia32_ktestzdi:
- IID = Intrinsic::x86_avx512_ktestz_q;
- break;
- }
- unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth();
- Value *LHS = getMaskVecValue(*this, Ops[0], NumElts);
- Value *RHS = getMaskVecValue(*this, Ops[1], NumElts);
- Function *Intr = CGM.getIntrinsic(IID);
- return Builder.CreateCall(Intr, {LHS, RHS});
- }
- case X86::BI__builtin_ia32_kaddqi:
- case X86::BI__builtin_ia32_kaddhi:
- case X86::BI__builtin_ia32_kaddsi:
- case X86::BI__builtin_ia32_kadddi: {
- Intrinsic::ID IID;
- switch (BuiltinID) {
- default: llvm_unreachable("Unsupported intrinsic!");
- case X86::BI__builtin_ia32_kaddqi:
- IID = Intrinsic::x86_avx512_kadd_b;
- break;
- case X86::BI__builtin_ia32_kaddhi:
- IID = Intrinsic::x86_avx512_kadd_w;
- break;
- case X86::BI__builtin_ia32_kaddsi:
- IID = Intrinsic::x86_avx512_kadd_d;
- break;
- case X86::BI__builtin_ia32_kadddi:
- IID = Intrinsic::x86_avx512_kadd_q;
- break;
- }
- unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth();
- Value *LHS = getMaskVecValue(*this, Ops[0], NumElts);
- Value *RHS = getMaskVecValue(*this, Ops[1], NumElts);
- Function *Intr = CGM.getIntrinsic(IID);
- Value *Res = Builder.CreateCall(Intr, {LHS, RHS});
- return Builder.CreateBitCast(Res, Ops[0]->getType());
- }
- case X86::BI__builtin_ia32_kandqi:
- case X86::BI__builtin_ia32_kandhi:
- case X86::BI__builtin_ia32_kandsi:
- case X86::BI__builtin_ia32_kanddi:
- return EmitX86MaskLogic(*this, Instruction::And, Ops);
- case X86::BI__builtin_ia32_kandnqi:
- case X86::BI__builtin_ia32_kandnhi:
- case X86::BI__builtin_ia32_kandnsi:
- case X86::BI__builtin_ia32_kandndi:
- return EmitX86MaskLogic(*this, Instruction::And, Ops, true);
- case X86::BI__builtin_ia32_korqi:
- case X86::BI__builtin_ia32_korhi:
- case X86::BI__builtin_ia32_korsi:
- case X86::BI__builtin_ia32_kordi:
- return EmitX86MaskLogic(*this, Instruction::Or, Ops);
- case X86::BI__builtin_ia32_kxnorqi:
- case X86::BI__builtin_ia32_kxnorhi:
- case X86::BI__builtin_ia32_kxnorsi:
- case X86::BI__builtin_ia32_kxnordi:
- return EmitX86MaskLogic(*this, Instruction::Xor, Ops, true);
- case X86::BI__builtin_ia32_kxorqi:
- case X86::BI__builtin_ia32_kxorhi:
- case X86::BI__builtin_ia32_kxorsi:
- case X86::BI__builtin_ia32_kxordi:
- return EmitX86MaskLogic(*this, Instruction::Xor, Ops);
- case X86::BI__builtin_ia32_knotqi:
- case X86::BI__builtin_ia32_knothi:
- case X86::BI__builtin_ia32_knotsi:
- case X86::BI__builtin_ia32_knotdi: {
- unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth();
- Value *Res = getMaskVecValue(*this, Ops[0], NumElts);
- return Builder.CreateBitCast(Builder.CreateNot(Res),
- Ops[0]->getType());
- }
- case X86::BI__builtin_ia32_kmovb:
- case X86::BI__builtin_ia32_kmovw:
- case X86::BI__builtin_ia32_kmovd:
- case X86::BI__builtin_ia32_kmovq: {
- // Bitcast to vXi1 type and then back to integer. This gets the mask
- // register type into the IR, but might be optimized out depending on
- // what's around it.
- unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth();
- Value *Res = getMaskVecValue(*this, Ops[0], NumElts);
- return Builder.CreateBitCast(Res, Ops[0]->getType());
- }
- case X86::BI__builtin_ia32_kunpckdi:
- case X86::BI__builtin_ia32_kunpcksi:
- case X86::BI__builtin_ia32_kunpckhi: {
- unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth();
- Value *LHS = getMaskVecValue(*this, Ops[0], NumElts);
- Value *RHS = getMaskVecValue(*this, Ops[1], NumElts);
- uint32_t Indices[64];
- for (unsigned i = 0; i != NumElts; ++i)
- Indices[i] = i;
- // First extract half of each vector. This gives better codegen than
- // doing it in a single shuffle.
- LHS = Builder.CreateShuffleVector(LHS, LHS,
- makeArrayRef(Indices, NumElts / 2));
- RHS = Builder.CreateShuffleVector(RHS, RHS,
- makeArrayRef(Indices, NumElts / 2));
- // Concat the vectors.
- // NOTE: Operands are swapped to match the intrinsic definition.
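- // e.g. for kunpckhi the result's low 8 bits come from Ops[1] and its
- // high 8 bits from Ops[0].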
- Value *Res = Builder.CreateShuffleVector(RHS, LHS,
- makeArrayRef(Indices, NumElts));
- return Builder.CreateBitCast(Res, Ops[0]->getType());
- }
- case X86::BI__builtin_ia32_vplzcntd_128:
- case X86::BI__builtin_ia32_vplzcntd_256:
- case X86::BI__builtin_ia32_vplzcntd_512:
- case X86::BI__builtin_ia32_vplzcntq_128:
- case X86::BI__builtin_ia32_vplzcntq_256:
- case X86::BI__builtin_ia32_vplzcntq_512: {
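- // Lower to the generic ctlz intrinsic; the i1 false flag requests a
- // defined result (the bit width) for a zero input.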
- Function *F = CGM.getIntrinsic(Intrinsic::ctlz, Ops[0]->getType());
- return Builder.CreateCall(F, {Ops[0], Builder.getInt1(false)});
- }
- case X86::BI__builtin_ia32_sqrtss:
- case X86::BI__builtin_ia32_sqrtsd: {
- Value *A = Builder.CreateExtractElement(Ops[0], (uint64_t)0);
- Function *F = CGM.getIntrinsic(Intrinsic::sqrt, A->getType());
- A = Builder.CreateCall(F, {A});
- return Builder.CreateInsertElement(Ops[0], A, (uint64_t)0);
- }
- case X86::BI__builtin_ia32_sqrtsd_round_mask:
- case X86::BI__builtin_ia32_sqrtss_round_mask: {
- unsigned CC = cast<llvm::ConstantInt>(Ops[4])->getZExtValue();
- // Lower to a plain sqrt only if the rounding mode is 4
- // (_MM_FROUND_CUR_DIRECTION); otherwise keep the target intrinsic.
- if (CC != 4) {
- Intrinsic::ID IID = BuiltinID == X86::BI__builtin_ia32_sqrtsd_round_mask ?
- Intrinsic::x86_avx512_mask_sqrt_sd :
- Intrinsic::x86_avx512_mask_sqrt_ss;
- return Builder.CreateCall(CGM.getIntrinsic(IID), Ops);
- }
- Value *A = Builder.CreateExtractElement(Ops[1], (uint64_t)0);
- Function *F = CGM.getIntrinsic(Intrinsic::sqrt, A->getType());
- A = Builder.CreateCall(F, A);
- Value *Src = Builder.CreateExtractElement(Ops[2], (uint64_t)0);
- A = EmitX86ScalarSelect(*this, Ops[3], A, Src);
- return Builder.CreateInsertElement(Ops[0], A, (uint64_t)0);
- }
- case X86::BI__builtin_ia32_sqrtpd256:
- case X86::BI__builtin_ia32_sqrtpd:
- case X86::BI__builtin_ia32_sqrtps256:
- case X86::BI__builtin_ia32_sqrtps:
- case X86::BI__builtin_ia32_sqrtps512:
- case X86::BI__builtin_ia32_sqrtpd512: {
- if (Ops.size() == 2) {
- unsigned CC = cast<llvm::ConstantInt>(Ops[1])->getZExtValue();
- // Lower to a plain sqrt only if the rounding mode is 4
- // (_MM_FROUND_CUR_DIRECTION); otherwise keep the target intrinsic.
- if (CC != 4) {
- Intrinsic::ID IID = BuiltinID == X86::BI__builtin_ia32_sqrtps512 ?
- Intrinsic::x86_avx512_sqrt_ps_512 :
- Intrinsic::x86_avx512_sqrt_pd_512;
- return Builder.CreateCall(CGM.getIntrinsic(IID), Ops);
- }
- }
- Function *F = CGM.getIntrinsic(Intrinsic::sqrt, Ops[0]->getType());
- return Builder.CreateCall(F, Ops[0]);
- }
- case X86::BI__builtin_ia32_pabsb128:
- case X86::BI__builtin_ia32_pabsw128:
- case X86::BI__builtin_ia32_pabsd128:
- case X86::BI__builtin_ia32_pabsb256:
- case X86::BI__builtin_ia32_pabsw256:
- case X86::BI__builtin_ia32_pabsd256:
- case X86::BI__builtin_ia32_pabsq128:
- case X86::BI__builtin_ia32_pabsq256:
- case X86::BI__builtin_ia32_pabsb512:
- case X86::BI__builtin_ia32_pabsw512:
- case X86::BI__builtin_ia32_pabsd512:
- case X86::BI__builtin_ia32_pabsq512:
- return EmitX86Abs(*this, Ops);
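- // Packed integer min/max. EmitX86MinMax emits an icmp of the given
- // predicate followed by a select, leaving instruction selection to the
- // backend.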
- case X86::BI__builtin_ia32_pmaxsb128:
- case X86::BI__builtin_ia32_pmaxsw128:
- case X86::BI__builtin_ia32_pmaxsd128:
- case X86::BI__builtin_ia32_pmaxsq128:
- case X86::BI__builtin_ia32_pmaxsb256:
- case X86::BI__builtin_ia32_pmaxsw256:
- case X86::BI__builtin_ia32_pmaxsd256:
- case X86::BI__builtin_ia32_pmaxsq256:
- case X86::BI__builtin_ia32_pmaxsb512:
- case X86::BI__builtin_ia32_pmaxsw512:
- case X86::BI__builtin_ia32_pmaxsd512:
- case X86::BI__builtin_ia32_pmaxsq512:
- return EmitX86MinMax(*this, ICmpInst::ICMP_SGT, Ops);
- case X86::BI__builtin_ia32_pmaxub128:
- case X86::BI__builtin_ia32_pmaxuw128:
- case X86::BI__builtin_ia32_pmaxud128:
- case X86::BI__builtin_ia32_pmaxuq128:
- case X86::BI__builtin_ia32_pmaxub256:
- case X86::BI__builtin_ia32_pmaxuw256:
- case X86::BI__builtin_ia32_pmaxud256:
- case X86::BI__builtin_ia32_pmaxuq256:
- case X86::BI__builtin_ia32_pmaxub512:
- case X86::BI__builtin_ia32_pmaxuw512:
- case X86::BI__builtin_ia32_pmaxud512:
- case X86::BI__builtin_ia32_pmaxuq512:
- return EmitX86MinMax(*this, ICmpInst::ICMP_UGT, Ops);
- case X86::BI__builtin_ia32_pminsb128:
- case X86::BI__builtin_ia32_pminsw128:
- case X86::BI__builtin_ia32_pminsd128:
- case X86::BI__builtin_ia32_pminsq128:
- case X86::BI__builtin_ia32_pminsb256:
- case X86::BI__builtin_ia32_pminsw256:
- case X86::BI__builtin_ia32_pminsd256:
- case X86::BI__builtin_ia32_pminsq256:
- case X86::BI__builtin_ia32_pminsb512:
- case X86::BI__builtin_ia32_pminsw512:
- case X86::BI__builtin_ia32_pminsd512:
- case X86::BI__builtin_ia32_pminsq512:
- return EmitX86MinMax(*this, ICmpInst::ICMP_SLT, Ops);
- case X86::BI__builtin_ia32_pminub128:
- case X86::BI__builtin_ia32_pminuw128:
- case X86::BI__builtin_ia32_pminud128:
- case X86::BI__builtin_ia32_pminuq128:
- case X86::BI__builtin_ia32_pminub256:
- case X86::BI__builtin_ia32_pminuw256:
- case X86::BI__builtin_ia32_pminud256:
- case X86::BI__builtin_ia32_pminuq256:
- case X86::BI__builtin_ia32_pminub512:
- case X86::BI__builtin_ia32_pminuw512:
- case X86::BI__builtin_ia32_pminud512:
- case X86::BI__builtin_ia32_pminuq512:
- return EmitX86MinMax(*this, ICmpInst::ICMP_ULT, Ops);
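- // pmuludq/pmuldq multiply the low 32 bits of each 64-bit lane,
- // zero- or sign-extended, producing a full 64-bit product per lane.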
- case X86::BI__builtin_ia32_pmuludq128:
- case X86::BI__builtin_ia32_pmuludq256:
- case X86::BI__builtin_ia32_pmuludq512:
- return EmitX86Muldq(*this, /*IsSigned*/false, Ops);
- case X86::BI__builtin_ia32_pmuldq128:
- case X86::BI__builtin_ia32_pmuldq256:
- case X86::BI__builtin_ia32_pmuldq512:
- return EmitX86Muldq(*this, /*IsSigned*/true, Ops);
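- // For vpternlog the immediate byte is a truth table: each result bit
- // is Imm[(a << 2) | (b << 1) | c] for the corresponding bits a, b, c
- // of the three sources.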
- case X86::BI__builtin_ia32_pternlogd512_mask:
- case X86::BI__builtin_ia32_pternlogq512_mask:
- case X86::BI__builtin_ia32_pternlogd128_mask:
- case X86::BI__builtin_ia32_pternlogd256_mask:
- case X86::BI__builtin_ia32_pternlogq128_mask:
- case X86::BI__builtin_ia32_pternlogq256_mask:
- return EmitX86Ternlog(*this, /*ZeroMask*/false, Ops);
- case X86::BI__builtin_ia32_pternlogd512_maskz:
- case X86::BI__builtin_ia32_pternlogq512_maskz:
- case X86::BI__builtin_ia32_pternlogd128_maskz:
- case X86::BI__builtin_ia32_pternlogd256_maskz:
- case X86::BI__builtin_ia32_pternlogq128_maskz:
- case X86::BI__builtin_ia32_pternlogq256_maskz:
- return EmitX86Ternlog(*this, /*ZeroMask*/true, Ops);
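- // Concatenate-and-shift builtins, expressed here via generic funnel
- // shifts: fshl(a, b, c) shifts the double-width value a:b left by
- // c % bitwidth and returns the high half.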
- case X86::BI__builtin_ia32_vpshldd128:
- case X86::BI__builtin_ia32_vpshldd256:
- case X86::BI__builtin_ia32_vpshldd512:
- case X86::BI__builtin_ia32_vpshldq128:
- case X86::BI__builtin_ia32_vpshldq256:
- case X86::BI__builtin_ia32_vpshldq512:
- case X86::BI__builtin_ia32_vpshldw128:
- case X86::BI__builtin_ia32_vpshldw256:
- case X86::BI__builtin_ia32_vpshldw512:
- return EmitX86FunnelShift(*this, Ops[0], Ops[1], Ops[2], false);
- case X86::BI__builtin_ia32_vpshrdd128:
- case X86::BI__builtin_ia32_vpshrdd256:
- case X86::BI__builtin_ia32_vpshrdd512:
- case X86::BI__builtin_ia32_vpshrdq128:
- case X86::BI__builtin_ia32_vpshrdq256:
- case X86::BI__builtin_ia32_vpshrdq512:
- case X86::BI__builtin_ia32_vpshrdw128:
- case X86::BI__builtin_ia32_vpshrdw256:
- case X86::BI__builtin_ia32_vpshrdw512:
- // Ops 0 and 1 are swapped.
- return EmitX86FunnelShift(*this, Ops[1], Ops[0], Ops[2], true);
- case X86::BI__builtin_ia32_vpshldvd128:
- case X86::BI__builtin_ia32_vpshldvd256:
- case X86::BI__builtin_ia32_vpshldvd512:
- case X86::BI__builtin_ia32_vpshldvq128:
- case X86::BI__builtin_ia32_vpshldvq256:
- case X86::BI__builtin_ia32_vpshldvq512:
- case X86::BI__builtin_ia32_vpshldvw128:
- case X86::BI__builtin_ia32_vpshldvw256:
- case X86::BI__builtin_ia32_vpshldvw512:
- return EmitX86FunnelShift(*this, Ops[0], Ops[1], Ops[2], false);
- case X86::BI__builtin_ia32_vpshrdvd128:
- case X86::BI__builtin_ia32_vpshrdvd256:
- case X86::BI__builtin_ia32_vpshrdvd512:
- case X86::BI__builtin_ia32_vpshrdvq128:
- case X86::BI__builtin_ia32_vpshrdvq256:
- case X86::BI__builtin_ia32_vpshrdvq512:
- case X86::BI__builtin_ia32_vpshrdvw128:
- case X86::BI__builtin_ia32_vpshrdvw256:
- case X86::BI__builtin_ia32_vpshrdvw512:
- // Ops 0 and 1 are swapped.
- return EmitX86FunnelShift(*this, Ops[1], Ops[0], Ops[2], true);
- // 3DNow!
- case X86::BI__builtin_ia32_pswapdsf:
- case X86::BI__builtin_ia32_pswapdsi: {
- llvm::Type *MMXTy = llvm::Type::getX86_MMXTy(getLLVMContext());
- Ops[0] = Builder.CreateBitCast(Ops[0], MMXTy, "cast");
- llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_3dnowa_pswapd);
- return Builder.CreateCall(F, Ops, "pswapd");
- }
- case X86::BI__builtin_ia32_rdrand16_step:
- case X86::BI__builtin_ia32_rdrand32_step:
- case X86::BI__builtin_ia32_rdrand64_step:
- case X86::BI__builtin_ia32_rdseed16_step:
- case X86::BI__builtin_ia32_rdseed32_step:
- case X86::BI__builtin_ia32_rdseed64_step: {
- Intrinsic::ID ID;
- switch (BuiltinID) {
- default: llvm_unreachable("Unsupported intrinsic!");
- case X86::BI__builtin_ia32_rdrand16_step:
- ID = Intrinsic::x86_rdrand_16;
- break;
- case X86::BI__builtin_ia32_rdrand32_step:
- ID = Intrinsic::x86_rdrand_32;
- break;
- case X86::BI__builtin_ia32_rdrand64_step:
- ID = Intrinsic::x86_rdrand_64;
- break;
- case X86::BI__builtin_ia32_rdseed16_step:
- ID = Intrinsic::x86_rdseed_16;
- break;
- case X86::BI__builtin_ia32_rdseed32_step:
- ID = Intrinsic::x86_rdseed_32;
- break;
- case X86::BI__builtin_ia32_rdseed64_step:
- ID = Intrinsic::x86_rdseed_64;
- break;
- }
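- // The intrinsic returns {random value, i32 flag}; store the value
- // through the out pointer and return the flag (1 on success).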
- Value *Call = Builder.CreateCall(CGM.getIntrinsic(ID));
- Builder.CreateDefaultAlignedStore(Builder.CreateExtractValue(Call, 0),
- Ops[0]);
- return Builder.CreateExtractValue(Call, 1);
- }
- case X86::BI__builtin_ia32_addcarryx_u32:
- case X86::BI__builtin_ia32_addcarryx_u64:
- case X86::BI__builtin_ia32_subborrow_u32:
- case X86::BI__builtin_ia32_subborrow_u64: {
- Intrinsic::ID IID;
- switch (BuiltinID) {
- default: llvm_unreachable("Unsupported intrinsic!");
- case X86::BI__builtin_ia32_addcarryx_u32:
- IID = Intrinsic::x86_addcarry_32;
- break;
- case X86::BI__builtin_ia32_addcarryx_u64:
- IID = Intrinsic::x86_addcarry_64;
- break;
- case X86::BI__builtin_ia32_subborrow_u32:
- IID = Intrinsic::x86_subborrow_32;
- break;
- case X86::BI__builtin_ia32_subborrow_u64:
- IID = Intrinsic::x86_subborrow_64;
- break;
- }
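- // These intrinsics return {i8 carry-out, iN sum}; store the sum
- // through the out pointer (Ops[3]) and return the carry-out.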
- Value *Call = Builder.CreateCall(CGM.getIntrinsic(IID),
- { Ops[0], Ops[1], Ops[2] });
- Builder.CreateDefaultAlignedStore(Builder.CreateExtractValue(Call, 1),
- Ops[3]);
- return Builder.CreateExtractValue(Call, 0);
- }
- case X86::BI__builtin_ia32_fpclassps128_mask:
- case X86::BI__builtin_ia32_fpclassps256_mask:
- case X86::BI__builtin_ia32_fpclassps512_mask:
- case X86::BI__builtin_ia32_fpclasspd128_mask:
- case X86::BI__builtin_ia32_fpclasspd256_mask:
- case X86::BI__builtin_ia32_fpclasspd512_mask: {
- unsigned NumElts = Ops[0]->getType()->getVectorNumElements();
- Value *MaskIn = Ops[2];
- Ops.erase(&Ops[2]);
- Intrinsic::ID ID;
- switch (BuiltinID) {
- default: llvm_unreachable("Unsupported intrinsic!");
- case X86::BI__builtin_ia32_fpclassps128_mask:
- ID = Intrinsic::x86_avx512_fpclass_ps_128;
- break;
- case X86::BI__builtin_ia32_fpclassps256_mask:
- ID = Intrinsic::x86_avx512_fpclass_ps_256;
- break;
- case X86::BI__builtin_ia32_fpclassps512_mask:
- ID = Intrinsic::x86_avx512_fpclass_ps_512;
- break;
- case X86::BI__builtin_ia32_fpclasspd128_mask:
- ID = Intrinsic::x86_avx512_fpclass_pd_128;
- break;
- case X86::BI__builtin_ia32_fpclasspd256_mask:
- ID = Intrinsic::x86_avx512_fpclass_pd_256;
- break;
- case X86::BI__builtin_ia32_fpclasspd512_mask:
- ID = Intrinsic::x86_avx512_fpclass_pd_512;
- break;
- }
- Value *Fpclass = Builder.CreateCall(CGM.getIntrinsic(ID), Ops);
- return EmitX86MaskedCompareResult(*this, Fpclass, NumElts, MaskIn);
- }
- case X86::BI__builtin_ia32_vp2intersect_q_512:
- case X86::BI__builtin_ia32_vp2intersect_q_256:
- case X86::BI__builtin_ia32_vp2intersect_q_128:
- case X86::BI__builtin_ia32_vp2intersect_d_512:
- case X86::BI__builtin_ia32_vp2intersect_d_256:
- case X86::BI__builtin_ia32_vp2intersect_d_128: {
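- // vp2intersect produces a pair of masks; expand the aggregate result
- // and store each mask through the two out pointers (Ops[2], Ops[3]).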
- unsigned NumElts = Ops[0]->getType()->getVectorNumElements();
- Intrinsic::ID ID;
- switch (BuiltinID) {
- default: llvm_unreachable("Unsupported intrinsic!");
- case X86::BI__builtin_ia32_vp2intersect_q_512:
- ID = Intrinsic::x86_avx512_vp2intersect_q_512;
- break;
- case X86::BI__builtin_ia32_vp2intersect_q_256:
- ID = Intrinsic::x86_avx512_vp2intersect_q_256;
- break;
- case X86::BI__builtin_ia32_vp2intersect_q_128:
- ID = Intrinsic::x86_avx512_vp2intersect_q_128;
- break;
- case X86::BI__builtin_ia32_vp2intersect_d_512:
- ID = Intrinsic::x86_avx512_vp2intersect_d_512;
- break;
- case X86::BI__builtin_ia32_vp2intersect_d_256:
- ID = Intrinsic::x86_avx512_vp2intersect_d_256;
- break;
- case X86::BI__builtin_ia32_vp2intersect_d_128:
- ID = Intrinsic::x86_avx512_vp2intersect_d_128;
- break;
- }
- Value *Call = Builder.CreateCall(CGM.getIntrinsic(ID), {Ops[0], Ops[1]});
- Value *Result = Builder.CreateExtractValue(Call, 0);
- Result = EmitX86MaskedCompareResult(*this, Result, NumElts, nullptr);
- Builder.CreateDefaultAlignedStore(Result, Ops[2]);
- Result = Builder.CreateExtractValue(Call, 1);
- Result = EmitX86MaskedCompareResult(*this, Result, NumElts, nullptr);
- return Builder.CreateDefaultAlignedStore(Result, Ops[3]);
- }
- case X86::BI__builtin_ia32_vpmultishiftqb128:
- case X86::BI__builtin_ia32_vpmultishiftqb256:
- case X86::BI__builtin_ia32_vpmultishiftqb512: {
- Intrinsic::ID ID;
- switch (BuiltinID) {
- default: llvm_unreachable("Unsupported intrinsic!");
- case X86::BI__builtin_ia32_vpmultishiftqb128:
- ID = Intrinsic::x86_avx512_pmultishift_qb_128;
- break;
- case X86::BI__builtin_ia32_vpmultishiftqb256:
- ID = Intrinsic::x86_avx512_pmultishift_qb_256;
- break;
- case X86::BI__builtin_ia32_vpmultishiftqb512:
- ID = Intrinsic::x86_avx512_pmultishift_qb_512;
- break;
- }
- return Builder.CreateCall(CGM.getIntrinsic(ID), Ops);
- }
- case X86::BI__builtin_ia32_vpshufbitqmb128_mask:
- case X86::BI__builtin_ia32_vpshufbitqmb256_mask:
- case X86::BI__builtin_ia32_vpshufbitqmb512_mask: {
- unsigned NumElts = Ops[0]->getType()->getVectorNumElements();
- Value *MaskIn = Ops[2];
- Ops.erase(&Ops[2]);
- Intrinsic::ID ID;
- switch (BuiltinID) {
- default: llvm_unreachable("Unsupported intrinsic!");
- case X86::BI__builtin_ia32_vpshufbitqmb128_mask:
- ID = Intrinsic::x86_avx512_vpshufbitqmb_128;
- break;
- case X86::BI__builtin_ia32_vpshufbitqmb256_mask:
- ID = Intrinsic::x86_avx512_vpshufbitqmb_256;
- break;
- case X86::BI__builtin_ia32_vpshufbitqmb512_mask:
- ID = Intrinsic::x86_avx512_vpshufbitqmb_512;
- break;
- }
- Value *Shufbit = Builder.CreateCall(CGM.getIntrinsic(ID), Ops);
- return EmitX86MaskedCompareResult(*this, Shufbit, NumElts, MaskIn);
- }
- // packed comparison intrinsics
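- // The "not" forms map to unordered predicates (e.g. cmpnltps ->
- // FCMP_UGE) because the negation of an ordered compare must also be
- // true when either operand is NaN.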
- case X86::BI__builtin_ia32_cmpeqps:
- case X86::BI__builtin_ia32_cmpeqpd:
- return getVectorFCmpIR(CmpInst::FCMP_OEQ);
- case X86::BI__builtin_ia32_cmpltps:
- case X86::BI__builtin_ia32_cmpltpd:
- return getVectorFCmpIR(CmpInst::FCMP_OLT);
- case X86::BI__builtin_ia32_cmpleps:
- case X86::BI__builtin_ia32_cmplepd:
- return getVectorFCmpIR(CmpInst::FCMP_OLE);
- case X86::BI__builtin_ia32_cmpunordps:
- case X86::BI__builtin_ia32_cmpunordpd:
- return getVectorFCmpIR(CmpInst::FCMP_UNO);
- case X86::BI__builtin_ia32_cmpneqps:
- case X86::BI__builtin_ia32_cmpneqpd:
- return getVectorFCmpIR(CmpInst::FCMP_UNE);
- case X86::BI__builtin_ia32_cmpnltps:
- case X86::BI__builtin_ia32_cmpnltpd:
- return getVectorFCmpIR(CmpInst::FCMP_UGE);
- case X86::BI__builtin_ia32_cmpnleps:
- case X86::BI__builtin_ia32_cmpnlepd:
- return getVectorFCmpIR(CmpInst::FCMP_UGT);
- case X86::BI__builtin_ia32_cmpordps:
- case X86::BI__builtin_ia32_cmpordpd:
- return getVectorFCmpIR(CmpInst::FCMP_ORD);
- case X86::BI__builtin_ia32_cmpps:
- case X86::BI__builtin_ia32_cmpps256:
- case X86::BI__builtin_ia32_cmppd:
- case X86::BI__builtin_ia32_cmppd256:
- case X86::BI__builtin_ia32_cmpps128_mask:
- case X86::BI__builtin_ia32_cmpps256_mask:
- case X86::BI__builtin_ia32_cmpps512_mask:
- case X86::BI__builtin_ia32_cmppd128_mask:
- case X86::BI__builtin_ia32_cmppd256_mask:
- case X86::BI__builtin_ia32_cmppd512_mask: {
- // Lower vector comparisons to fcmp instructions, ignoring the
- // requested signaling behaviour and rounding mode. This is only
- // possible as long as FENV_ACCESS is not implemented.
- // See also: https://reviews.llvm.org/D45616
- // The third argument is the comparison condition, an integer in the
- // range [0, 31].
- unsigned CC = cast<llvm::ConstantInt>(Ops[2])->getZExtValue() & 0x1f;
- // Ignoring the requested signaling behaviour means that, e.g., both
- // _CMP_GT_OS and _CMP_GT_OQ are translated to FCMP_OGT.
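- // Predicates 0x10-0x1f duplicate 0x00-0x0f: bit 4 only toggles the
- // signaling variant (e.g. _CMP_EQ_OQ vs _CMP_EQ_OS), which is ignored
- // here.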
- FCmpInst::Predicate Pred;
- switch (CC) {
- case 0x00: Pred = FCmpInst::FCMP_OEQ; break;
- case 0x01: Pred = FCmpInst::FCMP_OLT; break;
- case 0x02: Pred = FCmpInst::FCMP_OLE; break;
- case 0x03: Pred = FCmpInst::FCMP_UNO; break;
- case 0x04: Pred = FCmpInst::FCMP_UNE; break;
- case 0x05: Pred = FCmpInst::FCMP_UGE; break;
- case 0x06: Pred = FCmpInst::FCMP_UGT; break;
- case 0x07: Pred = FCmpInst::FCMP_ORD; break;
- case 0x08: Pred = FCmpInst::FCMP_UEQ; break;
- case 0x09: Pred = FCmpInst::FCMP_ULT; break;
- case 0x0a: Pred = FCmpInst::FCMP_ULE; break;
- case 0x0b: Pred = FCmpInst::FCMP_FALSE; break;
- case 0x0c: Pred = FCmpInst::FCMP_ONE; break;
- case 0x0d: Pred = FCmpInst::FCMP_OGE; break;
- case 0x0e: Pred = FCmpInst::FCMP_OGT; break;
- case 0x0f: Pred = FCmpInst::FCMP_TRUE; break;
- case 0x10: Pred = FCmpInst::FCMP_OEQ; break;
- case 0x11: Pred = FCmpInst::FCMP_OLT; break;
- case 0x12: Pred = FCmpInst::FCMP_OLE; break;
- case 0x13: Pred = FCmpInst::FCMP_UNO; break;
- case 0x14: Pred = FCmpInst::FCMP_UNE; break;
- case 0x15: Pred = FCmpInst::FCMP_UGE; break;
- case 0x16: Pred = FCmpInst::FCMP_UGT; break;
- case 0x17: Pred = FCmpInst::FCMP_ORD; break;
- case 0x18: Pred = FCmpInst::FCMP_UEQ; break;
- case 0x19: Pred = FCmpInst::FCMP_ULT; break;
- case 0x1a: Pred = FCmpInst::FCMP_ULE; break;
- case 0x1b: Pred = FCmpInst::FCMP_FALSE; break;
- case 0x1c: Pred = FCmpInst::FCMP_ONE; break;
- case 0x1d: Pred = FCmpInst::FCMP_OGE; break;
- case 0x1e: Pred = FCmpInst::FCMP_OGT; break;
- case 0x1f: Pred = FCmpInst::FCMP_TRUE; break;
- default: llvm_unreachable("Unhandled CC");
- }
- // Builtins without the _mask suffix return a vector of integers of the
- // same width as the input vectors.
- switch (BuiltinID) {
- case X86::BI__builtin_ia32_cmpps512_mask:
- case X86::BI__builtin_ia32_cmppd512_mask:
- case X86::BI__builtin_ia32_cmpps128_mask:
- case X86::BI__builtin_ia32_cmpps256_mask:
- case X86::BI__builtin_ia32_cmppd128_mask:
- case X86::BI__builtin_ia32_cmppd256_mask: {
- unsigned NumElts = Ops[0]->getType()->getVectorNumElements();
- Value *Cmp = Builder.CreateFCmp(Pred, Ops[0], Ops[1]);
- return EmitX86MaskedCompareResult(*this, Cmp, NumElts, Ops[3]);
- }
- default:
- return getVectorFCmpIR(Pred);
- }
- }
- // SSE scalar comparison intrinsics
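- // The second argument to the cmp_ss/cmp_sd intrinsics is the imm8
- // predicate: 0=EQ, 1=LT, 2=LE, 3=UNORD, 4=NEQ, 5=NLT, 6=NLE, 7=ORD.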
- case X86::BI__builtin_ia32_cmpeqss:
- return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 0);
- case X86::BI__builtin_ia32_cmpltss:
- return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 1);
- case X86::BI__builtin_ia32_cmpless:
- return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 2);
- case X86::BI__builtin_ia32_cmpunordss:
- return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 3);
- case X86::BI__builtin_ia32_cmpneqss:
- return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 4);
- case X86::BI__builtin_ia32_cmpnltss:
- return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 5);
- case X86::BI__builtin_ia32_cmpnless:
- return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 6);
- case X86::BI__builtin_ia32_cmpordss:
- return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 7);
- case X86::BI__builtin_ia32_cmpeqsd:
- return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 0);
- case X86::BI__builtin_ia32_cmpltsd:
- return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 1);
- case X86::BI__builtin_ia32_cmplesd:
- return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 2);
- case X86::BI__builtin_ia32_cmpunordsd:
- return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 3);
- case X86::BI__builtin_ia32_cmpneqsd:
- return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 4);
- case X86::BI__builtin_ia32_cmpnltsd:
- return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 5);
- case X86::BI__builtin_ia32_cmpnlesd:
- return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 6);
- case X86::BI__builtin_ia32_cmpordsd:
- return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 7);
- // AVX512 bf16 intrinsics
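- // The 128-bit conversion takes its mask as an intrinsic operand, while
- // the wider forms are emitted unmasked and then blended with
- // EmitX86Select below.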
- case X86::BI__builtin_ia32_cvtneps2bf16_128_mask: {
- Ops[2] = getMaskVecValue(*this, Ops[2],
- Ops[0]->getType()->getVectorNumElements());
- Intrinsic::ID IID = Intrinsic::x86_avx512bf16_mask_cvtneps2bf16_128;
- return Builder.CreateCall(CGM.getIntrinsic(IID), Ops);
- }
- case X86::BI__builtin_ia32_cvtsbf162ss_32:
- return EmitX86CvtBF16ToFloatExpr(*this, E, Ops);
- case X86::BI__builtin_ia32_cvtneps2bf16_256_mask:
- case X86::BI__builtin_ia32_cvtneps2bf16_512_mask: {
- Intrinsic::ID IID;
- switch (BuiltinID) {
- default: llvm_unreachable("Unsupported intrinsic!");
- case X86::BI__builtin_ia32_cvtneps2bf16_256_mask:
- IID = Intrinsic::x86_avx512bf16_cvtneps2bf16_256;
- break;
- case X86::BI__builtin_ia32_cvtneps2bf16_512_mask:
- IID = Intrinsic::x86_avx512bf16_cvtneps2bf16_512;
- break;
- }
- Value *Res = Builder.CreateCall(CGM.getIntrinsic(IID), Ops[0]);
- return EmitX86Select(*this, Ops[2], Res, Ops[1]);
- }
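- // MSVC multiply helpers: __emul/__emulu widen their 32-bit operands and
- // return the full 64-bit product, e.g. __emul(a, b) == (__int64)a * b.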
- case X86::BI__emul:
- case X86::BI__emulu: {
- llvm::Type *Int64Ty = llvm::IntegerType::get(getLLVMContext(), 64);
- bool isSigned = (BuiltinID == X86::BI__emul);
- Value *LHS = Builder.CreateIntCast(Ops[0], Int64Ty, isSigned);
- Value *RHS = Builder.CreateIntCast(Ops[1], Int64Ty, isSigned);
- return Builder.CreateMul(LHS, RHS, "", !isSigned, isSigned);
- }
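- // __mulh/__umulh return the high 64 bits of the 128-bit product;
- // _mul128/_umul128 also store the high half through the third argument
- // and return the low half.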
- case X86::BI__mulh:
- case X86::BI__umulh:
- case X86::BI_mul128:
- case X86::BI_umul128: {
- llvm::Type *ResType = ConvertType(E->getType());
- llvm::Type *Int128Ty = llvm::IntegerType::get(getLLVMContext(), 128);
- bool IsSigned = (BuiltinID == X86::BI__mulh || BuiltinID == X86::BI_mul128);
- Value *LHS = Builder.CreateIntCast(Ops[0], Int128Ty, IsSigned);
- Value *RHS = Builder.CreateIntCast(Ops[1], Int128Ty, IsSigned);
- Value *MulResult, *HigherBits;
- if (IsSigned) {
- MulResult = Builder.CreateNSWMul(LHS, RHS);
- HigherBits = Builder.CreateAShr(MulResult, 64);
- } else {
- MulResult = Builder.CreateNUWMul(LHS, RHS);
- HigherBits = Builder.CreateLShr(MulResult, 64);
- }
- HigherBits = Builder.CreateIntCast(HigherBits, ResType, IsSigned);
- if (BuiltinID == X86::BI__mulh || BuiltinID == X86::BI__umulh)
- return HigherBits;
- Address HighBitsAddress = EmitPointerWithAlignment(E->getArg(2));
- Builder.CreateStore(HigherBits, HighBitsAddress);
- return Builder.CreateIntCast(MulResult, ResType, IsSigned);
- }
- case X86::BI__faststorefence: {
- return Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent,
- llvm::SyncScope::System);
- }
- case X86::BI__shiftleft128:
- case X86::BI__shiftright128: {
- // FIXME: Once fshl/fshr no longer add an unneeded 'and' and 'cmov', do this:
- // llvm::Function *F = CGM.getIntrinsic(
- // BuiltinID == X86::BI__shiftleft128 ? Intrinsic::fshl : Intrinsic::fshr,
- // Int64Ty);
- // Ops[2] = Builder.CreateZExt(Ops[2], Int64Ty);
- // return Builder.CreateCall(F, Ops);
- llvm::Type *Int128Ty = Builder.getInt128Ty();
- Value *HighPart128 =
- Builder.CreateShl(Builder.CreateZExt(Ops[1], Int128Ty), 64);
- Value *LowPart128 = Builder.CreateZExt(Ops[0], Int128Ty);
- Value *Val = Builder.CreateOr(HighPart128, LowPart128);
- Value *Amt = Builder.CreateAnd(Builder.CreateZExt(Ops[2], Int128Ty),
- llvm::ConstantInt::get(Int128Ty, 0x3f));
- Value *Res;
- if (BuiltinID == X86::BI__shiftleft128)
- Res = Builder.CreateLShr(Builder.CreateShl(Val, Amt), 64);
- else
- Res = Builder.CreateLShr(Val, Amt);
- return Builder.CreateTrunc(Res, Int64Ty);
- }
- case X86::BI_ReadWriteBarrier:
- case X86::BI_ReadBarrier:
- case X86::BI_WriteBarrier: {
- return Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent,
- llvm::SyncScope::SingleThread);
- }
- case X86::BI_BitScanForward:
- case X86::BI_BitScanForward64:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_BitScanForward, E);
- case X86::BI_BitScanReverse:
- case X86::BI_BitScanReverse64:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_BitScanReverse, E);
- case X86::BI_InterlockedAnd64:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedAnd, E);
- case X86::BI_InterlockedExchange64:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchange, E);
- case X86::BI_InterlockedExchangeAdd64:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeAdd, E);
- case X86::BI_InterlockedExchangeSub64:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeSub, E);
- case X86::BI_InterlockedOr64:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedOr, E);
- case X86::BI_InterlockedXor64:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedXor, E);
- case X86::BI_InterlockedDecrement64:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedDecrement, E);
- case X86::BI_InterlockedIncrement64:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedIncrement, E);
- case X86::BI_InterlockedCompareExchange128: {
- // InterlockedCompareExchange128 doesn't operate on 128-bit ints
- // directly; instead it takes pointers to 64-bit ints for Destination and
- // ComparandResult, and the exchange value is passed as two 64-bit ints
- // (high & low). The previous value is written to ComparandResult, and
- // success is returned.
- llvm::Type *Int128Ty = Builder.getInt128Ty();
- llvm::Type *Int128PtrTy = Int128Ty->getPointerTo();
- Value *Destination =
- Builder.CreateBitCast(Ops[0], Int128PtrTy);
- Value *ExchangeHigh128 = Builder.CreateZExt(Ops[1], Int128Ty);
- Value *ExchangeLow128 = Builder.CreateZExt(Ops[2], Int128Ty);
- Address ComparandResult(Builder.CreateBitCast(Ops[3], Int128PtrTy),
- getContext().toCharUnitsFromBits(128));
- Value *Exchange = Builder.CreateOr(
- Builder.CreateShl(ExchangeHigh128, 64, "", false, false),
- ExchangeLow128);
- Value *Comparand = Builder.CreateLoad(ComparandResult);
- AtomicCmpXchgInst *CXI =
- Builder.CreateAtomicCmpXchg(Destination, Comparand, Exchange,
- AtomicOrdering::SequentiallyConsistent,
- AtomicOrdering::SequentiallyConsistent);
- CXI->setVolatile(true);
- // Write the result back to the inout pointer.
- Builder.CreateStore(Builder.CreateExtractValue(CXI, 0), ComparandResult);
- // Get the success boolean and zero extend it to i8.
- Value *Success = Builder.CreateExtractValue(CXI, 1);
- return Builder.CreateZExt(Success, ConvertType(E->getType()));
- }
- case X86::BI_AddressOfReturnAddress: {
- Function *F = CGM.getIntrinsic(Intrinsic::addressofreturnaddress);
- return Builder.CreateCall(F);
- }
- case X86::BI__stosb: {
- // We treat __stosb as a volatile memset - it may not generate a "rep
- // stosb" instruction, but it will create a memset that won't be
- // optimized away.
- return Builder.CreateMemSet(Ops[0], Ops[1], Ops[2], 1, true);
- }
- case X86::BI__ud2:
- // llvm.trap makes a ud2a instruction on x86.
- return EmitTrapCall(Intrinsic::trap);
- case X86::BI__int2c: {
- // This syscall signals a driver assertion failure in x86 NT kernels.
- llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false);
- llvm::InlineAsm *IA =
- llvm::InlineAsm::get(FTy, "int $$0x2c", "", /*hasSideEffects=*/true);
- llvm::AttributeList NoReturnAttr = llvm::AttributeList::get(
- getLLVMContext(), llvm::AttributeList::FunctionIndex,
- llvm::Attribute::NoReturn);
- llvm::CallInst *CI = Builder.CreateCall(IA);
- CI->setAttributes(NoReturnAttr);
- return CI;
- }
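- // The __readfs*/__readgs* builtins read relative to the FS/GS segment
- // bases, which LLVM's X86 backend models as address spaces 257 and 256
- // respectively.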
- case X86::BI__readfsbyte:
- case X86::BI__readfsword:
- case X86::BI__readfsdword:
- case X86::BI__readfsqword: {
- llvm::Type *IntTy = ConvertType(E->getType());
- Value *Ptr =
- Builder.CreateIntToPtr(Ops[0], llvm::PointerType::get(IntTy, 257));
- LoadInst *Load = Builder.CreateAlignedLoad(
- IntTy, Ptr, getContext().getTypeAlignInChars(E->getType()));
- Load->setVolatile(true);
- return Load;
- }
- case X86::BI__readgsbyte:
- case X86::BI__readgsword:
- case X86::BI__readgsdword:
- case X86::BI__readgsqword: {
- llvm::Type *IntTy = ConvertType(E->getType());
- Value *Ptr =
- Builder.CreateIntToPtr(Ops[0], llvm::PointerType::get(IntTy, 256));
- LoadInst *Load = Builder.CreateAlignedLoad(
- IntTy, Ptr, getContext().getTypeAlignInChars(E->getType()));
- Load->setVolatile(true);
- return Load;
- }
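- // Saturating vector add/sub; the two flags are IsSigned and IsAddition.
- // In this version EmitX86AddSubSatExpr lowers these to the generic
- // saturating add/sub intrinsics.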
- case X86::BI__builtin_ia32_paddsb512:
- case X86::BI__builtin_ia32_paddsw512:
- case X86::BI__builtin_ia32_paddsb256:
- case X86::BI__builtin_ia32_paddsw256:
- case X86::BI__builtin_ia32_paddsb128:
- case X86::BI__builtin_ia32_paddsw128:
- return EmitX86AddSubSatExpr(*this, Ops, true, true);
- case X86::BI__builtin_ia32_paddusb512:
- case X86::BI__builtin_ia32_paddusw512:
- case X86::BI__builtin_ia32_paddusb256:
- case X86::BI__builtin_ia32_paddusw256:
- case X86::BI__builtin_ia32_paddusb128:
- case X86::BI__builtin_ia32_paddusw128:
- return EmitX86AddSubSatExpr(*this, Ops, false, true);
- case X86::BI__builtin_ia32_psubsb512:
- case X86::BI__builtin_ia32_psubsw512:
- case X86::BI__builtin_ia32_psubsb256:
- case X86::BI__builtin_ia32_psubsw256:
- case X86::BI__builtin_ia32_psubsb128:
- case X86::BI__builtin_ia32_psubsw128:
- return EmitX86AddSubSatExpr(*this, Ops, true, false);
- case X86::BI__builtin_ia32_psubusb512:
- case X86::BI__builtin_ia32_psubusw512:
- case X86::BI__builtin_ia32_psubusb256:
- case X86::BI__builtin_ia32_psubusw256:
- case X86::BI__builtin_ia32_psubusb128:
- case X86::BI__builtin_ia32_psubusw128:
- return EmitX86AddSubSatExpr(*this, Ops, false, false);
- }
- }
- Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
- const CallExpr *E) {
- SmallVector<Value*, 4> Ops;
- for (unsigned i = 0, e = E->getNumArgs(); i != e; i++)
- Ops.push_back(EmitScalarExpr(E->getArg(i)));
- Intrinsic::ID ID = Intrinsic::not_intrinsic;
- switch (BuiltinID) {
- default: return nullptr;
- // __builtin_ppc_get_timebase is GCC 4.8+'s PowerPC-specific name for what we
- // call __builtin_readcyclecounter.
- case PPC::BI__builtin_ppc_get_timebase:
- return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::readcyclecounter));
- // vec_ld, vec_xl_be, vec_lvsl, vec_lvsr
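- // Most of these builtins take (offset, base pointer) operands while the
- // intrinsics take a single pointer, so fold the offset in with a GEP;
- // lxvl/lxvll instead take a pointer plus a length and only need the
- // bitcast.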
- case PPC::BI__builtin_altivec_lvx:
- case PPC::BI__builtin_altivec_lvxl:
- case PPC::BI__builtin_altivec_lvebx:
- case PPC::BI__builtin_altivec_lvehx:
- case PPC::BI__builtin_altivec_lvewx:
- case PPC::BI__builtin_altivec_lvsl:
- case PPC::BI__builtin_altivec_lvsr:
- case PPC::BI__builtin_vsx_lxvd2x:
- case PPC::BI__builtin_vsx_lxvw4x:
- case PPC::BI__builtin_vsx_lxvd2x_be:
- case PPC::BI__builtin_vsx_lxvw4x_be:
- case PPC::BI__builtin_vsx_lxvl:
- case PPC::BI__builtin_vsx_lxvll:
- {
- if (BuiltinID == PPC::BI__builtin_vsx_lxvl ||
- BuiltinID == PPC::BI__builtin_vsx_lxvll) {
- Ops[0] = Builder.CreateBitCast(Ops[0], Int8PtrTy);
- } else {
- Ops[1] = Builder.CreateBitCast(Ops[1], Int8PtrTy);
- Ops[0] = Builder.CreateGEP(Ops[1], Ops[0]);
- Ops.pop_back();
- }
- switch (BuiltinID) {
- default: llvm_unreachable("Unsupported ld/lvsl/lvsr intrinsic!");
- case PPC::BI__builtin_altivec_lvx:
- ID = Intrinsic::ppc_altivec_lvx;
- break;
- case PPC::BI__builtin_altivec_lvxl:
- ID = Intrinsic::ppc_altivec_lvxl;
- break;
- case PPC::BI__builtin_altivec_lvebx:
- ID = Intrinsic::ppc_altivec_lvebx;
- break;
- case PPC::BI__builtin_altivec_lvehx:
- ID = Intrinsic::ppc_altivec_lvehx;
- break;
- case PPC::BI__builtin_altivec_lvewx:
- ID = Intrinsic::ppc_altivec_lvewx;
- break;
- case PPC::BI__builtin_altivec_lvsl:
- ID = Intrinsic::ppc_altivec_lvsl;
- break;
- case PPC::BI__builtin_altivec_lvsr:
- ID = Intrinsic::ppc_altivec_lvsr;
- break;
- case PPC::BI__builtin_vsx_lxvd2x:
- ID = Intrinsic::ppc_vsx_lxvd2x;
- break;
- case PPC::BI__builtin_vsx_lxvw4x:
- ID = Intrinsic::ppc_vsx_lxvw4x;
- break;
- case PPC::BI__builtin_vsx_lxvd2x_be:
- ID = Intrinsic::ppc_vsx_lxvd2x_be;
- break;
- case PPC::BI__builtin_vsx_lxvw4x_be:
- ID = Intrinsic::ppc_vsx_lxvw4x_be;
- break;
- case PPC::BI__builtin_vsx_lxvl:
- ID = Intrinsic::ppc_vsx_lxvl;
- break;
- case PPC::BI__builtin_vsx_lxvll:
- ID = Intrinsic::ppc_vsx_lxvll;
- break;
- }
- llvm::Function *F = CGM.getIntrinsic(ID);
- return Builder.CreateCall(F, Ops, "");
- }
- // vec_st, vec_xst_be
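- // Likewise the store builtins take (value, offset, base pointer), except
- // for stxvl/stxvll which take (value, pointer, length).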
- case PPC::BI__builtin_altivec_stvx:
- case PPC::BI__builtin_altivec_stvxl:
- case PPC::BI__builtin_altivec_stvebx:
- case PPC::BI__builtin_altivec_stvehx:
- case PPC::BI__builtin_altivec_stvewx:
- case PPC::BI__builtin_vsx_stxvd2x:
- case PPC::BI__builtin_vsx_stxvw4x:
- case PPC::BI__builtin_vsx_stxvd2x_be:
- case PPC::BI__builtin_vsx_stxvw4x_be:
- case PPC::BI__builtin_vsx_stxvl:
- case PPC::BI__builtin_vsx_stxvll:
- {
- if (BuiltinID == PPC::BI__builtin_vsx_stxvl ||
- BuiltinID == PPC::BI__builtin_vsx_stxvll) {
- Ops[1] = Builder.CreateBitCast(Ops[1], Int8PtrTy);
- } else {
- Ops[2] = Builder.CreateBitCast(Ops[2], Int8PtrTy);
- Ops[1] = Builder.CreateGEP(Ops[2], Ops[1]);
- Ops.pop_back();
- }
- switch (BuiltinID) {
- default: llvm_unreachable("Unsupported st intrinsic!");
- case PPC::BI__builtin_altivec_stvx:
- ID = Intrinsic::ppc_altivec_stvx;
- break;
- case PPC::BI__builtin_altivec_stvxl:
- ID = Intrinsic::ppc_altivec_stvxl;
- break;
- case PPC::BI__builtin_altivec_stvebx:
- ID = Intrinsic::ppc_altivec_stvebx;
- break;
- case PPC::BI__builtin_altivec_stvehx:
- ID = Intrinsic::ppc_altivec_stvehx;
- break;
- case PPC::BI__builtin_altivec_stvewx:
- ID = Intrinsic::ppc_altivec_stvewx;
- break;
- case PPC::BI__builtin_vsx_stxvd2x:
- ID = Intrinsic::ppc_vsx_stxvd2x;
- break;
- case PPC::BI__builtin_vsx_stxvw4x:
- ID = Intrinsic::ppc_vsx_stxvw4x;
- break;
- case PPC::BI__builtin_vsx_stxvd2x_be:
- ID = Intrinsic::ppc_vsx_stxvd2x_be;
- break;
- case PPC::BI__builtin_vsx_stxvw4x_be:
- ID = Intrinsic::ppc_vsx_stxvw4x_be;
- break;
- case PPC::BI__builtin_vsx_stxvl:
- ID = Intrinsic::ppc_vsx_stxvl;
- break;
- case PPC::BI__builtin_vsx_stxvll:
- ID = Intrinsic::ppc_vsx_stxvll;
- break;
- }
- llvm::Function *F = CGM.getIntrinsic(ID);
- return Builder.CreateCall(F, Ops, "");
- }
- // Square root
- case PPC::BI__builtin_vsx_xvsqrtsp:
- case PPC::BI__builtin_vsx_xvsqrtdp: {
- llvm::Type *ResultType = ConvertType(E->getType());
- Value *X = EmitScalarExpr(E->getArg(0));
- ID = Intrinsic::sqrt;
- llvm::Function *F = CGM.getIntrinsic(ID, ResultType);
- return Builder.CreateCall(F, X);
- }
- // Count leading zeros
- case PPC::BI__builtin_altivec_vclzb:
- case PPC::BI__builtin_altivec_vclzh:
- case PPC::BI__builtin_altivec_vclzw:
- case PPC::BI__builtin_altivec_vclzd: {
- llvm::Type *ResultType = ConvertType(E->getType());
- Value *X = EmitScalarExpr(E->getArg(0));
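- // The i1 false flag asks ctlz for a defined result (the bit width) when
- // the input is zero.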
- Value *Undef = ConstantInt::get(Builder.getInt1Ty(), false);
- Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ResultType);
- return Builder.CreateCall(F, {X, Undef});
- }
- case PPC::BI__builtin_altivec_vctzb:
- case PPC::BI__builtin_altivec_vctzh:
- case PPC::BI__builtin_altivec_vctzw:
- case PPC::BI__builtin_altivec_vctzd: {
- llvm::Type *ResultType = ConvertType(E->getType());
- Value *X = EmitScalarExpr(E->getArg(0));
- Value *Undef = ConstantInt::get(Builder.getInt1Ty(), false);
- Function *F = CGM.getIntrinsic(Intrinsic::cttz, ResultType);
- return Builder.CreateCall(F, {X, Undef});
- }
- case PPC::BI__builtin_altivec_vpopcntb:
- case PPC::BI__builtin_altivec_vpopcnth:
- case PPC::BI__builtin_altivec_vpopcntw:
- case PPC::BI__builtin_altivec_vpopcntd: {
- llvm::Type *ResultType = ConvertType(E->getType());
- Value *X = EmitScalarExpr(E->getArg(0));
- llvm::Function *F = CGM.getIntrinsic(Intrinsic::ctpop, ResultType);
- return Builder.CreateCall(F, X);
- }
- // Copy sign
- case PPC::BI__builtin_vsx_xvcpsgnsp:
- case PPC::BI__builtin_vsx_xvcpsgndp: {
- llvm::Type *ResultType = ConvertType(E->getType());
- Value *X = EmitScalarExpr(E->getArg(0));
- Value *Y = EmitScalarExpr(E->getArg(1));
- ID = Intrinsic::copysign;
- llvm::Function *F = CGM.getIntrinsic(ID, ResultType);
- return Builder.CreateCall(F, {X, Y});
- }
- // Rounding/truncation
- case PPC::BI__builtin_vsx_xvrspip:
- case PPC::BI__builtin_vsx_xvrdpip:
- case PPC::BI__builtin_vsx_xvrdpim:
- case PPC::BI__builtin_vsx_xvrspim:
- case PPC::BI__builtin_vsx_xvrdpi:
- case PPC::BI__builtin_vsx_xvrspi:
- case PPC::BI__builtin_vsx_xvrdpic:
- case PPC::BI__builtin_vsx_xvrspic:
- case PPC::BI__builtin_vsx_xvrdpiz:
- case PPC::BI__builtin_vsx_xvrspiz: {
- llvm::Type *ResultType = ConvertType(E->getType());
- Value *X = EmitScalarExpr(E->getArg(0));
- if (BuiltinID == PPC::BI__builtin_vsx_xvrdpim ||
- BuiltinID == PPC::BI__builtin_vsx_xvrspim)
- ID = Intrinsic::floor;
- else if (BuiltinID == PPC::BI__builtin_vsx_xvrdpi ||
- BuiltinID == PPC::BI__builtin_vsx_xvrspi)
- ID = Intrinsic::round;
- else if (BuiltinID == PPC::BI__builtin_vsx_xvrdpic ||
- BuiltinID == PPC::BI__builtin_vsx_xvrspic)
- ID = Intrinsic::nearbyint;
- else if (BuiltinID == PPC::BI__builtin_vsx_xvrdpip ||
- BuiltinID == PPC::BI__builtin_vsx_xvrspip)
- ID = Intrinsic::ceil;
- else if (BuiltinID == PPC::BI__builtin_vsx_xvrdpiz ||
- BuiltinID == PPC::BI__builtin_vsx_xvrspiz)
- ID = Intrinsic::trunc;
- llvm::Function *F = CGM.getIntrinsic(ID, ResultType);
- return Builder.CreateCall(F, X);
- }
- // Absolute value
- case PPC::BI__builtin_vsx_xvabsdp:
- case PPC::BI__builtin_vsx_xvabssp: {
- llvm::Type *ResultType = ConvertType(E->getType());
- Value *X = EmitScalarExpr(E->getArg(0));
- llvm::Function *F = CGM.getIntrinsic(Intrinsic::fabs, ResultType);
- return Builder.CreateCall(F, X);
- }
- // FMA variations
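- // All four variants are built from the generic llvm.fma intrinsic:
- // xvmadd = fma(x, y, z), xvnmadd = -fma(x, y, z),
- // xvmsub = fma(x, y, -z), xvnmsub = -fma(x, y, -z).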
- case PPC::BI__builtin_vsx_xvmaddadp:
- case PPC::BI__builtin_vsx_xvmaddasp:
- case PPC::BI__builtin_vsx_xvnmaddadp:
- case PPC::BI__builtin_vsx_xvnmaddasp:
- case PPC::BI__builtin_vsx_xvmsubadp:
- case PPC::BI__builtin_vsx_xvmsubasp:
- case PPC::BI__builtin_vsx_xvnmsubadp:
- case PPC::BI__builtin_vsx_xvnmsubasp: {
- llvm::Type *ResultType = ConvertType(E->getType());
- Value *X = EmitScalarExpr(E->getArg(0));
- Value *Y = EmitScalarExpr(E->getArg(1));
- Value *Z = EmitScalarExpr(E->getArg(2));
- Value *Zero = llvm::ConstantFP::getZeroValueForNegation(ResultType);
- llvm::Function *F = CGM.getIntrinsic(Intrinsic::fma, ResultType);
- switch (BuiltinID) {
- case PPC::BI__builtin_vsx_xvmaddadp:
- case PPC::BI__builtin_vsx_xvmaddasp:
- return Builder.CreateCall(F, {X, Y, Z});
- case PPC::BI__builtin_vsx_xvnmaddadp:
- case PPC::BI__builtin_vsx_xvnmaddasp:
- return Builder.CreateFSub(Zero,
- Builder.CreateCall(F, {X, Y, Z}), "sub");
- case PPC::BI__builtin_vsx_xvmsubadp:
- case PPC::BI__builtin_vsx_xvmsubasp:
- return Builder.CreateCall(F,
- {X, Y, Builder.CreateFSub(Zero, Z, "sub")});
- case PPC::BI__builtin_vsx_xvnmsubadp:
- case PPC::BI__builtin_vsx_xvnmsubasp:
- Value *FsubRes =
- Builder.CreateCall(F, {X, Y, Builder.CreateFSub(Zero, Z, "sub")});
- return Builder.CreateFSub(Zero, FsubRes, "sub");
- }
- llvm_unreachable("Unknown FMA operation");
- return nullptr; // Suppress no-return warning
- }
- case PPC::BI__builtin_vsx_insertword: {
- llvm::Function *F = CGM.getIntrinsic(Intrinsic::ppc_vsx_xxinsertw);
- // The third argument is a compile-time constant int. It must be clamped
- // to the range [0, 12].
- ConstantInt *ArgCI = dyn_cast<ConstantInt>(Ops[2]);
- assert(ArgCI &&
- "Third arg to xxinsertw intrinsic must be constant integer");
- const int64_t MaxIndex = 12;
- int64_t Index = clamp(ArgCI->getSExtValue(), 0, MaxIndex);
- // The builtin semantics don't exactly match the xxinsertw instruction's
- // semantics (which ppc_vsx_xxinsertw follows). The builtin extracts the
- // word from the first argument, and inserts it in the second argument.
- // The instruction extracts the word from its second input register and
- // inserts it into its first input register, so swap the first and second
- // arguments.
- std::swap(Ops[0], Ops[1]);
- // Need to cast the second argument from a vector of unsigned int to a
- // vector of long long.
- Ops[1] = Builder.CreateBitCast(Ops[1], llvm::VectorType::get(Int64Ty, 2));
- if (getTarget().isLittleEndian()) {
- // Create a shuffle mask of (1, 0)
- Constant *ShuffleElts[2] = { ConstantInt::get(Int32Ty, 1),
- ConstantInt::get(Int32Ty, 0)
- };
- Constant *ShuffleMask = llvm::ConstantVector::get(ShuffleElts);
- // Reverse the double words in the vector we will extract from.
- Ops[0] = Builder.CreateBitCast(Ops[0], llvm::VectorType::get(Int64Ty, 2));
- Ops[0] = Builder.CreateShuffleVector(Ops[0], Ops[0], ShuffleMask);
- // Reverse the index.
- Index = MaxIndex - Index;
- }
- // Intrinsic expects the first arg to be a vector of int.
- Ops[0] = Builder.CreateBitCast(Ops[0], llvm::VectorType::get(Int32Ty, 4));
- Ops[2] = ConstantInt::getSigned(Int32Ty, Index);
- return Builder.CreateCall(F, Ops);
- }
- case PPC::BI__builtin_vsx_extractuword: {
- llvm::Function *F = CGM.getIntrinsic(Intrinsic::ppc_vsx_xxextractuw);
- // Intrinsic expects the first argument to be a vector of doublewords.
- Ops[0] = Builder.CreateBitCast(Ops[0], llvm::VectorType::get(Int64Ty, 2));
- // The second argument is a compile-time constant int that needs to
- // be clamped to the range [0, 12].
- ConstantInt *ArgCI = dyn_cast<ConstantInt>(Ops[1]);
- assert(ArgCI &&
- "Second Arg to xxextractuw intrinsic must be a constant integer!");
- const int64_t MaxIndex = 12;
- int64_t Index = clamp(ArgCI->getSExtValue(), 0, MaxIndex);
- if (getTarget().isLittleEndian()) {
- // Reverse the index.
- Index = MaxIndex - Index;
- Ops[1] = ConstantInt::getSigned(Int32Ty, Index);
- // Emit the call, then reverse the double words of the results vector.
- Value *Call = Builder.CreateCall(F, Ops);
- // Create a shuffle mask of (1, 0)
- Constant *ShuffleElts[2] = { ConstantInt::get(Int32Ty, 1),
- ConstantInt::get(Int32Ty, 0)
- };
- Constant *ShuffleMask = llvm::ConstantVector::get(ShuffleElts);
- Value *ShuffleCall = Builder.CreateShuffleVector(Call, Call, ShuffleMask);
- return ShuffleCall;
- } else {
- Ops[1] = ConstantInt::getSigned(Int32Ty, Index);
- return Builder.CreateCall(F, Ops);
- }
- }
- case PPC::BI__builtin_vsx_xxpermdi: {
- ConstantInt *ArgCI = dyn_cast<ConstantInt>(Ops[2]);
- assert(ArgCI && "Third arg must be constant integer!");
- unsigned Index = ArgCI->getZExtValue();
- Ops[0] = Builder.CreateBitCast(Ops[0], llvm::VectorType::get(Int64Ty, 2));
- Ops[1] = Builder.CreateBitCast(Ops[1], llvm::VectorType::get(Int64Ty, 2));
- // Account for endianness by treating this as just a shuffle, so we use
- // the same indices for both LE and BE in order to produce expected
- // results in both cases.
- unsigned ElemIdx0 = (Index & 2) >> 1;
- unsigned ElemIdx1 = 2 + (Index & 1);
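- // e.g. Index = 1 selects doubleword 0 of Ops[0] and doubleword 1 of
- // Ops[1].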
- Constant *ShuffleElts[2] = {ConstantInt::get(Int32Ty, ElemIdx0),
- ConstantInt::get(Int32Ty, ElemIdx1)};
- Constant *ShuffleMask = llvm::ConstantVector::get(ShuffleElts);
- Value *ShuffleCall =
- Builder.CreateShuffleVector(Ops[0], Ops[1], ShuffleMask);
- QualType BIRetType = E->getType();
- auto RetTy = ConvertType(BIRetType);
- return Builder.CreateBitCast(ShuffleCall, RetTy);
- }
- case PPC::BI__builtin_vsx_xxsldwi: {
- ConstantInt *ArgCI = dyn_cast<ConstantInt>(Ops[2]);
- assert(ArgCI && "Third argument must be a compile time constant");
- unsigned Index = ArgCI->getZExtValue() & 0x3;
- Ops[0] = Builder.CreateBitCast(Ops[0], llvm::VectorType::get(Int32Ty, 4));
- Ops[1] = Builder.CreateBitCast(Ops[1], llvm::VectorType::get(Int32Ty, 4));
- // Create a shuffle mask
- unsigned ElemIdx0;
- unsigned ElemIdx1;
- unsigned ElemIdx2;
- unsigned ElemIdx3;
- if (getTarget().isLittleEndian()) {
- // Little endian element N comes from element 8+N-Index of the
- // concatenated wide vector (of course, using modulo arithmetic on
- // the total number of elements).
- ElemIdx0 = (8 - Index) % 8;
- ElemIdx1 = (9 - Index) % 8;
- ElemIdx2 = (10 - Index) % 8;
- ElemIdx3 = (11 - Index) % 8;
- } else {
- // Big endian ElemIdx<N> = Index + N
- ElemIdx0 = Index;
- ElemIdx1 = Index + 1;
- ElemIdx2 = Index + 2;
- ElemIdx3 = Index + 3;
- }
- Constant *ShuffleElts[4] = {ConstantInt::get(Int32Ty, ElemIdx0),
- ConstantInt::get(Int32Ty, ElemIdx1),
- ConstantInt::get(Int32Ty, ElemIdx2),
- ConstantInt::get(Int32Ty, ElemIdx3)};
- Constant *ShuffleMask = llvm::ConstantVector::get(ShuffleElts);
- Value *ShuffleCall =
- Builder.CreateShuffleVector(Ops[0], Ops[1], ShuffleMask);
- QualType BIRetType = E->getType();
- auto RetTy = ConvertType(BIRetType);
- return Builder.CreateBitCast(ShuffleCall, RetTy);
- }
- case PPC::BI__builtin_pack_vector_int128: {
- bool isLittleEndian = getTarget().isLittleEndian();
- Value *UndefValue =
- llvm::UndefValue::get(llvm::VectorType::get(Ops[0]->getType(), 2));
- Value *Res = Builder.CreateInsertElement(
- UndefValue, Ops[0], (uint64_t)(isLittleEndian ? 1 : 0));
- Res = Builder.CreateInsertElement(Res, Ops[1],
- (uint64_t)(isLittleEndian ? 0 : 1));
- return Builder.CreateBitCast(Res, ConvertType(E->getType()));
- }
- case PPC::BI__builtin_unpack_vector_int128: {
- ConstantInt *Index = cast<ConstantInt>(Ops[1]);
- Value *Unpacked = Builder.CreateBitCast(
- Ops[0], llvm::VectorType::get(ConvertType(E->getType()), 2));
- if (getTarget().isLittleEndian())
- Index = ConstantInt::get(Index->getType(), 1 - Index->getZExtValue());
- return Builder.CreateExtractElement(Unpacked, Index);
- }
- }
- }
- Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned BuiltinID,
- const CallExpr *E) {
- switch (BuiltinID) {
- case AMDGPU::BI__builtin_amdgcn_div_scale:
- case AMDGPU::BI__builtin_amdgcn_div_scalef: {
- // Translate from the intrinsic's struct return to the builtin's out
- // argument.
- Address FlagOutPtr = EmitPointerWithAlignment(E->getArg(3));
- llvm::Value *X = EmitScalarExpr(E->getArg(0));
- llvm::Value *Y = EmitScalarExpr(E->getArg(1));
- llvm::Value *Z = EmitScalarExpr(E->getArg(2));
- llvm::Function *Callee = CGM.getIntrinsic(Intrinsic::amdgcn_div_scale,
- X->getType());
- llvm::Value *Tmp = Builder.CreateCall(Callee, {X, Y, Z});
- llvm::Value *Result = Builder.CreateExtractValue(Tmp, 0);
- llvm::Value *Flag = Builder.CreateExtractValue(Tmp, 1);
- llvm::Type *RealFlagType
- = FlagOutPtr.getPointer()->getType()->getPointerElementType();
- llvm::Value *FlagExt = Builder.CreateZExt(Flag, RealFlagType);
- Builder.CreateStore(FlagExt, FlagOutPtr);
- return Result;
- }
- case AMDGPU::BI__builtin_amdgcn_div_fmas:
- case AMDGPU::BI__builtin_amdgcn_div_fmasf: {
- llvm::Value *Src0 = EmitScalarExpr(E->getArg(0));
- llvm::Value *Src1 = EmitScalarExpr(E->getArg(1));
- llvm::Value *Src2 = EmitScalarExpr(E->getArg(2));
- llvm::Value *Src3 = EmitScalarExpr(E->getArg(3));
- llvm::Function *F = CGM.getIntrinsic(Intrinsic::amdgcn_div_fmas,
- Src0->getType());
- llvm::Value *Src3ToBool = Builder.CreateIsNotNull(Src3);
- return Builder.CreateCall(F, {Src0, Src1, Src2, Src3ToBool});
- }
- case AMDGPU::BI__builtin_amdgcn_ds_swizzle:
- return emitBinaryBuiltin(*this, E, Intrinsic::amdgcn_ds_swizzle);
- case AMDGPU::BI__builtin_amdgcn_mov_dpp:
- case AMDGPU::BI__builtin_amdgcn_update_dpp: {
- llvm::SmallVector<llvm::Value *, 6> Args;
- for (unsigned I = 0; I != E->getNumArgs(); ++I)
- Args.push_back(EmitScalarExpr(E->getArg(I)));
- assert(Args.size() == 5 || Args.size() == 6);
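- // mov_dpp lacks the "old" operand that update_dpp takes first, so
- // prepend an undef to reuse the update_dpp intrinsic.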
- if (Args.size() == 5)
- Args.insert(Args.begin(), llvm::UndefValue::get(Args[0]->getType()));
- Function *F =
- CGM.getIntrinsic(Intrinsic::amdgcn_update_dpp, Args[0]->getType());
- return Builder.CreateCall(F, Args);
- }
- case AMDGPU::BI__builtin_amdgcn_div_fixup:
- case AMDGPU::BI__builtin_amdgcn_div_fixupf:
- case AMDGPU::BI__builtin_amdgcn_div_fixuph:
- return emitTernaryBuiltin(*this, E, Intrinsic::amdgcn_div_fixup);
- case AMDGPU::BI__builtin_amdgcn_trig_preop:
- case AMDGPU::BI__builtin_amdgcn_trig_preopf:
- return emitFPIntBuiltin(*this, E, Intrinsic::amdgcn_trig_preop);
- case AMDGPU::BI__builtin_amdgcn_rcp:
- case AMDGPU::BI__builtin_amdgcn_rcpf:
- case AMDGPU::BI__builtin_amdgcn_rcph:
- return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_rcp);
- case AMDGPU::BI__builtin_amdgcn_rsq:
- case AMDGPU::BI__builtin_amdgcn_rsqf:
- case AMDGPU::BI__builtin_amdgcn_rsqh:
- return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_rsq);
- case AMDGPU::BI__builtin_amdgcn_rsq_clamp:
- case AMDGPU::BI__builtin_amdgcn_rsq_clampf:
- return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_rsq_clamp);
- case AMDGPU::BI__builtin_amdgcn_sinf:
- case AMDGPU::BI__builtin_amdgcn_sinh:
- return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_sin);
- case AMDGPU::BI__builtin_amdgcn_cosf:
- case AMDGPU::BI__builtin_amdgcn_cosh:
- return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_cos);
- case AMDGPU::BI__builtin_amdgcn_log_clampf:
- return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_log_clamp);
- case AMDGPU::BI__builtin_amdgcn_ldexp:
- case AMDGPU::BI__builtin_amdgcn_ldexpf:
- case AMDGPU::BI__builtin_amdgcn_ldexph:
- return emitFPIntBuiltin(*this, E, Intrinsic::amdgcn_ldexp);
- case AMDGPU::BI__builtin_amdgcn_frexp_mant:
- case AMDGPU::BI__builtin_amdgcn_frexp_mantf:
- case AMDGPU::BI__builtin_amdgcn_frexp_manth:
- return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_frexp_mant);
- case AMDGPU::BI__builtin_amdgcn_frexp_exp:
- case AMDGPU::BI__builtin_amdgcn_frexp_expf: {
- Value *Src0 = EmitScalarExpr(E->getArg(0));
- Function *F = CGM.getIntrinsic(Intrinsic::amdgcn_frexp_exp,
- { Builder.getInt32Ty(), Src0->getType() });
- return Builder.CreateCall(F, Src0);
- }
- case AMDGPU::BI__builtin_amdgcn_frexp_exph: {
- Value *Src0 = EmitScalarExpr(E->getArg(0));
- Function *F = CGM.getIntrinsic(Intrinsic::amdgcn_frexp_exp,
- { Builder.getInt16Ty(), Src0->getType() });
- return Builder.CreateCall(F, Src0);
- }
- case AMDGPU::BI__builtin_amdgcn_fract:
- case AMDGPU::BI__builtin_amdgcn_fractf:
- case AMDGPU::BI__builtin_amdgcn_fracth:
- return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_fract);
- case AMDGPU::BI__builtin_amdgcn_lerp:
- return emitTernaryBuiltin(*this, E, Intrinsic::amdgcn_lerp);
- case AMDGPU::BI__builtin_amdgcn_uicmp:
- case AMDGPU::BI__builtin_amdgcn_uicmpl:
- case AMDGPU::BI__builtin_amdgcn_sicmp:
- case AMDGPU::BI__builtin_amdgcn_sicmpl: {
- llvm::Value *Src0 = EmitScalarExpr(E->getArg(0));
- llvm::Value *Src1 = EmitScalarExpr(E->getArg(1));
- llvm::Value *Src2 = EmitScalarExpr(E->getArg(2));
- // FIXME-GFX10: How should a 32-bit mask be handled?
- Value *F = CGM.getIntrinsic(Intrinsic::amdgcn_icmp,
- { Builder.getInt64Ty(), Src0->getType() });
- return Builder.CreateCall(F, { Src0, Src1, Src2 });
- }
- case AMDGPU::BI__builtin_amdgcn_fcmp:
- case AMDGPU::BI__builtin_amdgcn_fcmpf: {
- llvm::Value *Src0 = EmitScalarExpr(E->getArg(0));
- llvm::Value *Src1 = EmitScalarExpr(E->getArg(1));
- llvm::Value *Src2 = EmitScalarExpr(E->getArg(2));
- // FIXME-GFX10: How should a 32-bit mask be handled?
- Value *F = CGM.getIntrinsic(Intrinsic::amdgcn_fcmp,
- { Builder.getInt64Ty(), Src0->getType() });
- return Builder.CreateCall(F, { Src0, Src1, Src2 });
- }
- case AMDGPU::BI__builtin_amdgcn_class:
- case AMDGPU::BI__builtin_amdgcn_classf:
- case AMDGPU::BI__builtin_amdgcn_classh:
- return emitFPIntBuiltin(*this, E, Intrinsic::amdgcn_class);
- case AMDGPU::BI__builtin_amdgcn_fmed3f:
- case AMDGPU::BI__builtin_amdgcn_fmed3h:
- return emitTernaryBuiltin(*this, E, Intrinsic::amdgcn_fmed3);
- case AMDGPU::BI__builtin_amdgcn_ds_append:
- case AMDGPU::BI__builtin_amdgcn_ds_consume: {
- Intrinsic::ID Intrin = BuiltinID == AMDGPU::BI__builtin_amdgcn_ds_append ?
- Intrinsic::amdgcn_ds_append : Intrinsic::amdgcn_ds_consume;
- Value *Src0 = EmitScalarExpr(E->getArg(0));
- Function *F = CGM.getIntrinsic(Intrin, { Src0->getType() });
- return Builder.CreateCall(F, { Src0, Builder.getFalse() });
- }
- case AMDGPU::BI__builtin_amdgcn_read_exec: {
- CallInst *CI = cast<CallInst>(
- EmitSpecialRegisterBuiltin(*this, E, Int64Ty, Int64Ty, true, "exec"));
- CI->setConvergent();
- return CI;
- }
- case AMDGPU::BI__builtin_amdgcn_read_exec_lo:
- case AMDGPU::BI__builtin_amdgcn_read_exec_hi: {
- StringRef RegName = BuiltinID == AMDGPU::BI__builtin_amdgcn_read_exec_lo ?
- "exec_lo" : "exec_hi";
- CallInst *CI = cast<CallInst>(
- EmitSpecialRegisterBuiltin(*this, E, Int32Ty, Int32Ty, true, RegName));
- CI->setConvergent();
- return CI;
- }
- // amdgcn workitem
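- // emitRangedBuiltin attaches !range metadata (here [0, 1024)) to the
- // call so later passes can fold comparisons against the workitem id.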
- case AMDGPU::BI__builtin_amdgcn_workitem_id_x:
- return emitRangedBuiltin(*this, Intrinsic::amdgcn_workitem_id_x, 0, 1024);
- case AMDGPU::BI__builtin_amdgcn_workitem_id_y:
- return emitRangedBuiltin(*this, Intrinsic::amdgcn_workitem_id_y, 0, 1024);
- case AMDGPU::BI__builtin_amdgcn_workitem_id_z:
- return emitRangedBuiltin(*this, Intrinsic::amdgcn_workitem_id_z, 0, 1024);
- // r600 intrinsics
- case AMDGPU::BI__builtin_r600_recipsqrt_ieee:
- case AMDGPU::BI__builtin_r600_recipsqrt_ieeef:
- return emitUnaryBuiltin(*this, E, Intrinsic::r600_recipsqrt_ieee);
- case AMDGPU::BI__builtin_r600_read_tidig_x:
- return emitRangedBuiltin(*this, Intrinsic::r600_read_tidig_x, 0, 1024);
- case AMDGPU::BI__builtin_r600_read_tidig_y:
- return emitRangedBuiltin(*this, Intrinsic::r600_read_tidig_y, 0, 1024);
- case AMDGPU::BI__builtin_r600_read_tidig_z:
- return emitRangedBuiltin(*this, Intrinsic::r600_read_tidig_z, 0, 1024);
- default:
- return nullptr;
- }
- }
- /// Handle a SystemZ function in which the final argument is a pointer
- /// to an int that receives the post-instruction CC value. At the LLVM level
- /// this is represented as a function that returns a {result, cc} pair.
- static Value *EmitSystemZIntrinsicWithCC(CodeGenFunction &CGF,
- unsigned IntrinsicID,
- const CallExpr *E) {
- unsigned NumArgs = E->getNumArgs() - 1;
- SmallVector<Value *, 8> Args(NumArgs);
- for (unsigned I = 0; I < NumArgs; ++I)
- Args[I] = CGF.EmitScalarExpr(E->getArg(I));
- Address CCPtr = CGF.EmitPointerWithAlignment(E->getArg(NumArgs));
- Function *F = CGF.CGM.getIntrinsic(IntrinsicID);
- Value *Call = CGF.Builder.CreateCall(F, Args);
- Value *CC = CGF.Builder.CreateExtractValue(Call, 1);
- CGF.Builder.CreateStore(CC, CCPtr);
- return CGF.Builder.CreateExtractValue(Call, 0);
- }
- Value *CodeGenFunction::EmitSystemZBuiltinExpr(unsigned BuiltinID,
- const CallExpr *E) {
- switch (BuiltinID) {
- case SystemZ::BI__builtin_tbegin: {
- Value *TDB = EmitScalarExpr(E->getArg(0));
- Value *Control = llvm::ConstantInt::get(Int32Ty, 0xff0c);
- Function *F = CGM.getIntrinsic(Intrinsic::s390_tbegin);
- return Builder.CreateCall(F, {TDB, Control});
- }
- case SystemZ::BI__builtin_tbegin_nofloat: {
- Value *TDB = EmitScalarExpr(E->getArg(0));
- Value *Control = llvm::ConstantInt::get(Int32Ty, 0xff0c);
- Function *F = CGM.getIntrinsic(Intrinsic::s390_tbegin_nofloat);
- return Builder.CreateCall(F, {TDB, Control});
- }
- case SystemZ::BI__builtin_tbeginc: {
- Value *TDB = llvm::ConstantPointerNull::get(Int8PtrTy);
- Value *Control = llvm::ConstantInt::get(Int32Ty, 0xff08);
- Function *F = CGM.getIntrinsic(Intrinsic::s390_tbeginc);
- return Builder.CreateCall(F, {TDB, Control});
- }
- case SystemZ::BI__builtin_tabort: {
- Value *Data = EmitScalarExpr(E->getArg(0));
- Function *F = CGM.getIntrinsic(Intrinsic::s390_tabort);
- return Builder.CreateCall(F, Builder.CreateSExt(Data, Int64Ty, "tabort"));
- }
- case SystemZ::BI__builtin_non_tx_store: {
- Value *Address = EmitScalarExpr(E->getArg(0));
- Value *Data = EmitScalarExpr(E->getArg(1));
- Function *F = CGM.getIntrinsic(Intrinsic::s390_ntstg);
- return Builder.CreateCall(F, {Data, Address});
- }
- // Vector builtins. Note that most vector builtins are mapped automatically
- // to target-specific LLVM intrinsics. The ones handled specially here can
- // be represented via standard LLVM IR, which is preferable because it
- // enables common LLVM optimizations.
- case SystemZ::BI__builtin_s390_vpopctb:
- case SystemZ::BI__builtin_s390_vpopcth:
- case SystemZ::BI__builtin_s390_vpopctf:
- case SystemZ::BI__builtin_s390_vpopctg: {
- llvm::Type *ResultType = ConvertType(E->getType());
- Value *X = EmitScalarExpr(E->getArg(0));
- Function *F = CGM.getIntrinsic(Intrinsic::ctpop, ResultType);
- return Builder.CreateCall(F, X);
- }
- case SystemZ::BI__builtin_s390_vclzb:
- case SystemZ::BI__builtin_s390_vclzh:
- case SystemZ::BI__builtin_s390_vclzf:
- case SystemZ::BI__builtin_s390_vclzg: {
- llvm::Type *ResultType = ConvertType(E->getType());
- Value *X = EmitScalarExpr(E->getArg(0));
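- // The second i1 operand of llvm.ctlz/llvm.cttz is the 'is_zero_undef' flag;
- // passing false keeps the result defined (the bit width) for a zero input,
- // matching the vector count-leading/trailing-zeros semantics.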
- Value *Undef = ConstantInt::get(Builder.getInt1Ty(), false);
- Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ResultType);
- return Builder.CreateCall(F, {X, Undef});
- }
- case SystemZ::BI__builtin_s390_vctzb:
- case SystemZ::BI__builtin_s390_vctzh:
- case SystemZ::BI__builtin_s390_vctzf:
- case SystemZ::BI__builtin_s390_vctzg: {
- llvm::Type *ResultType = ConvertType(E->getType());
- Value *X = EmitScalarExpr(E->getArg(0));
- Value *Undef = ConstantInt::get(Builder.getInt1Ty(), false);
- Function *F = CGM.getIntrinsic(Intrinsic::cttz, ResultType);
- return Builder.CreateCall(F, {X, Undef});
- }
- case SystemZ::BI__builtin_s390_vfsqsb:
- case SystemZ::BI__builtin_s390_vfsqdb: {
- llvm::Type *ResultType = ConvertType(E->getType());
- Value *X = EmitScalarExpr(E->getArg(0));
- Function *F = CGM.getIntrinsic(Intrinsic::sqrt, ResultType);
- return Builder.CreateCall(F, X);
- }
- case SystemZ::BI__builtin_s390_vfmasb:
- case SystemZ::BI__builtin_s390_vfmadb: {
- llvm::Type *ResultType = ConvertType(E->getType());
- Value *X = EmitScalarExpr(E->getArg(0));
- Value *Y = EmitScalarExpr(E->getArg(1));
- Value *Z = EmitScalarExpr(E->getArg(2));
- Function *F = CGM.getIntrinsic(Intrinsic::fma, ResultType);
- return Builder.CreateCall(F, {X, Y, Z});
- }
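- // The multiply-and-subtract / negated variants below have no single LLVM
- // intrinsic; they are expressed through llvm.fma with negated operands:
- // vfms = fma(x, y, -z), vfnma = -fma(x, y, z), vfnms = -fma(x, y, -z).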
- case SystemZ::BI__builtin_s390_vfmssb:
- case SystemZ::BI__builtin_s390_vfmsdb: {
- llvm::Type *ResultType = ConvertType(E->getType());
- Value *X = EmitScalarExpr(E->getArg(0));
- Value *Y = EmitScalarExpr(E->getArg(1));
- Value *Z = EmitScalarExpr(E->getArg(2));
- Value *Zero = llvm::ConstantFP::getZeroValueForNegation(ResultType);
- Function *F = CGM.getIntrinsic(Intrinsic::fma, ResultType);
- return Builder.CreateCall(F, {X, Y, Builder.CreateFSub(Zero, Z, "sub")});
- }
- case SystemZ::BI__builtin_s390_vfnmasb:
- case SystemZ::BI__builtin_s390_vfnmadb: {
- llvm::Type *ResultType = ConvertType(E->getType());
- Value *X = EmitScalarExpr(E->getArg(0));
- Value *Y = EmitScalarExpr(E->getArg(1));
- Value *Z = EmitScalarExpr(E->getArg(2));
- Value *Zero = llvm::ConstantFP::getZeroValueForNegation(ResultType);
- Function *F = CGM.getIntrinsic(Intrinsic::fma, ResultType);
- return Builder.CreateFSub(Zero, Builder.CreateCall(F, {X, Y, Z}), "sub");
- }
- case SystemZ::BI__builtin_s390_vfnmssb:
- case SystemZ::BI__builtin_s390_vfnmsdb: {
- llvm::Type *ResultType = ConvertType(E->getType());
- Value *X = EmitScalarExpr(E->getArg(0));
- Value *Y = EmitScalarExpr(E->getArg(1));
- Value *Z = EmitScalarExpr(E->getArg(2));
- Value *Zero = llvm::ConstantFP::getZeroValueForNegation(ResultType);
- Function *F = CGM.getIntrinsic(Intrinsic::fma, ResultType);
- Value *NegZ = Builder.CreateFSub(Zero, Z, "sub");
- return Builder.CreateFSub(Zero, Builder.CreateCall(F, {X, Y, NegZ}));
- }
- case SystemZ::BI__builtin_s390_vflpsb:
- case SystemZ::BI__builtin_s390_vflpdb: {
- llvm::Type *ResultType = ConvertType(E->getType());
- Value *X = EmitScalarExpr(E->getArg(0));
- Function *F = CGM.getIntrinsic(Intrinsic::fabs, ResultType);
- return Builder.CreateCall(F, X);
- }
- case SystemZ::BI__builtin_s390_vflnsb:
- case SystemZ::BI__builtin_s390_vflndb: {
- llvm::Type *ResultType = ConvertType(E->getType());
- Value *X = EmitScalarExpr(E->getArg(0));
- Value *Zero = llvm::ConstantFP::getZeroValueForNegation(ResultType);
- Function *F = CGM.getIntrinsic(Intrinsic::fabs, ResultType);
- return Builder.CreateFSub(Zero, Builder.CreateCall(F, X), "sub");
- }
- case SystemZ::BI__builtin_s390_vfisb:
- case SystemZ::BI__builtin_s390_vfidb: {
- llvm::Type *ResultType = ConvertType(E->getType());
- Value *X = EmitScalarExpr(E->getArg(0));
- // Constant-fold the M4 and M5 mask arguments.
- llvm::APSInt M4, M5;
- bool IsConstM4 = E->getArg(1)->isIntegerConstantExpr(M4, getContext());
- bool IsConstM5 = E->getArg(2)->isIntegerConstantExpr(M5, getContext());
- assert(IsConstM4 && IsConstM5 && "Constant arg isn't actually constant?");
- (void)IsConstM4; (void)IsConstM5;
- // Check whether this instance can be represented via an LLVM standard
- // intrinsic. We only support some combinations of M4 and M5.
- Intrinsic::ID ID = Intrinsic::not_intrinsic;
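- // M5 selects the rounding method: 0 = current mode, 1 = nearest with ties
- // away from zero, 5 = toward zero, 6 = toward +inf, 7 = toward -inf. M4 = 4
- // suppresses the IEEE inexact exception, hence rint vs. nearbyint for M5 = 0.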
- switch (M4.getZExtValue()) {
- default: break;
- case 0: // IEEE-inexact exception allowed
- switch (M5.getZExtValue()) {
- default: break;
- case 0: ID = Intrinsic::rint; break;
- }
- break;
- case 4: // IEEE-inexact exception suppressed
- switch (M5.getZExtValue()) {
- default: break;
- case 0: ID = Intrinsic::nearbyint; break;
- case 1: ID = Intrinsic::round; break;
- case 5: ID = Intrinsic::trunc; break;
- case 6: ID = Intrinsic::ceil; break;
- case 7: ID = Intrinsic::floor; break;
- }
- break;
- }
- if (ID != Intrinsic::not_intrinsic) {
- Function *F = CGM.getIntrinsic(ID, ResultType);
- return Builder.CreateCall(F, X);
- }
- switch (BuiltinID) {
- case SystemZ::BI__builtin_s390_vfisb: ID = Intrinsic::s390_vfisb; break;
- case SystemZ::BI__builtin_s390_vfidb: ID = Intrinsic::s390_vfidb; break;
- default: llvm_unreachable("Unknown BuiltinID");
- }
- Function *F = CGM.getIntrinsic(ID);
- Value *M4Value = llvm::ConstantInt::get(getLLVMContext(), M4);
- Value *M5Value = llvm::ConstantInt::get(getLLVMContext(), M5);
- return Builder.CreateCall(F, {X, M4Value, M5Value});
- }
- case SystemZ::BI__builtin_s390_vfmaxsb:
- case SystemZ::BI__builtin_s390_vfmaxdb: {
- llvm::Type *ResultType = ConvertType(E->getType());
- Value *X = EmitScalarExpr(E->getArg(0));
- Value *Y = EmitScalarExpr(E->getArg(1));
- // Constant-fold the M4 mask argument.
- llvm::APSInt M4;
- bool IsConstM4 = E->getArg(2)->isIntegerConstantExpr(M4, getContext());
- assert(IsConstM4 && "Constant arg isn't actually constant?");
- (void)IsConstM4;
- // Check whether this instance can be represented via an LLVM standard
- // intrinsic. We only support some values of M4.
- Intrinsic::ID ID = Intrinsic::not_intrinsic;
- switch (M4.getZExtValue()) {
- default: break;
- case 4: ID = Intrinsic::maxnum; break;
- }
- if (ID != Intrinsic::not_intrinsic) {
- Function *F = CGM.getIntrinsic(ID, ResultType);
- return Builder.CreateCall(F, {X, Y});
- }
- switch (BuiltinID) {
- case SystemZ::BI__builtin_s390_vfmaxsb: ID = Intrinsic::s390_vfmaxsb; break;
- case SystemZ::BI__builtin_s390_vfmaxdb: ID = Intrinsic::s390_vfmaxdb; break;
- default: llvm_unreachable("Unknown BuiltinID");
- }
- Function *F = CGM.getIntrinsic(ID);
- Value *M4Value = llvm::ConstantInt::get(getLLVMContext(), M4);
- return Builder.CreateCall(F, {X, Y, M4Value});
- }
- case SystemZ::BI__builtin_s390_vfminsb:
- case SystemZ::BI__builtin_s390_vfmindb: {
- llvm::Type *ResultType = ConvertType(E->getType());
- Value *X = EmitScalarExpr(E->getArg(0));
- Value *Y = EmitScalarExpr(E->getArg(1));
- // Constant-fold the M4 mask argument.
- llvm::APSInt M4;
- bool IsConstM4 = E->getArg(2)->isIntegerConstantExpr(M4, getContext());
- assert(IsConstM4 && "Constant arg isn't actually constant?");
- (void)IsConstM4;
- // Check whether this instance can be represented via an LLVM standard
- // intrinsic. We only support some values of M4.
- Intrinsic::ID ID = Intrinsic::not_intrinsic;
- switch (M4.getZExtValue()) {
- default: break;
- case 4: ID = Intrinsic::minnum; break;
- }
- if (ID != Intrinsic::not_intrinsic) {
- Function *F = CGM.getIntrinsic(ID, ResultType);
- return Builder.CreateCall(F, {X, Y});
- }
- switch (BuiltinID) {
- case SystemZ::BI__builtin_s390_vfminsb: ID = Intrinsic::s390_vfminsb; break;
- case SystemZ::BI__builtin_s390_vfmindb: ID = Intrinsic::s390_vfmindb; break;
- default: llvm_unreachable("Unknown BuiltinID");
- }
- Function *F = CGM.getIntrinsic(ID);
- Value *M4Value = llvm::ConstantInt::get(getLLVMContext(), M4);
- return Builder.CreateCall(F, {X, Y, M4Value});
- }
- case SystemZ::BI__builtin_s390_vlbrh:
- case SystemZ::BI__builtin_s390_vlbrf:
- case SystemZ::BI__builtin_s390_vlbrg: {
- llvm::Type *ResultType = ConvertType(E->getType());
- Value *X = EmitScalarExpr(E->getArg(0));
- Function *F = CGM.getIntrinsic(Intrinsic::bswap, ResultType);
- return Builder.CreateCall(F, X);
- }
- // Vector intrinsics that output the post-instruction CC value.
- #define INTRINSIC_WITH_CC(NAME) \
- case SystemZ::BI__builtin_##NAME: \
- return EmitSystemZIntrinsicWithCC(*this, Intrinsic::NAME, E)
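- // For example, INTRINSIC_WITH_CC(s390_vpkshs) expands to:
- //   case SystemZ::BI__builtin_s390_vpkshs:
- //     return EmitSystemZIntrinsicWithCC(*this, Intrinsic::s390_vpkshs, E);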
- INTRINSIC_WITH_CC(s390_vpkshs);
- INTRINSIC_WITH_CC(s390_vpksfs);
- INTRINSIC_WITH_CC(s390_vpksgs);
- INTRINSIC_WITH_CC(s390_vpklshs);
- INTRINSIC_WITH_CC(s390_vpklsfs);
- INTRINSIC_WITH_CC(s390_vpklsgs);
- INTRINSIC_WITH_CC(s390_vceqbs);
- INTRINSIC_WITH_CC(s390_vceqhs);
- INTRINSIC_WITH_CC(s390_vceqfs);
- INTRINSIC_WITH_CC(s390_vceqgs);
- INTRINSIC_WITH_CC(s390_vchbs);
- INTRINSIC_WITH_CC(s390_vchhs);
- INTRINSIC_WITH_CC(s390_vchfs);
- INTRINSIC_WITH_CC(s390_vchgs);
- INTRINSIC_WITH_CC(s390_vchlbs);
- INTRINSIC_WITH_CC(s390_vchlhs);
- INTRINSIC_WITH_CC(s390_vchlfs);
- INTRINSIC_WITH_CC(s390_vchlgs);
- INTRINSIC_WITH_CC(s390_vfaebs);
- INTRINSIC_WITH_CC(s390_vfaehs);
- INTRINSIC_WITH_CC(s390_vfaefs);
- INTRINSIC_WITH_CC(s390_vfaezbs);
- INTRINSIC_WITH_CC(s390_vfaezhs);
- INTRINSIC_WITH_CC(s390_vfaezfs);
- INTRINSIC_WITH_CC(s390_vfeebs);
- INTRINSIC_WITH_CC(s390_vfeehs);
- INTRINSIC_WITH_CC(s390_vfeefs);
- INTRINSIC_WITH_CC(s390_vfeezbs);
- INTRINSIC_WITH_CC(s390_vfeezhs);
- INTRINSIC_WITH_CC(s390_vfeezfs);
- INTRINSIC_WITH_CC(s390_vfenebs);
- INTRINSIC_WITH_CC(s390_vfenehs);
- INTRINSIC_WITH_CC(s390_vfenefs);
- INTRINSIC_WITH_CC(s390_vfenezbs);
- INTRINSIC_WITH_CC(s390_vfenezhs);
- INTRINSIC_WITH_CC(s390_vfenezfs);
- INTRINSIC_WITH_CC(s390_vistrbs);
- INTRINSIC_WITH_CC(s390_vistrhs);
- INTRINSIC_WITH_CC(s390_vistrfs);
- INTRINSIC_WITH_CC(s390_vstrcbs);
- INTRINSIC_WITH_CC(s390_vstrchs);
- INTRINSIC_WITH_CC(s390_vstrcfs);
- INTRINSIC_WITH_CC(s390_vstrczbs);
- INTRINSIC_WITH_CC(s390_vstrczhs);
- INTRINSIC_WITH_CC(s390_vstrczfs);
- INTRINSIC_WITH_CC(s390_vfcesbs);
- INTRINSIC_WITH_CC(s390_vfcedbs);
- INTRINSIC_WITH_CC(s390_vfchsbs);
- INTRINSIC_WITH_CC(s390_vfchdbs);
- INTRINSIC_WITH_CC(s390_vfchesbs);
- INTRINSIC_WITH_CC(s390_vfchedbs);
- INTRINSIC_WITH_CC(s390_vftcisb);
- INTRINSIC_WITH_CC(s390_vftcidb);
- INTRINSIC_WITH_CC(s390_vstrsb);
- INTRINSIC_WITH_CC(s390_vstrsh);
- INTRINSIC_WITH_CC(s390_vstrsf);
- INTRINSIC_WITH_CC(s390_vstrszb);
- INTRINSIC_WITH_CC(s390_vstrszh);
- INTRINSIC_WITH_CC(s390_vstrszf);
- #undef INTRINSIC_WITH_CC
- default:
- return nullptr;
- }
- }
- namespace {
- // Helper classes for mapping MMA builtins to a particular LLVM intrinsic variant.
- struct NVPTXMmaLdstInfo {
- unsigned NumResults; // Number of elements to load/store
- // Intrinsic IDs for row/col variants; 0 if a particular layout is unsupported.
- unsigned IID_col;
- unsigned IID_row;
- };
- #define MMA_INTR(geom_op_type, layout) \
- Intrinsic::nvvm_wmma_##geom_op_type##_##layout##_stride
- #define MMA_LDST(n, geom_op_type) \
- { n, MMA_INTR(geom_op_type, col), MMA_INTR(geom_op_type, row) }
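- // For example, MMA_LDST(8, m16n16k16_load_a_f16) expands to
- //   { 8, Intrinsic::nvvm_wmma_m16n16k16_load_a_f16_col_stride,
- //        Intrinsic::nvvm_wmma_m16n16k16_load_a_f16_row_stride }.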
- static NVPTXMmaLdstInfo getNVPTXMmaLdstInfo(unsigned BuiltinID) {
- switch (BuiltinID) {
- // FP MMA loads
- case NVPTX::BI__hmma_m16n16k16_ld_a:
- return MMA_LDST(8, m16n16k16_load_a_f16);
- case NVPTX::BI__hmma_m16n16k16_ld_b:
- return MMA_LDST(8, m16n16k16_load_b_f16);
- case NVPTX::BI__hmma_m16n16k16_ld_c_f16:
- return MMA_LDST(4, m16n16k16_load_c_f16);
- case NVPTX::BI__hmma_m16n16k16_ld_c_f32:
- return MMA_LDST(8, m16n16k16_load_c_f32);
- case NVPTX::BI__hmma_m32n8k16_ld_a:
- return MMA_LDST(8, m32n8k16_load_a_f16);
- case NVPTX::BI__hmma_m32n8k16_ld_b:
- return MMA_LDST(8, m32n8k16_load_b_f16);
- case NVPTX::BI__hmma_m32n8k16_ld_c_f16:
- return MMA_LDST(4, m32n8k16_load_c_f16);
- case NVPTX::BI__hmma_m32n8k16_ld_c_f32:
- return MMA_LDST(8, m32n8k16_load_c_f32);
- case NVPTX::BI__hmma_m8n32k16_ld_a:
- return MMA_LDST(8, m8n32k16_load_a_f16);
- case NVPTX::BI__hmma_m8n32k16_ld_b:
- return MMA_LDST(8, m8n32k16_load_b_f16);
- case NVPTX::BI__hmma_m8n32k16_ld_c_f16:
- return MMA_LDST(4, m8n32k16_load_c_f16);
- case NVPTX::BI__hmma_m8n32k16_ld_c_f32:
- return MMA_LDST(8, m8n32k16_load_c_f32);
- // Integer MMA loads
- case NVPTX::BI__imma_m16n16k16_ld_a_s8:
- return MMA_LDST(2, m16n16k16_load_a_s8);
- case NVPTX::BI__imma_m16n16k16_ld_a_u8:
- return MMA_LDST(2, m16n16k16_load_a_u8);
- case NVPTX::BI__imma_m16n16k16_ld_b_s8:
- return MMA_LDST(2, m16n16k16_load_b_s8);
- case NVPTX::BI__imma_m16n16k16_ld_b_u8:
- return MMA_LDST(2, m16n16k16_load_b_u8);
- case NVPTX::BI__imma_m16n16k16_ld_c:
- return MMA_LDST(8, m16n16k16_load_c_s32);
- case NVPTX::BI__imma_m32n8k16_ld_a_s8:
- return MMA_LDST(4, m32n8k16_load_a_s8);
- case NVPTX::BI__imma_m32n8k16_ld_a_u8:
- return MMA_LDST(4, m32n8k16_load_a_u8);
- case NVPTX::BI__imma_m32n8k16_ld_b_s8:
- return MMA_LDST(1, m32n8k16_load_b_s8);
- case NVPTX::BI__imma_m32n8k16_ld_b_u8:
- return MMA_LDST(1, m32n8k16_load_b_u8);
- case NVPTX::BI__imma_m32n8k16_ld_c:
- return MMA_LDST(8, m32n8k16_load_c_s32);
- case NVPTX::BI__imma_m8n32k16_ld_a_s8:
- return MMA_LDST(1, m8n32k16_load_a_s8);
- case NVPTX::BI__imma_m8n32k16_ld_a_u8:
- return MMA_LDST(1, m8n32k16_load_a_u8);
- case NVPTX::BI__imma_m8n32k16_ld_b_s8:
- return MMA_LDST(4, m8n32k16_load_b_s8);
- case NVPTX::BI__imma_m8n32k16_ld_b_u8:
- return MMA_LDST(4, m8n32k16_load_b_u8);
- case NVPTX::BI__imma_m8n32k16_ld_c:
- return MMA_LDST(8, m8n32k16_load_c_s32);
- // Sub-integer MMA loads.
- // Only row/col layout is supported by A/B fragments.
- case NVPTX::BI__imma_m8n8k32_ld_a_s4:
- return {1, 0, MMA_INTR(m8n8k32_load_a_s4, row)};
- case NVPTX::BI__imma_m8n8k32_ld_a_u4:
- return {1, 0, MMA_INTR(m8n8k32_load_a_u4, row)};
- case NVPTX::BI__imma_m8n8k32_ld_b_s4:
- return {1, MMA_INTR(m8n8k32_load_b_s4, col), 0};
- case NVPTX::BI__imma_m8n8k32_ld_b_u4:
- return {1, MMA_INTR(m8n8k32_load_b_u4, col), 0};
- case NVPTX::BI__imma_m8n8k32_ld_c:
- return MMA_LDST(2, m8n8k32_load_c_s32);
- case NVPTX::BI__bmma_m8n8k128_ld_a_b1:
- return {1, 0, MMA_INTR(m8n8k128_load_a_b1, row)};
- case NVPTX::BI__bmma_m8n8k128_ld_b_b1:
- return {1, MMA_INTR(m8n8k128_load_b_b1, col), 0};
- case NVPTX::BI__bmma_m8n8k128_ld_c:
- return MMA_LDST(2, m8n8k128_load_c_s32);
- // NOTE: We need to follow the inconsistent naming scheme used by NVCC. Unlike
- // PTX and LLVM IR, where stores always use fragment D, NVCC builtins always
- // use fragment C for both loads and stores.
- // FP MMA stores.
- case NVPTX::BI__hmma_m16n16k16_st_c_f16:
- return MMA_LDST(4, m16n16k16_store_d_f16);
- case NVPTX::BI__hmma_m16n16k16_st_c_f32:
- return MMA_LDST(8, m16n16k16_store_d_f32);
- case NVPTX::BI__hmma_m32n8k16_st_c_f16:
- return MMA_LDST(4, m32n8k16_store_d_f16);
- case NVPTX::BI__hmma_m32n8k16_st_c_f32:
- return MMA_LDST(8, m32n8k16_store_d_f32);
- case NVPTX::BI__hmma_m8n32k16_st_c_f16:
- return MMA_LDST(4, m8n32k16_store_d_f16);
- case NVPTX::BI__hmma_m8n32k16_st_c_f32:
- return MMA_LDST(8, m8n32k16_store_d_f32);
- // Integer and sub-integer MMA stores.
- // Another naming quirk. Unlike other MMA builtins that use PTX types in the
- // name, integer loads/stores use LLVM's i32.
- case NVPTX::BI__imma_m16n16k16_st_c_i32:
- return MMA_LDST(8, m16n16k16_store_d_s32);
- case NVPTX::BI__imma_m32n8k16_st_c_i32:
- return MMA_LDST(8, m32n8k16_store_d_s32);
- case NVPTX::BI__imma_m8n32k16_st_c_i32:
- return MMA_LDST(8, m8n32k16_store_d_s32);
- case NVPTX::BI__imma_m8n8k32_st_c_i32:
- return MMA_LDST(2, m8n8k32_store_d_s32);
- case NVPTX::BI__bmma_m8n8k128_st_c_i32:
- return MMA_LDST(2, m8n8k128_store_d_s32);
- default:
- llvm_unreachable("Unknown MMA builtin");
- }
- }
- #undef MMA_LDST
- #undef MMA_INTR
- struct NVPTXMmaInfo {
- unsigned NumEltsA;
- unsigned NumEltsB;
- unsigned NumEltsC;
- unsigned NumEltsD;
- std::array<unsigned, 8> Variants;
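- // Variants is indexed as Layout * 2 + Satf; Layout encodes the (A, B)
- // majorness in MMA_VARIANTS order: 0 = row.row, 1 = row.col, 2 = col.row,
- // 3 = col.col.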
- unsigned getMMAIntrinsic(int Layout, bool Satf) {
- unsigned Index = Layout * 2 + Satf;
- if (Index >= Variants.size())
- return 0;
- return Variants[Index];
- }
- };
- // Returns the NVPTXMmaInfo for the given builtin; its getMMAIntrinsic method
- // yields an intrinsic matching Layout and Satf for valid combinations, and 0
- // otherwise.
- static NVPTXMmaInfo getNVPTXMmaInfo(unsigned BuiltinID) {
- // clang-format off
- #define MMA_VARIANTS(geom, type) {{ \
- Intrinsic::nvvm_wmma_##geom##_mma_row_row_##type, \
- Intrinsic::nvvm_wmma_##geom##_mma_row_row_##type##_satfinite, \
- Intrinsic::nvvm_wmma_##geom##_mma_row_col_##type, \
- Intrinsic::nvvm_wmma_##geom##_mma_row_col_##type##_satfinite, \
- Intrinsic::nvvm_wmma_##geom##_mma_col_row_##type, \
- Intrinsic::nvvm_wmma_##geom##_mma_col_row_##type##_satfinite, \
- Intrinsic::nvvm_wmma_##geom##_mma_col_col_##type, \
- Intrinsic::nvvm_wmma_##geom##_mma_col_col_##type##_satfinite \
- }}
- // Sub-integer MMA only supports row.col layout.
- #define MMA_VARIANTS_I4(geom, type) {{ \
- 0, \
- 0, \
- Intrinsic::nvvm_wmma_##geom##_mma_row_col_##type, \
- Intrinsic::nvvm_wmma_##geom##_mma_row_col_##type##_satfinite, \
- 0, \
- 0, \
- 0, \
- 0 \
- }}
- // b1 MMA does not support .satfinite.
- #define MMA_VARIANTS_B1(geom, type) {{ \
- 0, \
- 0, \
- Intrinsic::nvvm_wmma_##geom##_mma_row_col_##type, \
- 0, \
- 0, \
- 0, \
- 0, \
- 0 \
- }}
- // clang-format on
- switch (BuiltinID) {
- // FP MMA
- // Note that the 'type' argument of MMA_VARIANTS uses D_C notation, while
- // the NumElts fields of the return value are ordered as A,B,C,D.
- case NVPTX::BI__hmma_m16n16k16_mma_f16f16:
- return {8, 8, 4, 4, MMA_VARIANTS(m16n16k16, f16_f16)};
- case NVPTX::BI__hmma_m16n16k16_mma_f32f16:
- return {8, 8, 4, 8, MMA_VARIANTS(m16n16k16, f32_f16)};
- case NVPTX::BI__hmma_m16n16k16_mma_f16f32:
- return {8, 8, 8, 4, MMA_VARIANTS(m16n16k16, f16_f32)};
- case NVPTX::BI__hmma_m16n16k16_mma_f32f32:
- return {8, 8, 8, 8, MMA_VARIANTS(m16n16k16, f32_f32)};
- case NVPTX::BI__hmma_m32n8k16_mma_f16f16:
- return {8, 8, 4, 4, MMA_VARIANTS(m32n8k16, f16_f16)};
- case NVPTX::BI__hmma_m32n8k16_mma_f32f16:
- return {8, 8, 4, 8, MMA_VARIANTS(m32n8k16, f32_f16)};
- case NVPTX::BI__hmma_m32n8k16_mma_f16f32:
- return {8, 8, 8, 4, MMA_VARIANTS(m32n8k16, f16_f32)};
- case NVPTX::BI__hmma_m32n8k16_mma_f32f32:
- return {8, 8, 8, 8, MMA_VARIANTS(m32n8k16, f32_f32)};
- case NVPTX::BI__hmma_m8n32k16_mma_f16f16:
- return {8, 8, 4, 4, MMA_VARIANTS(m8n32k16, f16_f16)};
- case NVPTX::BI__hmma_m8n32k16_mma_f32f16:
- return {8, 8, 4, 8, MMA_VARIANTS(m8n32k16, f32_f16)};
- case NVPTX::BI__hmma_m8n32k16_mma_f16f32:
- return {8, 8, 8, 4, MMA_VARIANTS(m8n32k16, f16_f32)};
- case NVPTX::BI__hmma_m8n32k16_mma_f32f32:
- return {8, 8, 8, 8, MMA_VARIANTS(m8n32k16, f32_f32)};
- // Integer MMA
- case NVPTX::BI__imma_m16n16k16_mma_s8:
- return {2, 2, 8, 8, MMA_VARIANTS(m16n16k16, s8)};
- case NVPTX::BI__imma_m16n16k16_mma_u8:
- return {2, 2, 8, 8, MMA_VARIANTS(m16n16k16, u8)};
- case NVPTX::BI__imma_m32n8k16_mma_s8:
- return {4, 1, 8, 8, MMA_VARIANTS(m32n8k16, s8)};
- case NVPTX::BI__imma_m32n8k16_mma_u8:
- return {4, 1, 8, 8, MMA_VARIANTS(m32n8k16, u8)};
- case NVPTX::BI__imma_m8n32k16_mma_s8:
- return {1, 4, 8, 8, MMA_VARIANTS(m8n32k16, s8)};
- case NVPTX::BI__imma_m8n32k16_mma_u8:
- return {1, 4, 8, 8, MMA_VARIANTS(m8n32k16, u8)};
- // Sub-integer MMA
- case NVPTX::BI__imma_m8n8k32_mma_s4:
- return {1, 1, 2, 2, MMA_VARIANTS_I4(m8n8k32, s4)};
- case NVPTX::BI__imma_m8n8k32_mma_u4:
- return {1, 1, 2, 2, MMA_VARIANTS_I4(m8n8k32, u4)};
- case NVPTX::BI__bmma_m8n8k128_mma_xor_popc_b1:
- return {1, 1, 2, 2, MMA_VARIANTS_B1(m8n8k128, b1)};
- default:
- llvm_unreachable("Unexpected builtin ID.");
- }
- #undef MMA_VARIANTS
- #undef MMA_VARIANTS_I4
- #undef MMA_VARIANTS_B1
- }
- } // namespace
- Value *
- CodeGenFunction::EmitNVPTXBuiltinExpr(unsigned BuiltinID, const CallExpr *E) {
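- // MakeLdg lowers the __nvvm_ldg_* builtins to llvm.nvvm.ldg.global.{i,f},
- // i.e. a load through the read-only, non-coherent cache; the intrinsic's
- // second operand is the load's alignment in bytes, derived here from the
- // pointee type's natural alignment.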
- auto MakeLdg = [&](unsigned IntrinsicID) {
- Value *Ptr = EmitScalarExpr(E->getArg(0));
- clang::CharUnits Align =
- getNaturalPointeeTypeAlignment(E->getArg(0)->getType());
- return Builder.CreateCall(
- CGM.getIntrinsic(IntrinsicID, {Ptr->getType()->getPointerElementType(),
- Ptr->getType()}),
- {Ptr, ConstantInt::get(Builder.getInt32Ty(), Align.getQuantity())});
- };
- auto MakeScopedAtomic = [&](unsigned IntrinsicID) {
- Value *Ptr = EmitScalarExpr(E->getArg(0));
- return Builder.CreateCall(
- CGM.getIntrinsic(IntrinsicID, {Ptr->getType()->getPointerElementType(),
- Ptr->getType()}),
- {Ptr, EmitScalarExpr(E->getArg(1))});
- };
- switch (BuiltinID) {
- case NVPTX::BI__nvvm_atom_add_gen_i:
- case NVPTX::BI__nvvm_atom_add_gen_l:
- case NVPTX::BI__nvvm_atom_add_gen_ll:
- return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Add, E);
- case NVPTX::BI__nvvm_atom_sub_gen_i:
- case NVPTX::BI__nvvm_atom_sub_gen_l:
- case NVPTX::BI__nvvm_atom_sub_gen_ll:
- return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Sub, E);
- case NVPTX::BI__nvvm_atom_and_gen_i:
- case NVPTX::BI__nvvm_atom_and_gen_l:
- case NVPTX::BI__nvvm_atom_and_gen_ll:
- return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::And, E);
- case NVPTX::BI__nvvm_atom_or_gen_i:
- case NVPTX::BI__nvvm_atom_or_gen_l:
- case NVPTX::BI__nvvm_atom_or_gen_ll:
- return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Or, E);
- case NVPTX::BI__nvvm_atom_xor_gen_i:
- case NVPTX::BI__nvvm_atom_xor_gen_l:
- case NVPTX::BI__nvvm_atom_xor_gen_ll:
- return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Xor, E);
- case NVPTX::BI__nvvm_atom_xchg_gen_i:
- case NVPTX::BI__nvvm_atom_xchg_gen_l:
- case NVPTX::BI__nvvm_atom_xchg_gen_ll:
- return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Xchg, E);
- case NVPTX::BI__nvvm_atom_max_gen_i:
- case NVPTX::BI__nvvm_atom_max_gen_l:
- case NVPTX::BI__nvvm_atom_max_gen_ll:
- return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Max, E);
- case NVPTX::BI__nvvm_atom_max_gen_ui:
- case NVPTX::BI__nvvm_atom_max_gen_ul:
- case NVPTX::BI__nvvm_atom_max_gen_ull:
- return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::UMax, E);
- case NVPTX::BI__nvvm_atom_min_gen_i:
- case NVPTX::BI__nvvm_atom_min_gen_l:
- case NVPTX::BI__nvvm_atom_min_gen_ll:
- return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Min, E);
- case NVPTX::BI__nvvm_atom_min_gen_ui:
- case NVPTX::BI__nvvm_atom_min_gen_ul:
- case NVPTX::BI__nvvm_atom_min_gen_ull:
- return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::UMin, E);
- case NVPTX::BI__nvvm_atom_cas_gen_i:
- case NVPTX::BI__nvvm_atom_cas_gen_l:
- case NVPTX::BI__nvvm_atom_cas_gen_ll:
- // __nvvm_atom_cas_gen_* should return the old value rather than the
- // success flag.
- return MakeAtomicCmpXchgValue(*this, E, /*ReturnBool=*/false);
- case NVPTX::BI__nvvm_atom_add_gen_f:
- case NVPTX::BI__nvvm_atom_add_gen_d: {
- Value *Ptr = EmitScalarExpr(E->getArg(0));
- Value *Val = EmitScalarExpr(E->getArg(1));
- return Builder.CreateAtomicRMW(llvm::AtomicRMWInst::FAdd, Ptr, Val,
- AtomicOrdering::SequentiallyConsistent);
- }
- case NVPTX::BI__nvvm_atom_inc_gen_ui: {
- Value *Ptr = EmitScalarExpr(E->getArg(0));
- Value *Val = EmitScalarExpr(E->getArg(1));
- Function *FnALI32 =
- CGM.getIntrinsic(Intrinsic::nvvm_atomic_load_inc_32, Ptr->getType());
- return Builder.CreateCall(FnALI32, {Ptr, Val});
- }
- case NVPTX::BI__nvvm_atom_dec_gen_ui: {
- Value *Ptr = EmitScalarExpr(E->getArg(0));
- Value *Val = EmitScalarExpr(E->getArg(1));
- Function *FnALD32 =
- CGM.getIntrinsic(Intrinsic::nvvm_atomic_load_dec_32, Ptr->getType());
- return Builder.CreateCall(FnALD32, {Ptr, Val});
- }
- case NVPTX::BI__nvvm_ldg_c:
- case NVPTX::BI__nvvm_ldg_c2:
- case NVPTX::BI__nvvm_ldg_c4:
- case NVPTX::BI__nvvm_ldg_s:
- case NVPTX::BI__nvvm_ldg_s2:
- case NVPTX::BI__nvvm_ldg_s4:
- case NVPTX::BI__nvvm_ldg_i:
- case NVPTX::BI__nvvm_ldg_i2:
- case NVPTX::BI__nvvm_ldg_i4:
- case NVPTX::BI__nvvm_ldg_l:
- case NVPTX::BI__nvvm_ldg_ll:
- case NVPTX::BI__nvvm_ldg_ll2:
- case NVPTX::BI__nvvm_ldg_uc:
- case NVPTX::BI__nvvm_ldg_uc2:
- case NVPTX::BI__nvvm_ldg_uc4:
- case NVPTX::BI__nvvm_ldg_us:
- case NVPTX::BI__nvvm_ldg_us2:
- case NVPTX::BI__nvvm_ldg_us4:
- case NVPTX::BI__nvvm_ldg_ui:
- case NVPTX::BI__nvvm_ldg_ui2:
- case NVPTX::BI__nvvm_ldg_ui4:
- case NVPTX::BI__nvvm_ldg_ul:
- case NVPTX::BI__nvvm_ldg_ull:
- case NVPTX::BI__nvvm_ldg_ull2:
- // PTX Interoperability section 2.2: "For a vector with an even number of
- // elements, its alignment is set to number of elements times the alignment
- // of its member: n*alignof(t)."
- return MakeLdg(Intrinsic::nvvm_ldg_global_i);
- case NVPTX::BI__nvvm_ldg_f:
- case NVPTX::BI__nvvm_ldg_f2:
- case NVPTX::BI__nvvm_ldg_f4:
- case NVPTX::BI__nvvm_ldg_d:
- case NVPTX::BI__nvvm_ldg_d2:
- return MakeLdg(Intrinsic::nvvm_ldg_global_f);
- case NVPTX::BI__nvvm_atom_cta_add_gen_i:
- case NVPTX::BI__nvvm_atom_cta_add_gen_l:
- case NVPTX::BI__nvvm_atom_cta_add_gen_ll:
- return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_gen_i_cta);
- case NVPTX::BI__nvvm_atom_sys_add_gen_i:
- case NVPTX::BI__nvvm_atom_sys_add_gen_l:
- case NVPTX::BI__nvvm_atom_sys_add_gen_ll:
- return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_gen_i_sys);
- case NVPTX::BI__nvvm_atom_cta_add_gen_f:
- case NVPTX::BI__nvvm_atom_cta_add_gen_d:
- return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_gen_f_cta);
- case NVPTX::BI__nvvm_atom_sys_add_gen_f:
- case NVPTX::BI__nvvm_atom_sys_add_gen_d:
- return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_gen_f_sys);
- case NVPTX::BI__nvvm_atom_cta_xchg_gen_i:
- case NVPTX::BI__nvvm_atom_cta_xchg_gen_l:
- case NVPTX::BI__nvvm_atom_cta_xchg_gen_ll:
- return MakeScopedAtomic(Intrinsic::nvvm_atomic_exch_gen_i_cta);
- case NVPTX::BI__nvvm_atom_sys_xchg_gen_i:
- case NVPTX::BI__nvvm_atom_sys_xchg_gen_l:
- case NVPTX::BI__nvvm_atom_sys_xchg_gen_ll:
- return MakeScopedAtomic(Intrinsic::nvvm_atomic_exch_gen_i_sys);
- case NVPTX::BI__nvvm_atom_cta_max_gen_i:
- case NVPTX::BI__nvvm_atom_cta_max_gen_ui:
- case NVPTX::BI__nvvm_atom_cta_max_gen_l:
- case NVPTX::BI__nvvm_atom_cta_max_gen_ul:
- case NVPTX::BI__nvvm_atom_cta_max_gen_ll:
- case NVPTX::BI__nvvm_atom_cta_max_gen_ull:
- return MakeScopedAtomic(Intrinsic::nvvm_atomic_max_gen_i_cta);
- case NVPTX::BI__nvvm_atom_sys_max_gen_i:
- case NVPTX::BI__nvvm_atom_sys_max_gen_ui:
- case NVPTX::BI__nvvm_atom_sys_max_gen_l:
- case NVPTX::BI__nvvm_atom_sys_max_gen_ul:
- case NVPTX::BI__nvvm_atom_sys_max_gen_ll:
- case NVPTX::BI__nvvm_atom_sys_max_gen_ull:
- return MakeScopedAtomic(Intrinsic::nvvm_atomic_max_gen_i_sys);
- case NVPTX::BI__nvvm_atom_cta_min_gen_i:
- case NVPTX::BI__nvvm_atom_cta_min_gen_ui:
- case NVPTX::BI__nvvm_atom_cta_min_gen_l:
- case NVPTX::BI__nvvm_atom_cta_min_gen_ul:
- case NVPTX::BI__nvvm_atom_cta_min_gen_ll:
- case NVPTX::BI__nvvm_atom_cta_min_gen_ull:
- return MakeScopedAtomic(Intrinsic::nvvm_atomic_min_gen_i_cta);
- case NVPTX::BI__nvvm_atom_sys_min_gen_i:
- case NVPTX::BI__nvvm_atom_sys_min_gen_ui:
- case NVPTX::BI__nvvm_atom_sys_min_gen_l:
- case NVPTX::BI__nvvm_atom_sys_min_gen_ul:
- case NVPTX::BI__nvvm_atom_sys_min_gen_ll:
- case NVPTX::BI__nvvm_atom_sys_min_gen_ull:
- return MakeScopedAtomic(Intrinsic::nvvm_atomic_min_gen_i_sys);
- case NVPTX::BI__nvvm_atom_cta_inc_gen_ui:
- return MakeScopedAtomic(Intrinsic::nvvm_atomic_inc_gen_i_cta);
- case NVPTX::BI__nvvm_atom_cta_dec_gen_ui:
- return MakeScopedAtomic(Intrinsic::nvvm_atomic_dec_gen_i_cta);
- case NVPTX::BI__nvvm_atom_sys_inc_gen_ui:
- return MakeScopedAtomic(Intrinsic::nvvm_atomic_inc_gen_i_sys);
- case NVPTX::BI__nvvm_atom_sys_dec_gen_ui:
- return MakeScopedAtomic(Intrinsic::nvvm_atomic_dec_gen_i_sys);
- case NVPTX::BI__nvvm_atom_cta_and_gen_i:
- case NVPTX::BI__nvvm_atom_cta_and_gen_l:
- case NVPTX::BI__nvvm_atom_cta_and_gen_ll:
- return MakeScopedAtomic(Intrinsic::nvvm_atomic_and_gen_i_cta);
- case NVPTX::BI__nvvm_atom_sys_and_gen_i:
- case NVPTX::BI__nvvm_atom_sys_and_gen_l:
- case NVPTX::BI__nvvm_atom_sys_and_gen_ll:
- return MakeScopedAtomic(Intrinsic::nvvm_atomic_and_gen_i_sys);
- case NVPTX::BI__nvvm_atom_cta_or_gen_i:
- case NVPTX::BI__nvvm_atom_cta_or_gen_l:
- case NVPTX::BI__nvvm_atom_cta_or_gen_ll:
- return MakeScopedAtomic(Intrinsic::nvvm_atomic_or_gen_i_cta);
- case NVPTX::BI__nvvm_atom_sys_or_gen_i:
- case NVPTX::BI__nvvm_atom_sys_or_gen_l:
- case NVPTX::BI__nvvm_atom_sys_or_gen_ll:
- return MakeScopedAtomic(Intrinsic::nvvm_atomic_or_gen_i_sys);
- case NVPTX::BI__nvvm_atom_cta_xor_gen_i:
- case NVPTX::BI__nvvm_atom_cta_xor_gen_l:
- case NVPTX::BI__nvvm_atom_cta_xor_gen_ll:
- return MakeScopedAtomic(Intrinsic::nvvm_atomic_xor_gen_i_cta);
- case NVPTX::BI__nvvm_atom_sys_xor_gen_i:
- case NVPTX::BI__nvvm_atom_sys_xor_gen_l:
- case NVPTX::BI__nvvm_atom_sys_xor_gen_ll:
- return MakeScopedAtomic(Intrinsic::nvvm_atomic_xor_gen_i_sys);
- case NVPTX::BI__nvvm_atom_cta_cas_gen_i:
- case NVPTX::BI__nvvm_atom_cta_cas_gen_l:
- case NVPTX::BI__nvvm_atom_cta_cas_gen_ll: {
- Value *Ptr = EmitScalarExpr(E->getArg(0));
- return Builder.CreateCall(
- CGM.getIntrinsic(
- Intrinsic::nvvm_atomic_cas_gen_i_cta,
- {Ptr->getType()->getPointerElementType(), Ptr->getType()}),
- {Ptr, EmitScalarExpr(E->getArg(1)), EmitScalarExpr(E->getArg(2))});
- }
- case NVPTX::BI__nvvm_atom_sys_cas_gen_i:
- case NVPTX::BI__nvvm_atom_sys_cas_gen_l:
- case NVPTX::BI__nvvm_atom_sys_cas_gen_ll: {
- Value *Ptr = EmitScalarExpr(E->getArg(0));
- return Builder.CreateCall(
- CGM.getIntrinsic(
- Intrinsic::nvvm_atomic_cas_gen_i_sys,
- {Ptr->getType()->getPointerElementType(), Ptr->getType()}),
- {Ptr, EmitScalarExpr(E->getArg(1)), EmitScalarExpr(E->getArg(2))});
- }
- case NVPTX::BI__nvvm_match_all_sync_i32p:
- case NVPTX::BI__nvvm_match_all_sync_i64p: {
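- // The match.all.sync intrinsics return a {mask, predicate} pair: store the
- // zero-extended predicate through the third pointer argument and return the
- // mask.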
- Value *Mask = EmitScalarExpr(E->getArg(0));
- Value *Val = EmitScalarExpr(E->getArg(1));
- Address PredOutPtr = EmitPointerWithAlignment(E->getArg(2));
- Value *ResultPair = Builder.CreateCall(
- CGM.getIntrinsic(BuiltinID == NVPTX::BI__nvvm_match_all_sync_i32p
- ? Intrinsic::nvvm_match_all_sync_i32p
- : Intrinsic::nvvm_match_all_sync_i64p),
- {Mask, Val});
- Value *Pred = Builder.CreateZExt(Builder.CreateExtractValue(ResultPair, 1),
- PredOutPtr.getElementType());
- Builder.CreateStore(Pred, PredOutPtr);
- return Builder.CreateExtractValue(ResultPair, 0);
- }
- // FP MMA loads
- case NVPTX::BI__hmma_m16n16k16_ld_a:
- case NVPTX::BI__hmma_m16n16k16_ld_b:
- case NVPTX::BI__hmma_m16n16k16_ld_c_f16:
- case NVPTX::BI__hmma_m16n16k16_ld_c_f32:
- case NVPTX::BI__hmma_m32n8k16_ld_a:
- case NVPTX::BI__hmma_m32n8k16_ld_b:
- case NVPTX::BI__hmma_m32n8k16_ld_c_f16:
- case NVPTX::BI__hmma_m32n8k16_ld_c_f32:
- case NVPTX::BI__hmma_m8n32k16_ld_a:
- case NVPTX::BI__hmma_m8n32k16_ld_b:
- case NVPTX::BI__hmma_m8n32k16_ld_c_f16:
- case NVPTX::BI__hmma_m8n32k16_ld_c_f32:
- // Integer MMA loads.
- case NVPTX::BI__imma_m16n16k16_ld_a_s8:
- case NVPTX::BI__imma_m16n16k16_ld_a_u8:
- case NVPTX::BI__imma_m16n16k16_ld_b_s8:
- case NVPTX::BI__imma_m16n16k16_ld_b_u8:
- case NVPTX::BI__imma_m16n16k16_ld_c:
- case NVPTX::BI__imma_m32n8k16_ld_a_s8:
- case NVPTX::BI__imma_m32n8k16_ld_a_u8:
- case NVPTX::BI__imma_m32n8k16_ld_b_s8:
- case NVPTX::BI__imma_m32n8k16_ld_b_u8:
- case NVPTX::BI__imma_m32n8k16_ld_c:
- case NVPTX::BI__imma_m8n32k16_ld_a_s8:
- case NVPTX::BI__imma_m8n32k16_ld_a_u8:
- case NVPTX::BI__imma_m8n32k16_ld_b_s8:
- case NVPTX::BI__imma_m8n32k16_ld_b_u8:
- case NVPTX::BI__imma_m8n32k16_ld_c:
- // Sub-integer MMA loads.
- case NVPTX::BI__imma_m8n8k32_ld_a_s4:
- case NVPTX::BI__imma_m8n8k32_ld_a_u4:
- case NVPTX::BI__imma_m8n8k32_ld_b_s4:
- case NVPTX::BI__imma_m8n8k32_ld_b_u4:
- case NVPTX::BI__imma_m8n8k32_ld_c:
- case NVPTX::BI__bmma_m8n8k128_ld_a_b1:
- case NVPTX::BI__bmma_m8n8k128_ld_b_b1:
- case NVPTX::BI__bmma_m8n8k128_ld_c:
- {
- Address Dst = EmitPointerWithAlignment(E->getArg(0));
- Value *Src = EmitScalarExpr(E->getArg(1));
- Value *Ldm = EmitScalarExpr(E->getArg(2));
- llvm::APSInt isColMajorArg;
- if (!E->getArg(3)->isIntegerConstantExpr(isColMajorArg, getContext()))
- return nullptr;
- bool isColMajor = isColMajorArg.getSExtValue();
- NVPTXMmaLdstInfo II = getNVPTXMmaLdstInfo(BuiltinID);
- unsigned IID = isColMajor ? II.IID_col : II.IID_row;
- if (IID == 0)
- return nullptr;
- Value *Result =
- Builder.CreateCall(CGM.getIntrinsic(IID, Src->getType()), {Src, Ldm});
- // Save returned values.
- assert(II.NumResults);
- if (II.NumResults == 1) {
- Builder.CreateAlignedStore(Result, Dst.getPointer(),
- CharUnits::fromQuantity(4));
- } else {
- for (unsigned i = 0; i < II.NumResults; ++i) {
- Builder.CreateAlignedStore(
- Builder.CreateBitCast(Builder.CreateExtractValue(Result, i),
- Dst.getElementType()),
- Builder.CreateGEP(Dst.getPointer(),
- llvm::ConstantInt::get(IntTy, i)),
- CharUnits::fromQuantity(4));
- }
- }
- return Result;
- }
- case NVPTX::BI__hmma_m16n16k16_st_c_f16:
- case NVPTX::BI__hmma_m16n16k16_st_c_f32:
- case NVPTX::BI__hmma_m32n8k16_st_c_f16:
- case NVPTX::BI__hmma_m32n8k16_st_c_f32:
- case NVPTX::BI__hmma_m8n32k16_st_c_f16:
- case NVPTX::BI__hmma_m8n32k16_st_c_f32:
- case NVPTX::BI__imma_m16n16k16_st_c_i32:
- case NVPTX::BI__imma_m32n8k16_st_c_i32:
- case NVPTX::BI__imma_m8n32k16_st_c_i32:
- case NVPTX::BI__imma_m8n8k32_st_c_i32:
- case NVPTX::BI__bmma_m8n8k128_st_c_i32: {
- Value *Dst = EmitScalarExpr(E->getArg(0));
- Address Src = EmitPointerWithAlignment(E->getArg(1));
- Value *Ldm = EmitScalarExpr(E->getArg(2));
- llvm::APSInt isColMajorArg;
- if (!E->getArg(3)->isIntegerConstantExpr(isColMajorArg, getContext()))
- return nullptr;
- bool isColMajor = isColMajorArg.getSExtValue();
- NVPTXMmaLdstInfo II = getNVPTXMmaLdstInfo(BuiltinID);
- unsigned IID = isColMajor ? II.IID_col : II.IID_row;
- if (IID == 0)
- return nullptr;
- Function *Intrinsic =
- CGM.getIntrinsic(IID, Dst->getType());
- llvm::Type *ParamType = Intrinsic->getFunctionType()->getParamType(1);
- SmallVector<Value *, 10> Values = {Dst};
- for (unsigned i = 0; i < II.NumResults; ++i) {
- Value *V = Builder.CreateAlignedLoad(
- Builder.CreateGEP(Src.getPointer(), llvm::ConstantInt::get(IntTy, i)),
- CharUnits::fromQuantity(4));
- Values.push_back(Builder.CreateBitCast(V, ParamType));
- }
- Values.push_back(Ldm);
- Value *Result = Builder.CreateCall(Intrinsic, Values);
- return Result;
- }
- // BI__hmma_m16n16k16_mma_<Dtype><CType>(d, a, b, c, layout, satf) -->
- // Intrinsic::nvvm_wmma_m16n16k16_mma_sync<layout A,B><DType><CType><Satf>
- case NVPTX::BI__hmma_m16n16k16_mma_f16f16:
- case NVPTX::BI__hmma_m16n16k16_mma_f32f16:
- case NVPTX::BI__hmma_m16n16k16_mma_f32f32:
- case NVPTX::BI__hmma_m16n16k16_mma_f16f32:
- case NVPTX::BI__hmma_m32n8k16_mma_f16f16:
- case NVPTX::BI__hmma_m32n8k16_mma_f32f16:
- case NVPTX::BI__hmma_m32n8k16_mma_f32f32:
- case NVPTX::BI__hmma_m32n8k16_mma_f16f32:
- case NVPTX::BI__hmma_m8n32k16_mma_f16f16:
- case NVPTX::BI__hmma_m8n32k16_mma_f32f16:
- case NVPTX::BI__hmma_m8n32k16_mma_f32f32:
- case NVPTX::BI__hmma_m8n32k16_mma_f16f32:
- case NVPTX::BI__imma_m16n16k16_mma_s8:
- case NVPTX::BI__imma_m16n16k16_mma_u8:
- case NVPTX::BI__imma_m32n8k16_mma_s8:
- case NVPTX::BI__imma_m32n8k16_mma_u8:
- case NVPTX::BI__imma_m8n32k16_mma_s8:
- case NVPTX::BI__imma_m8n32k16_mma_u8:
- case NVPTX::BI__imma_m8n8k32_mma_s4:
- case NVPTX::BI__imma_m8n8k32_mma_u4:
- case NVPTX::BI__bmma_m8n8k128_mma_xor_popc_b1: {
- Address Dst = EmitPointerWithAlignment(E->getArg(0));
- Address SrcA = EmitPointerWithAlignment(E->getArg(1));
- Address SrcB = EmitPointerWithAlignment(E->getArg(2));
- Address SrcC = EmitPointerWithAlignment(E->getArg(3));
- llvm::APSInt LayoutArg;
- if (!E->getArg(4)->isIntegerConstantExpr(LayoutArg, getContext()))
- return nullptr;
- int Layout = LayoutArg.getSExtValue();
- if (Layout < 0 || Layout > 3)
- return nullptr;
- llvm::APSInt SatfArg;
- if (BuiltinID == NVPTX::BI__bmma_m8n8k128_mma_xor_popc_b1)
- SatfArg = 0; // .b1 does not have a satf argument.
- else if (!E->getArg(5)->isIntegerConstantExpr(SatfArg, getContext()))
- return nullptr;
- bool Satf = SatfArg.getSExtValue();
- NVPTXMmaInfo MI = getNVPTXMmaInfo(BuiltinID);
- unsigned IID = MI.getMMAIntrinsic(Layout, Satf);
- if (IID == 0) // Unsupported combination of Layout/Satf.
- return nullptr;
- SmallVector<Value *, 24> Values;
- Function *Intrinsic = CGM.getIntrinsic(IID);
- llvm::Type *AType = Intrinsic->getFunctionType()->getParamType(0);
- // Load A
- for (unsigned i = 0; i < MI.NumEltsA; ++i) {
- Value *V = Builder.CreateAlignedLoad(
- Builder.CreateGEP(SrcA.getPointer(),
- llvm::ConstantInt::get(IntTy, i)),
- CharUnits::fromQuantity(4));
- Values.push_back(Builder.CreateBitCast(V, AType));
- }
- // Load B
- llvm::Type *BType = Intrinsic->getFunctionType()->getParamType(MI.NumEltsA);
- for (unsigned i = 0; i < MI.NumEltsB; ++i) {
- Value *V = Builder.CreateAlignedLoad(
- Builder.CreateGEP(SrcB.getPointer(),
- llvm::ConstantInt::get(IntTy, i)),
- CharUnits::fromQuantity(4));
- Values.push_back(Builder.CreateBitCast(V, BType));
- }
- // Load C
- llvm::Type *CType =
- Intrinsic->getFunctionType()->getParamType(MI.NumEltsA + MI.NumEltsB);
- for (unsigned i = 0; i < MI.NumEltsC; ++i) {
- Value *V = Builder.CreateAlignedLoad(
- Builder.CreateGEP(SrcC.getPointer(),
- llvm::ConstantInt::get(IntTy, i)),
- CharUnits::fromQuantity(4));
- Values.push_back(Builder.CreateBitCast(V, CType));
- }
- Value *Result = Builder.CreateCall(Intrinsic, Values);
- llvm::Type *DType = Dst.getElementType();
- for (unsigned i = 0; i < MI.NumEltsD; ++i)
- Builder.CreateAlignedStore(
- Builder.CreateBitCast(Builder.CreateExtractValue(Result, i), DType),
- Builder.CreateGEP(Dst.getPointer(), llvm::ConstantInt::get(IntTy, i)),
- CharUnits::fromQuantity(4));
- return Result;
- }
- default:
- return nullptr;
- }
- }
- Value *CodeGenFunction::EmitWebAssemblyBuiltinExpr(unsigned BuiltinID,
- const CallExpr *E) {
- switch (BuiltinID) {
- case WebAssembly::BI__builtin_wasm_memory_size: {
- llvm::Type *ResultType = ConvertType(E->getType());
- Value *I = EmitScalarExpr(E->getArg(0));
- Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_memory_size, ResultType);
- return Builder.CreateCall(Callee, I);
- }
- case WebAssembly::BI__builtin_wasm_memory_grow: {
- llvm::Type *ResultType = ConvertType(E->getType());
- Value *Args[] = {
- EmitScalarExpr(E->getArg(0)),
- EmitScalarExpr(E->getArg(1))
- };
- Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_memory_grow, ResultType);
- return Builder.CreateCall(Callee, Args);
- }
- case WebAssembly::BI__builtin_wasm_memory_init: {
- llvm::APSInt SegConst;
- if (!E->getArg(0)->isIntegerConstantExpr(SegConst, getContext()))
- llvm_unreachable("Constant arg isn't actually constant?");
- llvm::APSInt MemConst;
- if (!E->getArg(1)->isIntegerConstantExpr(MemConst, getContext()))
- llvm_unreachable("Constant arg isn't actually constant?");
- if (!MemConst.isNullValue())
- ErrorUnsupported(E, "non-zero memory index");
- Value *Args[] = {llvm::ConstantInt::get(getLLVMContext(), SegConst),
- llvm::ConstantInt::get(getLLVMContext(), MemConst),
- EmitScalarExpr(E->getArg(2)), EmitScalarExpr(E->getArg(3)),
- EmitScalarExpr(E->getArg(4))};
- Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_memory_init);
- return Builder.CreateCall(Callee, Args);
- }
- case WebAssembly::BI__builtin_wasm_data_drop: {
- llvm::APSInt SegConst;
- if (!E->getArg(0)->isIntegerConstantExpr(SegConst, getContext()))
- llvm_unreachable("Constant arg isn't actually constant?");
- Value *Arg = llvm::ConstantInt::get(getLLVMContext(), SegConst);
- Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_data_drop);
- return Builder.CreateCall(Callee, {Arg});
- }
- case WebAssembly::BI__builtin_wasm_throw: {
- Value *Tag = EmitScalarExpr(E->getArg(0));
- Value *Obj = EmitScalarExpr(E->getArg(1));
- Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_throw);
- return Builder.CreateCall(Callee, {Tag, Obj});
- }
- case WebAssembly::BI__builtin_wasm_rethrow_in_catch: {
- Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_rethrow_in_catch);
- return Builder.CreateCall(Callee);
- }
- case WebAssembly::BI__builtin_wasm_atomic_wait_i32: {
- Value *Addr = EmitScalarExpr(E->getArg(0));
- Value *Expected = EmitScalarExpr(E->getArg(1));
- Value *Timeout = EmitScalarExpr(E->getArg(2));
- Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_atomic_wait_i32);
- return Builder.CreateCall(Callee, {Addr, Expected, Timeout});
- }
- case WebAssembly::BI__builtin_wasm_atomic_wait_i64: {
- Value *Addr = EmitScalarExpr(E->getArg(0));
- Value *Expected = EmitScalarExpr(E->getArg(1));
- Value *Timeout = EmitScalarExpr(E->getArg(2));
- Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_atomic_wait_i64);
- return Builder.CreateCall(Callee, {Addr, Expected, Timeout});
- }
- case WebAssembly::BI__builtin_wasm_atomic_notify: {
- Value *Addr = EmitScalarExpr(E->getArg(0));
- Value *Count = EmitScalarExpr(E->getArg(1));
- Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_atomic_notify);
- return Builder.CreateCall(Callee, {Addr, Count});
- }
- case WebAssembly::BI__builtin_wasm_trunc_saturate_s_i32_f32:
- case WebAssembly::BI__builtin_wasm_trunc_saturate_s_i32_f64:
- case WebAssembly::BI__builtin_wasm_trunc_saturate_s_i64_f32:
- case WebAssembly::BI__builtin_wasm_trunc_saturate_s_i64_f64:
- case WebAssembly::BI__builtin_wasm_trunc_saturate_s_i32x4_f32x4:
- case WebAssembly::BI__builtin_wasm_trunc_saturate_s_i64x2_f64x2: {
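- // Wasm's saturating truncations never trap: out-of-range inputs clamp to
- // the integer minimum/maximum and NaN converts to 0, which is why a
- // dedicated intrinsic is used rather than a plain fptosi.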
- Value *Src = EmitScalarExpr(E->getArg(0));
- llvm::Type *ResT = ConvertType(E->getType());
- Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_trunc_saturate_signed,
- {ResT, Src->getType()});
- return Builder.CreateCall(Callee, {Src});
- }
- case WebAssembly::BI__builtin_wasm_trunc_saturate_u_i32_f32:
- case WebAssembly::BI__builtin_wasm_trunc_saturate_u_i32_f64:
- case WebAssembly::BI__builtin_wasm_trunc_saturate_u_i64_f32:
- case WebAssembly::BI__builtin_wasm_trunc_saturate_u_i64_f64:
- case WebAssembly::BI__builtin_wasm_trunc_saturate_u_i32x4_f32x4:
- case WebAssembly::BI__builtin_wasm_trunc_saturate_u_i64x2_f64x2: {
- Value *Src = EmitScalarExpr(E->getArg(0));
- llvm::Type *ResT = ConvertType(E->getType());
- Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_trunc_saturate_unsigned,
- {ResT, Src->getType()});
- return Builder.CreateCall(Callee, {Src});
- }
- case WebAssembly::BI__builtin_wasm_min_f32:
- case WebAssembly::BI__builtin_wasm_min_f64:
- case WebAssembly::BI__builtin_wasm_min_f32x4:
- case WebAssembly::BI__builtin_wasm_min_f64x2: {
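- // Wasm's min/max propagate NaNs and order -0 < +0, matching the semantics
- // of llvm.minimum/llvm.maximum exactly, so no wasm-specific intrinsic is
- // needed.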
- Value *LHS = EmitScalarExpr(E->getArg(0));
- Value *RHS = EmitScalarExpr(E->getArg(1));
- Function *Callee = CGM.getIntrinsic(Intrinsic::minimum,
- ConvertType(E->getType()));
- return Builder.CreateCall(Callee, {LHS, RHS});
- }
- case WebAssembly::BI__builtin_wasm_max_f32:
- case WebAssembly::BI__builtin_wasm_max_f64:
- case WebAssembly::BI__builtin_wasm_max_f32x4:
- case WebAssembly::BI__builtin_wasm_max_f64x2: {
- Value *LHS = EmitScalarExpr(E->getArg(0));
- Value *RHS = EmitScalarExpr(E->getArg(1));
- Function *Callee = CGM.getIntrinsic(Intrinsic::maximum,
- ConvertType(E->getType()));
- return Builder.CreateCall(Callee, {LHS, RHS});
- }
- case WebAssembly::BI__builtin_wasm_extract_lane_s_i8x16:
- case WebAssembly::BI__builtin_wasm_extract_lane_u_i8x16:
- case WebAssembly::BI__builtin_wasm_extract_lane_s_i16x8:
- case WebAssembly::BI__builtin_wasm_extract_lane_u_i16x8:
- case WebAssembly::BI__builtin_wasm_extract_lane_i32x4:
- case WebAssembly::BI__builtin_wasm_extract_lane_i64x2:
- case WebAssembly::BI__builtin_wasm_extract_lane_f32x4:
- case WebAssembly::BI__builtin_wasm_extract_lane_f64x2: {
- llvm::APSInt LaneConst;
- if (!E->getArg(1)->isIntegerConstantExpr(LaneConst, getContext()))
- llvm_unreachable("Constant arg isn't actually constant?");
- Value *Vec = EmitScalarExpr(E->getArg(0));
- Value *Lane = llvm::ConstantInt::get(getLLVMContext(), LaneConst);
- Value *Extract = Builder.CreateExtractElement(Vec, Lane);
- switch (BuiltinID) {
- case WebAssembly::BI__builtin_wasm_extract_lane_s_i8x16:
- case WebAssembly::BI__builtin_wasm_extract_lane_s_i16x8:
- return Builder.CreateSExt(Extract, ConvertType(E->getType()));
- case WebAssembly::BI__builtin_wasm_extract_lane_u_i8x16:
- case WebAssembly::BI__builtin_wasm_extract_lane_u_i16x8:
- return Builder.CreateZExt(Extract, ConvertType(E->getType()));
- case WebAssembly::BI__builtin_wasm_extract_lane_i32x4:
- case WebAssembly::BI__builtin_wasm_extract_lane_i64x2:
- case WebAssembly::BI__builtin_wasm_extract_lane_f32x4:
- case WebAssembly::BI__builtin_wasm_extract_lane_f64x2:
- return Extract;
- default:
- llvm_unreachable("unexpected builtin ID");
- }
- }
- case WebAssembly::BI__builtin_wasm_replace_lane_i8x16:
- case WebAssembly::BI__builtin_wasm_replace_lane_i16x8:
- case WebAssembly::BI__builtin_wasm_replace_lane_i32x4:
- case WebAssembly::BI__builtin_wasm_replace_lane_i64x2:
- case WebAssembly::BI__builtin_wasm_replace_lane_f32x4:
- case WebAssembly::BI__builtin_wasm_replace_lane_f64x2: {
- llvm::APSInt LaneConst;
- if (!E->getArg(1)->isIntegerConstantExpr(LaneConst, getContext()))
- llvm_unreachable("Constant arg isn't actually constant?");
- Value *Vec = EmitScalarExpr(E->getArg(0));
- Value *Lane = llvm::ConstantInt::get(getLLVMContext(), LaneConst);
- Value *Val = EmitScalarExpr(E->getArg(2));
- switch (BuiltinID) {
- case WebAssembly::BI__builtin_wasm_replace_lane_i8x16:
- case WebAssembly::BI__builtin_wasm_replace_lane_i16x8: {
- llvm::Type *ElemType = ConvertType(E->getType())->getVectorElementType();
- Value *Trunc = Builder.CreateTrunc(Val, ElemType);
- return Builder.CreateInsertElement(Vec, Trunc, Lane);
- }
- case WebAssembly::BI__builtin_wasm_replace_lane_i32x4:
- case WebAssembly::BI__builtin_wasm_replace_lane_i64x2:
- case WebAssembly::BI__builtin_wasm_replace_lane_f32x4:
- case WebAssembly::BI__builtin_wasm_replace_lane_f64x2:
- return Builder.CreateInsertElement(Vec, Val, Lane);
- default:
- llvm_unreachable("unexpected builtin ID");
- }
- }
- case WebAssembly::BI__builtin_wasm_add_saturate_s_i8x16:
- case WebAssembly::BI__builtin_wasm_add_saturate_u_i8x16:
- case WebAssembly::BI__builtin_wasm_add_saturate_s_i16x8:
- case WebAssembly::BI__builtin_wasm_add_saturate_u_i16x8:
- case WebAssembly::BI__builtin_wasm_sub_saturate_s_i8x16:
- case WebAssembly::BI__builtin_wasm_sub_saturate_u_i8x16:
- case WebAssembly::BI__builtin_wasm_sub_saturate_s_i16x8:
- case WebAssembly::BI__builtin_wasm_sub_saturate_u_i16x8: {
- unsigned IntNo;
- switch (BuiltinID) {
- case WebAssembly::BI__builtin_wasm_add_saturate_s_i8x16:
- case WebAssembly::BI__builtin_wasm_add_saturate_s_i16x8:
- IntNo = Intrinsic::sadd_sat;
- break;
- case WebAssembly::BI__builtin_wasm_add_saturate_u_i8x16:
- case WebAssembly::BI__builtin_wasm_add_saturate_u_i16x8:
- IntNo = Intrinsic::uadd_sat;
- break;
- case WebAssembly::BI__builtin_wasm_sub_saturate_s_i8x16:
- case WebAssembly::BI__builtin_wasm_sub_saturate_s_i16x8:
- IntNo = Intrinsic::wasm_sub_saturate_signed;
- break;
- case WebAssembly::BI__builtin_wasm_sub_saturate_u_i8x16:
- case WebAssembly::BI__builtin_wasm_sub_saturate_u_i16x8:
- IntNo = Intrinsic::wasm_sub_saturate_unsigned;
- break;
- default:
- llvm_unreachable("unexpected builtin ID");
- }
- Value *LHS = EmitScalarExpr(E->getArg(0));
- Value *RHS = EmitScalarExpr(E->getArg(1));
- Function *Callee = CGM.getIntrinsic(IntNo, ConvertType(E->getType()));
- return Builder.CreateCall(Callee, {LHS, RHS});
- }
- case WebAssembly::BI__builtin_wasm_bitselect: {
- Value *V1 = EmitScalarExpr(E->getArg(0));
- Value *V2 = EmitScalarExpr(E->getArg(1));
- Value *C = EmitScalarExpr(E->getArg(2));
- Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_bitselect,
- ConvertType(E->getType()));
- return Builder.CreateCall(Callee, {V1, V2, C});
- }
- case WebAssembly::BI__builtin_wasm_any_true_i8x16:
- case WebAssembly::BI__builtin_wasm_any_true_i16x8:
- case WebAssembly::BI__builtin_wasm_any_true_i32x4:
- case WebAssembly::BI__builtin_wasm_any_true_i64x2:
- case WebAssembly::BI__builtin_wasm_all_true_i8x16:
- case WebAssembly::BI__builtin_wasm_all_true_i16x8:
- case WebAssembly::BI__builtin_wasm_all_true_i32x4:
- case WebAssembly::BI__builtin_wasm_all_true_i64x2: {
- unsigned IntNo;
- switch (BuiltinID) {
- case WebAssembly::BI__builtin_wasm_any_true_i8x16:
- case WebAssembly::BI__builtin_wasm_any_true_i16x8:
- case WebAssembly::BI__builtin_wasm_any_true_i32x4:
- case WebAssembly::BI__builtin_wasm_any_true_i64x2:
- IntNo = Intrinsic::wasm_anytrue;
- break;
- case WebAssembly::BI__builtin_wasm_all_true_i8x16:
- case WebAssembly::BI__builtin_wasm_all_true_i16x8:
- case WebAssembly::BI__builtin_wasm_all_true_i32x4:
- case WebAssembly::BI__builtin_wasm_all_true_i64x2:
- IntNo = Intrinsic::wasm_alltrue;
- break;
- default:
- llvm_unreachable("unexpected builtin ID");
- }
- Value *Vec = EmitScalarExpr(E->getArg(0));
- Function *Callee = CGM.getIntrinsic(IntNo, Vec->getType());
- return Builder.CreateCall(Callee, {Vec});
- }
- case WebAssembly::BI__builtin_wasm_abs_f32x4:
- case WebAssembly::BI__builtin_wasm_abs_f64x2: {
- Value *Vec = EmitScalarExpr(E->getArg(0));
- Function *Callee = CGM.getIntrinsic(Intrinsic::fabs, Vec->getType());
- return Builder.CreateCall(Callee, {Vec});
- }
- case WebAssembly::BI__builtin_wasm_sqrt_f32x4:
- case WebAssembly::BI__builtin_wasm_sqrt_f64x2: {
- Value *Vec = EmitScalarExpr(E->getArg(0));
- Function *Callee = CGM.getIntrinsic(Intrinsic::sqrt, Vec->getType());
- return Builder.CreateCall(Callee, {Vec});
- }
- default:
- return nullptr;
- }
- }
- Value *CodeGenFunction::EmitHexagonBuiltinExpr(unsigned BuiltinID,
- const CallExpr *E) {
- SmallVector<llvm::Value *, 4> Ops;
- Intrinsic::ID ID = Intrinsic::not_intrinsic;
- auto MakeCircLd = [&](unsigned IntID, bool HasImm) {
- // The base pointer is passed by address, so it needs to be loaded.
- Address BP = EmitPointerWithAlignment(E->getArg(0));
- BP = Address(Builder.CreateBitCast(BP.getPointer(), Int8PtrPtrTy),
- BP.getAlignment());
- llvm::Value *Base = Builder.CreateLoad(BP);
- // Operands are Base, Increment, Modifier, Start.
- if (HasImm)
- Ops = { Base, EmitScalarExpr(E->getArg(1)), EmitScalarExpr(E->getArg(2)),
- EmitScalarExpr(E->getArg(3)) };
- else
- Ops = { Base, EmitScalarExpr(E->getArg(1)),
- EmitScalarExpr(E->getArg(2)) };
- llvm::Value *Result = Builder.CreateCall(CGM.getIntrinsic(IntID), Ops);
- llvm::Value *NewBase = Builder.CreateExtractValue(Result, 1);
- llvm::Value *LV = Builder.CreateBitCast(EmitScalarExpr(E->getArg(0)),
- NewBase->getType()->getPointerTo());
- Address Dest = EmitPointerWithAlignment(E->getArg(0));
- // The intrinsic generates two results. The new value for the base pointer
- // needs to be stored.
- Builder.CreateAlignedStore(NewBase, LV, Dest.getAlignment());
- return Builder.CreateExtractValue(Result, 0);
- };
- auto MakeCircSt = [&](unsigned IntID, bool HasImm) {
- // The base pointer is passed by address, so it needs to be loaded.
- Address BP = EmitPointerWithAlignment(E->getArg(0));
- BP = Address(Builder.CreateBitCast(BP.getPointer(), Int8PtrPtrTy),
- BP.getAlignment());
- llvm::Value *Base = Builder.CreateLoad(BP);
- // Operands are Base, Increment, Modifier, Value, Start.
- if (HasImm)
- Ops = { Base, EmitScalarExpr(E->getArg(1)), EmitScalarExpr(E->getArg(2)),
- EmitScalarExpr(E->getArg(3)), EmitScalarExpr(E->getArg(4)) };
- else
- Ops = { Base, EmitScalarExpr(E->getArg(1)),
- EmitScalarExpr(E->getArg(2)), EmitScalarExpr(E->getArg(3)) };
- llvm::Value *NewBase = Builder.CreateCall(CGM.getIntrinsic(IntID), Ops);
- llvm::Value *LV = Builder.CreateBitCast(EmitScalarExpr(E->getArg(0)),
- NewBase->getType()->getPointerTo());
- Address Dest = EmitPointerWithAlignment(E->getArg(0));
- // The intrinsic generates one result, which is the new value for the base
- // pointer. It needs to be stored.
- return Builder.CreateAlignedStore(NewBase, LV, Dest.getAlignment());
- };
- // Handle the conversion of bit-reverse load intrinsics to bitcode.
- // The intrinsic call emitted below only reads from memory; the write to
- // memory is handled by the store instruction that follows it.
- auto MakeBrevLd = [&](unsigned IntID, llvm::Type *DestTy) {
- // The intrinsic produces two results: the loaded value and the updated
- // base pointer. The loaded value is stored through the second argument,
- // which is passed by address, and the updated base pointer is returned.
- llvm::Value *BaseAddress =
- Builder.CreateBitCast(EmitScalarExpr(E->getArg(0)), Int8PtrTy);
- // An expression like &(*pt++) has side effects on every evaluation, and
- // EmitPointerWithAlignment and EmitScalarExpr re-evaluate their argument on
- // each call, so each builtin argument is evaluated exactly once here.
- Address DestAddr = EmitPointerWithAlignment(E->getArg(1));
- DestAddr = Address(Builder.CreateBitCast(DestAddr.getPointer(), Int8PtrTy),
- DestAddr.getAlignment());
- llvm::Value *DestAddress = DestAddr.getPointer();
- // Operands are Base, Dest, Modifier.
- // The intrinsic format in LLVM IR is defined as
- // { ValueType, i8* } (i8*, i32).
- Ops = {BaseAddress, EmitScalarExpr(E->getArg(2))};
- llvm::Value *Result = Builder.CreateCall(CGM.getIntrinsic(IntID), Ops);
- // The value needs to be stored as the variable is passed by reference.
- llvm::Value *DestVal = Builder.CreateExtractValue(Result, 0);
- // The store needs to be truncated to fit the destination type.
- // While i32 and i64 are natively supported on Hexagon, i8 and i16 need
- // to be handled with stores of the respective destination type.
- DestVal = Builder.CreateTrunc(DestVal, DestTy);
- llvm::Value *DestForStore =
- Builder.CreateBitCast(DestAddress, DestVal->getType()->getPointerTo());
- Builder.CreateAlignedStore(DestVal, DestForStore, DestAddr.getAlignment());
- // The updated value of the base pointer is returned.
- return Builder.CreateExtractValue(Result, 1);
- };
- switch (BuiltinID) {
- case Hexagon::BI__builtin_HEXAGON_V6_vaddcarry:
- case Hexagon::BI__builtin_HEXAGON_V6_vaddcarry_128B: {
- Address Dest = EmitPointerWithAlignment(E->getArg(2));
- unsigned Size;
- if (BuiltinID == Hexagon::BI__builtin_HEXAGON_V6_vaddcarry) {
- Size = 512;
- ID = Intrinsic::hexagon_V6_vaddcarry;
- } else {
- Size = 1024;
- ID = Intrinsic::hexagon_V6_vaddcarry_128B;
- }
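- // HVX models predicates as vectors of i1 (512 bits in 64-byte vector mode,
- // 1024 bits for the 128B variants); reinterpret the in-memory carry operand
- // as such a vector.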
- Dest = Builder.CreateBitCast(Dest,
- llvm::VectorType::get(Builder.getInt1Ty(), Size)->getPointerTo(0));
- LoadInst *QLd = Builder.CreateLoad(Dest);
- Ops = { EmitScalarExpr(E->getArg(0)), EmitScalarExpr(E->getArg(1)), QLd };
- llvm::Value *Result = Builder.CreateCall(CGM.getIntrinsic(ID), Ops);
- llvm::Value *Vprd = Builder.CreateExtractValue(Result, 1);
- llvm::Value *Base = Builder.CreateBitCast(EmitScalarExpr(E->getArg(2)),
- Vprd->getType()->getPointerTo(0));
- Builder.CreateAlignedStore(Vprd, Base, Dest.getAlignment());
- return Builder.CreateExtractValue(Result, 0);
- }
- case Hexagon::BI__builtin_HEXAGON_V6_vsubcarry:
- case Hexagon::BI__builtin_HEXAGON_V6_vsubcarry_128B: {
- Address Dest = EmitPointerWithAlignment(E->getArg(2));
- unsigned Size;
- if (BuiltinID == Hexagon::BI__builtin_HEXAGON_V6_vsubcarry) {
- Size = 512;
- ID = Intrinsic::hexagon_V6_vsubcarry;
- } else {
- Size = 1024;
- ID = Intrinsic::hexagon_V6_vsubcarry_128B;
- }
- Dest = Builder.CreateBitCast(Dest,
- llvm::VectorType::get(Builder.getInt1Ty(), Size)->getPointerTo(0));
- LoadInst *QLd = Builder.CreateLoad(Dest);
- Ops = { EmitScalarExpr(E->getArg(0)), EmitScalarExpr(E->getArg(1)), QLd };
- llvm::Value *Result = Builder.CreateCall(CGM.getIntrinsic(ID), Ops);
- llvm::Value *Vprd = Builder.CreateExtractValue(Result, 1);
- llvm::Value *Base = Builder.CreateBitCast(EmitScalarExpr(E->getArg(2)),
- Vprd->getType()->getPointerTo(0));
- Builder.CreateAlignedStore(Vprd, Base, Dest.getAlignment());
- return Builder.CreateExtractValue(Result, 0);
- }
- case Hexagon::BI__builtin_HEXAGON_L2_loadrub_pci:
- return MakeCircLd(Intrinsic::hexagon_L2_loadrub_pci, /*HasImm*/true);
- case Hexagon::BI__builtin_HEXAGON_L2_loadrb_pci:
- return MakeCircLd(Intrinsic::hexagon_L2_loadrb_pci, /*HasImm*/true);
- case Hexagon::BI__builtin_HEXAGON_L2_loadruh_pci:
- return MakeCircLd(Intrinsic::hexagon_L2_loadruh_pci, /*HasImm*/true);
- case Hexagon::BI__builtin_HEXAGON_L2_loadrh_pci:
- return MakeCircLd(Intrinsic::hexagon_L2_loadrh_pci, /*HasImm*/true);
- case Hexagon::BI__builtin_HEXAGON_L2_loadri_pci:
- return MakeCircLd(Intrinsic::hexagon_L2_loadri_pci, /*HasImm*/true);
- case Hexagon::BI__builtin_HEXAGON_L2_loadrd_pci:
- return MakeCircLd(Intrinsic::hexagon_L2_loadrd_pci, /*HasImm*/true);
- case Hexagon::BI__builtin_HEXAGON_L2_loadrub_pcr:
- return MakeCircLd(Intrinsic::hexagon_L2_loadrub_pcr, /*HasImm*/false);
- case Hexagon::BI__builtin_HEXAGON_L2_loadrb_pcr:
- return MakeCircLd(Intrinsic::hexagon_L2_loadrb_pcr, /*HasImm*/false);
- case Hexagon::BI__builtin_HEXAGON_L2_loadruh_pcr:
- return MakeCircLd(Intrinsic::hexagon_L2_loadruh_pcr, /*HasImm*/false);
- case Hexagon::BI__builtin_HEXAGON_L2_loadrh_pcr:
- return MakeCircLd(Intrinsic::hexagon_L2_loadrh_pcr, /*HasImm*/false);
- case Hexagon::BI__builtin_HEXAGON_L2_loadri_pcr:
- return MakeCircLd(Intrinsic::hexagon_L2_loadri_pcr, /*HasImm*/false);
- case Hexagon::BI__builtin_HEXAGON_L2_loadrd_pcr:
- return MakeCircLd(Intrinsic::hexagon_L2_loadrd_pcr, /*HasImm*/false);
- case Hexagon::BI__builtin_HEXAGON_S2_storerb_pci:
- return MakeCircSt(Intrinsic::hexagon_S2_storerb_pci, /*HasImm*/true);
- case Hexagon::BI__builtin_HEXAGON_S2_storerh_pci:
- return MakeCircSt(Intrinsic::hexagon_S2_storerh_pci, /*HasImm*/true);
- case Hexagon::BI__builtin_HEXAGON_S2_storerf_pci:
- return MakeCircSt(Intrinsic::hexagon_S2_storerf_pci, /*HasImm*/true);
- case Hexagon::BI__builtin_HEXAGON_S2_storeri_pci:
- return MakeCircSt(Intrinsic::hexagon_S2_storeri_pci, /*HasImm*/true);
- case Hexagon::BI__builtin_HEXAGON_S2_storerd_pci:
- return MakeCircSt(Intrinsic::hexagon_S2_storerd_pci, /*HasImm*/true);
- case Hexagon::BI__builtin_HEXAGON_S2_storerb_pcr:
- return MakeCircSt(Intrinsic::hexagon_S2_storerb_pcr, /*HasImm*/false);
- case Hexagon::BI__builtin_HEXAGON_S2_storerh_pcr:
- return MakeCircSt(Intrinsic::hexagon_S2_storerh_pcr, /*HasImm*/false);
- case Hexagon::BI__builtin_HEXAGON_S2_storerf_pcr:
- return MakeCircSt(Intrinsic::hexagon_S2_storerf_pcr, /*HasImm*/false);
- case Hexagon::BI__builtin_HEXAGON_S2_storeri_pcr:
- return MakeCircSt(Intrinsic::hexagon_S2_storeri_pcr, /*HasImm*/false);
- case Hexagon::BI__builtin_HEXAGON_S2_storerd_pcr:
- return MakeCircSt(Intrinsic::hexagon_S2_storerd_pcr, /*HasImm*/false);
- case Hexagon::BI__builtin_brev_ldub:
- return MakeBrevLd(Intrinsic::hexagon_L2_loadrub_pbr, Int8Ty);
- case Hexagon::BI__builtin_brev_ldb:
- return MakeBrevLd(Intrinsic::hexagon_L2_loadrb_pbr, Int8Ty);
- case Hexagon::BI__builtin_brev_lduh:
- return MakeBrevLd(Intrinsic::hexagon_L2_loadruh_pbr, Int16Ty);
- case Hexagon::BI__builtin_brev_ldh:
- return MakeBrevLd(Intrinsic::hexagon_L2_loadrh_pbr, Int16Ty);
- case Hexagon::BI__builtin_brev_ldw:
- return MakeBrevLd(Intrinsic::hexagon_L2_loadri_pbr, Int32Ty);
- case Hexagon::BI__builtin_brev_ldd:
- return MakeBrevLd(Intrinsic::hexagon_L2_loadrd_pbr, Int64Ty);
- default:
- break;
- } // switch
- return nullptr;
- }
|