//===----- CGOpenMPRuntime.cpp - Interface to OpenMP Runtimes -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This provides a class for OpenMP runtime code generation.
//
//===----------------------------------------------------------------------===//

#include "CGCXXABI.h"
#include "CGCleanup.h"
#include "CGOpenMPRuntime.h"
#include "CGRecordLayout.h"
#include "CodeGenFunction.h"
#include "clang/CodeGen/ConstantInitBuilder.h"
#include "clang/AST/Decl.h"
#include "clang/AST/StmtOpenMP.h"
#include "clang/Basic/BitmaskEnum.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/Bitcode/BitcodeReader.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>

using namespace clang;
using namespace CodeGen;

namespace {
/// Base class for handling code generation inside OpenMP regions.
class CGOpenMPRegionInfo : public CodeGenFunction::CGCapturedStmtInfo {
public:
  /// Kinds of OpenMP regions used in codegen.
  enum CGOpenMPRegionKind {
    /// Region with outlined function for standalone 'parallel'
    /// directive.
    ParallelOutlinedRegion,
    /// Region with outlined function for standalone 'task' directive.
    TaskOutlinedRegion,
    /// Region for constructs that do not require function outlining,
    /// like 'for', 'sections', 'atomic' etc. directives.
    InlinedRegion,
    /// Region with outlined function for standalone 'target' directive.
    TargetRegion,
  };

  CGOpenMPRegionInfo(const CapturedStmt &CS,
                     const CGOpenMPRegionKind RegionKind,
                     const RegionCodeGenTy &CodeGen, OpenMPDirectiveKind Kind,
                     bool HasCancel)
      : CGCapturedStmtInfo(CS, CR_OpenMP), RegionKind(RegionKind),
        CodeGen(CodeGen), Kind(Kind), HasCancel(HasCancel) {}

  CGOpenMPRegionInfo(const CGOpenMPRegionKind RegionKind,
                     const RegionCodeGenTy &CodeGen, OpenMPDirectiveKind Kind,
                     bool HasCancel)
      : CGCapturedStmtInfo(CR_OpenMP), RegionKind(RegionKind), CodeGen(CodeGen),
        Kind(Kind), HasCancel(HasCancel) {}

  /// Get a variable or parameter for storing global thread id
  /// inside OpenMP construct.
  virtual const VarDecl *getThreadIDVariable() const = 0;

  /// Emit the captured statement body.
  void EmitBody(CodeGenFunction &CGF, const Stmt *S) override;

  /// Get an LValue for the current ThreadID variable.
  /// \return LValue for thread id variable. This LValue always has type int32*.
  virtual LValue getThreadIDVariableLValue(CodeGenFunction &CGF);

  virtual void emitUntiedSwitch(CodeGenFunction & /*CGF*/) {}

  CGOpenMPRegionKind getRegionKind() const { return RegionKind; }

  OpenMPDirectiveKind getDirectiveKind() const { return Kind; }

  bool hasCancel() const { return HasCancel; }

  static bool classof(const CGCapturedStmtInfo *Info) {
    return Info->getKind() == CR_OpenMP;
  }

  ~CGOpenMPRegionInfo() override = default;

protected:
  CGOpenMPRegionKind RegionKind;
  RegionCodeGenTy CodeGen;
  OpenMPDirectiveKind Kind;
  bool HasCancel;
};
/// API for captured statement code generation in OpenMP constructs.
class CGOpenMPOutlinedRegionInfo final : public CGOpenMPRegionInfo {
public:
  CGOpenMPOutlinedRegionInfo(const CapturedStmt &CS, const VarDecl *ThreadIDVar,
                             const RegionCodeGenTy &CodeGen,
                             OpenMPDirectiveKind Kind, bool HasCancel,
                             StringRef HelperName)
      : CGOpenMPRegionInfo(CS, ParallelOutlinedRegion, CodeGen, Kind,
                           HasCancel),
        ThreadIDVar(ThreadIDVar), HelperName(HelperName) {
    assert(ThreadIDVar != nullptr && "No ThreadID in OpenMP region.");
  }

  /// Get a variable or parameter for storing global thread id
  /// inside OpenMP construct.
  const VarDecl *getThreadIDVariable() const override { return ThreadIDVar; }

  /// Get the name of the capture helper.
  StringRef getHelperName() const override { return HelperName; }

  static bool classof(const CGCapturedStmtInfo *Info) {
    return CGOpenMPRegionInfo::classof(Info) &&
           cast<CGOpenMPRegionInfo>(Info)->getRegionKind() ==
               ParallelOutlinedRegion;
  }

private:
  /// A variable or parameter storing global thread id for OpenMP
  /// constructs.
  const VarDecl *ThreadIDVar;
  StringRef HelperName;
};

/// API for captured statement code generation in OpenMP constructs.
class CGOpenMPTaskOutlinedRegionInfo final : public CGOpenMPRegionInfo {
public:
  class UntiedTaskActionTy final : public PrePostActionTy {
    bool Untied;
    const VarDecl *PartIDVar;
    const RegionCodeGenTy UntiedCodeGen;
    llvm::SwitchInst *UntiedSwitch = nullptr;

  public:
    UntiedTaskActionTy(bool Tied, const VarDecl *PartIDVar,
                       const RegionCodeGenTy &UntiedCodeGen)
        : Untied(!Tied), PartIDVar(PartIDVar), UntiedCodeGen(UntiedCodeGen) {}
    void Enter(CodeGenFunction &CGF) override {
      if (Untied) {
        // Emit task switching point.
        LValue PartIdLVal = CGF.EmitLoadOfPointerLValue(
            CGF.GetAddrOfLocalVar(PartIDVar),
            PartIDVar->getType()->castAs<PointerType>());
        llvm::Value *Res =
            CGF.EmitLoadOfScalar(PartIdLVal, PartIDVar->getLocation());
        llvm::BasicBlock *DoneBB = CGF.createBasicBlock(".untied.done.");
        UntiedSwitch = CGF.Builder.CreateSwitch(Res, DoneBB);
        CGF.EmitBlock(DoneBB);
        CGF.EmitBranchThroughCleanup(CGF.ReturnBlock);
        CGF.EmitBlock(CGF.createBasicBlock(".untied.jmp."));
        UntiedSwitch->addCase(CGF.Builder.getInt32(0),
                              CGF.Builder.GetInsertBlock());
        emitUntiedSwitch(CGF);
      }
    }
    void emitUntiedSwitch(CodeGenFunction &CGF) const {
      if (Untied) {
        LValue PartIdLVal = CGF.EmitLoadOfPointerLValue(
            CGF.GetAddrOfLocalVar(PartIDVar),
            PartIDVar->getType()->castAs<PointerType>());
        CGF.EmitStoreOfScalar(CGF.Builder.getInt32(UntiedSwitch->getNumCases()),
                              PartIdLVal);
        UntiedCodeGen(CGF);
        CodeGenFunction::JumpDest CurPoint =
            CGF.getJumpDestInCurrentScope(".untied.next.");
        CGF.EmitBranchThroughCleanup(CGF.ReturnBlock);
        CGF.EmitBlock(CGF.createBasicBlock(".untied.jmp."));
        UntiedSwitch->addCase(CGF.Builder.getInt32(UntiedSwitch->getNumCases()),
                              CGF.Builder.GetInsertBlock());
        CGF.EmitBranchThroughCleanup(CurPoint);
        CGF.EmitBlock(CurPoint.getBlock());
      }
    }
    unsigned getNumberOfParts() const { return UntiedSwitch->getNumCases(); }
  };
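  // Note on UntiedTaskActionTy: the runtime may suspend and resume an untied
  // task several times, so the task body is split into numbered parts. As a
  // rough sketch (illustrative pseudo-IR, not literal codegen output), the
  // emitted control flow looks like:
  //
  //   entry:   %part = load i32, i32* %part_id
  //            switch i32 %part, label %.untied.done. [ i32 0, label %part0
  //                                                     i32 1, label %part1 ]
  //   part0:   ... ; body until the first scheduling point, then:
  //            store i32 1, i32* %part_id   ; remember where to resume
  //            br label %.untied.done.      ; give control back to the runtime
  //
  // Enter() builds the switch; each emitUntiedSwitch() call stores the next
  // part id and registers the corresponding resume block as a new case.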
  CGOpenMPTaskOutlinedRegionInfo(const CapturedStmt &CS,
                                 const VarDecl *ThreadIDVar,
                                 const RegionCodeGenTy &CodeGen,
                                 OpenMPDirectiveKind Kind, bool HasCancel,
                                 const UntiedTaskActionTy &Action)
      : CGOpenMPRegionInfo(CS, TaskOutlinedRegion, CodeGen, Kind, HasCancel),
        ThreadIDVar(ThreadIDVar), Action(Action) {
    assert(ThreadIDVar != nullptr && "No ThreadID in OpenMP region.");
  }

  /// Get a variable or parameter for storing global thread id
  /// inside OpenMP construct.
  const VarDecl *getThreadIDVariable() const override { return ThreadIDVar; }

  /// Get an LValue for the current ThreadID variable.
  LValue getThreadIDVariableLValue(CodeGenFunction &CGF) override;

  /// Get the name of the capture helper.
  StringRef getHelperName() const override { return ".omp_outlined."; }

  void emitUntiedSwitch(CodeGenFunction &CGF) override {
    Action.emitUntiedSwitch(CGF);
  }

  static bool classof(const CGCapturedStmtInfo *Info) {
    return CGOpenMPRegionInfo::classof(Info) &&
           cast<CGOpenMPRegionInfo>(Info)->getRegionKind() ==
               TaskOutlinedRegion;
  }

private:
  /// A variable or parameter storing global thread id for OpenMP
  /// constructs.
  const VarDecl *ThreadIDVar;
  /// Action for emitting code for untied tasks.
  const UntiedTaskActionTy &Action;
};
/// API for inlined captured statement code generation in OpenMP
/// constructs.
class CGOpenMPInlinedRegionInfo : public CGOpenMPRegionInfo {
public:
  CGOpenMPInlinedRegionInfo(CodeGenFunction::CGCapturedStmtInfo *OldCSI,
                            const RegionCodeGenTy &CodeGen,
                            OpenMPDirectiveKind Kind, bool HasCancel)
      : CGOpenMPRegionInfo(InlinedRegion, CodeGen, Kind, HasCancel),
        OldCSI(OldCSI),
        OuterRegionInfo(dyn_cast_or_null<CGOpenMPRegionInfo>(OldCSI)) {}

  // Retrieve the value of the context parameter.
  llvm::Value *getContextValue() const override {
    if (OuterRegionInfo)
      return OuterRegionInfo->getContextValue();
    llvm_unreachable("No context value for inlined OpenMP region");
  }

  void setContextValue(llvm::Value *V) override {
    if (OuterRegionInfo) {
      OuterRegionInfo->setContextValue(V);
      return;
    }
    llvm_unreachable("No context value for inlined OpenMP region");
  }

  /// Lookup the captured field decl for a variable.
  const FieldDecl *lookup(const VarDecl *VD) const override {
    if (OuterRegionInfo)
      return OuterRegionInfo->lookup(VD);
    // If there is no outer outlined region, there is no need to look the
    // variable up in a list of captured variables; the original one can be
    // used directly.
    return nullptr;
  }

  FieldDecl *getThisFieldDecl() const override {
    if (OuterRegionInfo)
      return OuterRegionInfo->getThisFieldDecl();
    return nullptr;
  }

  /// Get a variable or parameter for storing global thread id
  /// inside OpenMP construct.
  const VarDecl *getThreadIDVariable() const override {
    if (OuterRegionInfo)
      return OuterRegionInfo->getThreadIDVariable();
    return nullptr;
  }

  /// Get an LValue for the current ThreadID variable.
  LValue getThreadIDVariableLValue(CodeGenFunction &CGF) override {
    if (OuterRegionInfo)
      return OuterRegionInfo->getThreadIDVariableLValue(CGF);
    llvm_unreachable("No LValue for inlined OpenMP construct");
  }

  /// Get the name of the capture helper.
  StringRef getHelperName() const override {
    if (auto *OuterRegionInfo = getOldCSI())
      return OuterRegionInfo->getHelperName();
    llvm_unreachable("No helper name for inlined OpenMP construct");
  }

  void emitUntiedSwitch(CodeGenFunction &CGF) override {
    if (OuterRegionInfo)
      OuterRegionInfo->emitUntiedSwitch(CGF);
  }

  CodeGenFunction::CGCapturedStmtInfo *getOldCSI() const { return OldCSI; }

  static bool classof(const CGCapturedStmtInfo *Info) {
    return CGOpenMPRegionInfo::classof(Info) &&
           cast<CGOpenMPRegionInfo>(Info)->getRegionKind() == InlinedRegion;
  }

  ~CGOpenMPInlinedRegionInfo() override = default;

private:
  /// CodeGen info about outer OpenMP region.
  CodeGenFunction::CGCapturedStmtInfo *OldCSI;
  CGOpenMPRegionInfo *OuterRegionInfo;
};
/// API for captured statement code generation in OpenMP target
/// constructs. For these captures, implicit parameters are used instead of
/// the captured fields. The name of the target region has to be unique in a
/// given application, so it is provided by the client, because only the
/// client has the information needed to generate it.
class CGOpenMPTargetRegionInfo final : public CGOpenMPRegionInfo {
public:
  CGOpenMPTargetRegionInfo(const CapturedStmt &CS,
                           const RegionCodeGenTy &CodeGen, StringRef HelperName)
      : CGOpenMPRegionInfo(CS, TargetRegion, CodeGen, OMPD_target,
                           /*HasCancel=*/false),
        HelperName(HelperName) {}

  /// This is unused for target regions because each starts executing
  /// with a single thread.
  const VarDecl *getThreadIDVariable() const override { return nullptr; }

  /// Get the name of the capture helper.
  StringRef getHelperName() const override { return HelperName; }

  static bool classof(const CGCapturedStmtInfo *Info) {
    return CGOpenMPRegionInfo::classof(Info) &&
           cast<CGOpenMPRegionInfo>(Info)->getRegionKind() == TargetRegion;
  }

private:
  StringRef HelperName;
};
static void EmptyCodeGen(CodeGenFunction &, PrePostActionTy &) {
  llvm_unreachable("No codegen for expressions");
}

/// API for generation of expressions captured in an innermost OpenMP
/// region.
class CGOpenMPInnerExprInfo final : public CGOpenMPInlinedRegionInfo {
public:
  CGOpenMPInnerExprInfo(CodeGenFunction &CGF, const CapturedStmt &CS)
      : CGOpenMPInlinedRegionInfo(CGF.CapturedStmtInfo, EmptyCodeGen,
                                  OMPD_unknown,
                                  /*HasCancel=*/false),
        PrivScope(CGF) {
    // Make sure the globals captured in the provided statement are local by
    // using the privatization logic. We assume the same variable is not
    // captured more than once.
    for (const auto &C : CS.captures()) {
      if (!C.capturesVariable() && !C.capturesVariableByCopy())
        continue;
      const VarDecl *VD = C.getCapturedVar();
      if (VD->isLocalVarDeclOrParm())
        continue;
      DeclRefExpr DRE(const_cast<VarDecl *>(VD),
                      /*RefersToEnclosingVariableOrCapture=*/false,
                      VD->getType().getNonReferenceType(), VK_LValue,
                      C.getLocation());
      PrivScope.addPrivate(
          VD, [&CGF, &DRE]() { return CGF.EmitLValue(&DRE).getAddress(); });
    }
    (void)PrivScope.Privatize();
  }

  /// Lookup the captured field decl for a variable.
  const FieldDecl *lookup(const VarDecl *VD) const override {
    if (const FieldDecl *FD = CGOpenMPInlinedRegionInfo::lookup(VD))
      return FD;
    return nullptr;
  }

  /// Emit the captured statement body.
  void EmitBody(CodeGenFunction &CGF, const Stmt *S) override {
    llvm_unreachable("No body for expressions");
  }

  /// Get a variable or parameter for storing global thread id
  /// inside OpenMP construct.
  const VarDecl *getThreadIDVariable() const override {
    llvm_unreachable("No thread id for expressions");
  }

  /// Get the name of the capture helper.
  StringRef getHelperName() const override {
    llvm_unreachable("No helper name for expressions");
  }

  static bool classof(const CGCapturedStmtInfo *Info) { return false; }

private:
  /// Private scope to capture global variables.
  CodeGenFunction::OMPPrivateScope PrivScope;
};
/// RAII for emitting code of OpenMP constructs.
class InlinedOpenMPRegionRAII {
  CodeGenFunction &CGF;
  llvm::DenseMap<const VarDecl *, FieldDecl *> LambdaCaptureFields;
  FieldDecl *LambdaThisCaptureField = nullptr;
  const CodeGen::CGBlockInfo *BlockInfo = nullptr;

public:
  /// Constructs region for combined constructs.
  /// \param CodeGen Code generation sequence for combined directives. Includes
  /// a list of functions used for code generation of implicitly inlined
  /// regions.
  InlinedOpenMPRegionRAII(CodeGenFunction &CGF, const RegionCodeGenTy &CodeGen,
                          OpenMPDirectiveKind Kind, bool HasCancel)
      : CGF(CGF) {
    // Start emission for the construct.
    CGF.CapturedStmtInfo = new CGOpenMPInlinedRegionInfo(
        CGF.CapturedStmtInfo, CodeGen, Kind, HasCancel);
    std::swap(CGF.LambdaCaptureFields, LambdaCaptureFields);
    LambdaThisCaptureField = CGF.LambdaThisCaptureField;
    CGF.LambdaThisCaptureField = nullptr;
    BlockInfo = CGF.BlockInfo;
    CGF.BlockInfo = nullptr;
  }

  ~InlinedOpenMPRegionRAII() {
    // Restore original CapturedStmtInfo only if we're done with code emission.
    auto *OldCSI =
        cast<CGOpenMPInlinedRegionInfo>(CGF.CapturedStmtInfo)->getOldCSI();
    delete CGF.CapturedStmtInfo;
    CGF.CapturedStmtInfo = OldCSI;
    std::swap(CGF.LambdaCaptureFields, LambdaCaptureFields);
    CGF.LambdaThisCaptureField = LambdaThisCaptureField;
    CGF.BlockInfo = BlockInfo;
  }
};
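// Typical use of the RAII above (an illustrative sketch, mirroring how
// inlined directives are emitted later in this file; the surrounding
// variables are assumed to be in scope):
//
//   {
//     InlinedOpenMPRegionRAII Region(CGF, CodeGen, OMPD_for,
//                                    /*HasCancel=*/false);
//     CodeGen(CGF); // Emit the body with the inlined region info installed.
//   } // Destructor restores CapturedStmtInfo, lambda captures and BlockInfo.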
/// Values for bit flags used in the ident_t to describe the fields.
/// All enumerated elements are named and described in accordance with the
/// code from http://llvm.org/svn/llvm-project/openmp/trunk/runtime/src/kmp.h
enum OpenMPLocationFlags : unsigned {
  /// Use trampoline for internal microtask.
  OMP_IDENT_IMD = 0x01,
  /// Use c-style ident structure.
  OMP_IDENT_KMPC = 0x02,
  /// Atomic reduction option for kmpc_reduce.
  OMP_ATOMIC_REDUCE = 0x10,
  /// Explicit 'barrier' directive.
  OMP_IDENT_BARRIER_EXPL = 0x20,
  /// Implicit barrier in code.
  OMP_IDENT_BARRIER_IMPL = 0x40,
  /// Implicit barrier in 'for' directive.
  OMP_IDENT_BARRIER_IMPL_FOR = 0x40,
  /// Implicit barrier in 'sections' directive.
  OMP_IDENT_BARRIER_IMPL_SECTIONS = 0xC0,
  /// Implicit barrier in 'single' directive.
  OMP_IDENT_BARRIER_IMPL_SINGLE = 0x140,
  /// Call of __kmp_for_static_init for static loop.
  OMP_IDENT_WORK_LOOP = 0x200,
  /// Call of __kmp_for_static_init for sections.
  OMP_IDENT_WORK_SECTIONS = 0x400,
  /// Call of __kmp_for_static_init for distribute.
  OMP_IDENT_WORK_DISTRIBUTE = 0x800,
  LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue=*/OMP_IDENT_WORK_DISTRIBUTE)
};
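// Illustrative example: these flags are OR-ed together in the 'flags' field
// of the emitted ident_t. An implicit barrier at the end of a worksharing
// loop would carry something like
//   OMP_IDENT_KMPC | OMP_IDENT_BARRIER_IMPL_FOR
// while an explicit '#pragma omp barrier' would carry
//   OMP_IDENT_KMPC | OMP_IDENT_BARRIER_EXPL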

/// Describes ident structure that describes a source location.
/// All descriptions are taken from
/// http://llvm.org/svn/llvm-project/openmp/trunk/runtime/src/kmp.h
/// Original structure:
/// typedef struct ident {
///    kmp_int32 reserved_1;   /**<  might be used in Fortran;
///                                  see above  */
///    kmp_int32 flags;        /**<  also f.flags; KMP_IDENT_xxx flags;
///                                  KMP_IDENT_KMPC identifies this union
///                                  member  */
///    kmp_int32 reserved_2;   /**<  not really used in Fortran any more;
///                                  see above */
///#if USE_ITT_BUILD
///                            /*  but currently used for storing
///                                region-specific ITT */
///                            /*  contextual information. */
///#endif /* USE_ITT_BUILD */
///    kmp_int32 reserved_3;   /**<  source[4] in Fortran, do not use for
///                                  C++  */
///    char const *psource;    /**<  String describing the source location.
///                                  The string is composed of semi-colon
///                                  separated fields which describe the
///                                  source file, the function and a pair of
///                                  line numbers that delimit the construct.
///                             */
/// } ident_t;
enum IdentFieldIndex {
  /// might be used in Fortran
  IdentField_Reserved_1,
  /// OMP_IDENT_xxx flags; OMP_IDENT_KMPC identifies this union member.
  IdentField_Flags,
  /// Not really used in Fortran any more
  IdentField_Reserved_2,
  /// Source[4] in Fortran, do not use for C++
  IdentField_Reserved_3,
  /// String describing the source location. The string is composed of
  /// semi-colon separated fields which describe the source file, the function
  /// and a pair of line numbers that delimit the construct.
  IdentField_PSource
};
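
// Illustrative example (not part of the original source): for a construct
// inside function foo() starting at line 10, column 3 of test.c,
// emitUpdateLocation() below fills psource with a string of the form
//   ";test.c;foo;10;3;;"
// i.e. semi-colon separated file, function, line and column fields.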

/// Schedule types for 'omp for' loops (these enumerators are taken from
/// the enum sched_type in kmp.h).
enum OpenMPSchedType {
  /// Lower bound for default (unordered) versions.
  OMP_sch_lower = 32,
  OMP_sch_static_chunked = 33,
  OMP_sch_static = 34,
  OMP_sch_dynamic_chunked = 35,
  OMP_sch_guided_chunked = 36,
  OMP_sch_runtime = 37,
  OMP_sch_auto = 38,
  /// static with chunk adjustment (e.g., simd)
  OMP_sch_static_balanced_chunked = 45,
  /// Lower bound for 'ordered' versions.
  OMP_ord_lower = 64,
  OMP_ord_static_chunked = 65,
  OMP_ord_static = 66,
  OMP_ord_dynamic_chunked = 67,
  OMP_ord_guided_chunked = 68,
  OMP_ord_runtime = 69,
  OMP_ord_auto = 70,
  OMP_sch_default = OMP_sch_static,
  /// dist_schedule types
  OMP_dist_sch_static_chunked = 91,
  OMP_dist_sch_static = 92,
  /// Support for OpenMP 4.5 monotonic and nonmonotonic schedule modifiers.
  /// Set if the monotonic schedule modifier was present.
  OMP_sch_modifier_monotonic = (1 << 29),
  /// Set if the nonmonotonic schedule modifier was present.
  OMP_sch_modifier_nonmonotonic = (1 << 30),
};
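
// Illustrative note (not part of the original source): the two modifier bits
// are OR'ed into the base schedule value before it is passed to the runtime,
// so a 'schedule(nonmonotonic: dynamic)' clause is encoded as
//   OMP_sch_dynamic_chunked | OMP_sch_modifier_nonmonotonic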

enum OpenMPRTLFunction {
  /// Call to void __kmpc_fork_call(ident_t *loc, kmp_int32 argc,
  /// kmpc_micro microtask, ...);
  OMPRTL__kmpc_fork_call,
  /// Call to void *__kmpc_threadprivate_cached(ident_t *loc,
  /// kmp_int32 global_tid, void *data, size_t size, void ***cache);
  OMPRTL__kmpc_threadprivate_cached,
  /// Call to void __kmpc_threadprivate_register(ident_t *,
  /// void *data, kmpc_ctor ctor, kmpc_cctor cctor, kmpc_dtor dtor);
  OMPRTL__kmpc_threadprivate_register,
  // Call to kmp_int32 __kmpc_global_thread_num(ident_t *loc);
  OMPRTL__kmpc_global_thread_num,
  // Call to void __kmpc_critical(ident_t *loc, kmp_int32 global_tid,
  // kmp_critical_name *crit);
  OMPRTL__kmpc_critical,
  // Call to void __kmpc_critical_with_hint(ident_t *loc, kmp_int32
  // global_tid, kmp_critical_name *crit, uintptr_t hint);
  OMPRTL__kmpc_critical_with_hint,
  // Call to void __kmpc_end_critical(ident_t *loc, kmp_int32 global_tid,
  // kmp_critical_name *crit);
  OMPRTL__kmpc_end_critical,
  // Call to kmp_int32 __kmpc_cancel_barrier(ident_t *loc, kmp_int32
  // global_tid);
  OMPRTL__kmpc_cancel_barrier,
  // Call to void __kmpc_barrier(ident_t *loc, kmp_int32 global_tid);
  OMPRTL__kmpc_barrier,
  // Call to void __kmpc_for_static_fini(ident_t *loc, kmp_int32 global_tid);
  OMPRTL__kmpc_for_static_fini,
  // Call to void __kmpc_serialized_parallel(ident_t *loc, kmp_int32
  // global_tid);
  OMPRTL__kmpc_serialized_parallel,
  // Call to void __kmpc_end_serialized_parallel(ident_t *loc, kmp_int32
  // global_tid);
  OMPRTL__kmpc_end_serialized_parallel,
  // Call to void __kmpc_push_num_threads(ident_t *loc, kmp_int32 global_tid,
  // kmp_int32 num_threads);
  OMPRTL__kmpc_push_num_threads,
  // Call to void __kmpc_flush(ident_t *loc);
  OMPRTL__kmpc_flush,
  // Call to kmp_int32 __kmpc_master(ident_t *, kmp_int32 global_tid);
  OMPRTL__kmpc_master,
  // Call to void __kmpc_end_master(ident_t *, kmp_int32 global_tid);
  OMPRTL__kmpc_end_master,
  // Call to kmp_int32 __kmpc_omp_taskyield(ident_t *, kmp_int32 global_tid,
  // int end_part);
  OMPRTL__kmpc_omp_taskyield,
  // Call to kmp_int32 __kmpc_single(ident_t *, kmp_int32 global_tid);
  OMPRTL__kmpc_single,
  // Call to void __kmpc_end_single(ident_t *, kmp_int32 global_tid);
  OMPRTL__kmpc_end_single,
  // Call to kmp_task_t * __kmpc_omp_task_alloc(ident_t *, kmp_int32 gtid,
  // kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds,
  // kmp_routine_entry_t *task_entry);
  OMPRTL__kmpc_omp_task_alloc,
  // Call to kmp_int32 __kmpc_omp_task(ident_t *, kmp_int32 gtid, kmp_task_t *
  // new_task);
  OMPRTL__kmpc_omp_task,
  // Call to void __kmpc_copyprivate(ident_t *loc, kmp_int32 global_tid,
  // size_t cpy_size, void *cpy_data, void(*cpy_func)(void *, void *),
  // kmp_int32 didit);
  OMPRTL__kmpc_copyprivate,
  // Call to kmp_int32 __kmpc_reduce(ident_t *loc, kmp_int32 global_tid,
  // kmp_int32 num_vars, size_t reduce_size, void *reduce_data, void
  // (*reduce_func)(void *lhs_data, void *rhs_data), kmp_critical_name *lck);
  OMPRTL__kmpc_reduce,
  // Call to kmp_int32 __kmpc_reduce_nowait(ident_t *loc, kmp_int32
  // global_tid, kmp_int32 num_vars, size_t reduce_size, void *reduce_data,
  // void (*reduce_func)(void *lhs_data, void *rhs_data), kmp_critical_name
  // *lck);
  OMPRTL__kmpc_reduce_nowait,
  // Call to void __kmpc_end_reduce(ident_t *loc, kmp_int32 global_tid,
  // kmp_critical_name *lck);
  OMPRTL__kmpc_end_reduce,
  // Call to void __kmpc_end_reduce_nowait(ident_t *loc, kmp_int32 global_tid,
  // kmp_critical_name *lck);
  OMPRTL__kmpc_end_reduce_nowait,
  // Call to void __kmpc_omp_task_begin_if0(ident_t *, kmp_int32 gtid,
  // kmp_task_t * new_task);
  OMPRTL__kmpc_omp_task_begin_if0,
  // Call to void __kmpc_omp_task_complete_if0(ident_t *, kmp_int32 gtid,
  // kmp_task_t * new_task);
  OMPRTL__kmpc_omp_task_complete_if0,
  // Call to void __kmpc_ordered(ident_t *loc, kmp_int32 global_tid);
  OMPRTL__kmpc_ordered,
  // Call to void __kmpc_end_ordered(ident_t *loc, kmp_int32 global_tid);
  OMPRTL__kmpc_end_ordered,
  // Call to kmp_int32 __kmpc_omp_taskwait(ident_t *loc, kmp_int32
  // global_tid);
  OMPRTL__kmpc_omp_taskwait,
  // Call to void __kmpc_taskgroup(ident_t *loc, kmp_int32 global_tid);
  OMPRTL__kmpc_taskgroup,
  // Call to void __kmpc_end_taskgroup(ident_t *loc, kmp_int32 global_tid);
  OMPRTL__kmpc_end_taskgroup,
  // Call to void __kmpc_push_proc_bind(ident_t *loc, kmp_int32 global_tid,
  // int proc_bind);
  OMPRTL__kmpc_push_proc_bind,
  // Call to kmp_int32 __kmpc_omp_task_with_deps(ident_t *loc_ref, kmp_int32
  // gtid, kmp_task_t * new_task, kmp_int32 ndeps, kmp_depend_info_t
  // *dep_list, kmp_int32 ndeps_noalias, kmp_depend_info_t *noalias_dep_list);
  OMPRTL__kmpc_omp_task_with_deps,
  // Call to void __kmpc_omp_wait_deps(ident_t *loc_ref, kmp_int32
  // gtid, kmp_int32 ndeps, kmp_depend_info_t *dep_list, kmp_int32
  // ndeps_noalias, kmp_depend_info_t *noalias_dep_list);
  OMPRTL__kmpc_omp_wait_deps,
  // Call to kmp_int32 __kmpc_cancellationpoint(ident_t *loc, kmp_int32
  // global_tid, kmp_int32 cncl_kind);
  OMPRTL__kmpc_cancellationpoint,
  // Call to kmp_int32 __kmpc_cancel(ident_t *loc, kmp_int32 global_tid,
  // kmp_int32 cncl_kind);
  OMPRTL__kmpc_cancel,
  // Call to void __kmpc_push_num_teams(ident_t *loc, kmp_int32 global_tid,
  // kmp_int32 num_teams, kmp_int32 thread_limit);
  OMPRTL__kmpc_push_num_teams,
  // Call to void __kmpc_fork_teams(ident_t *loc, kmp_int32 argc, kmpc_micro
  // microtask, ...);
  OMPRTL__kmpc_fork_teams,
  // Call to void __kmpc_taskloop(ident_t *loc, int gtid, kmp_task_t *task, int
  // if_val, kmp_uint64 *lb, kmp_uint64 *ub, kmp_int64 st, int nogroup, int
  // sched, kmp_uint64 grainsize, void *task_dup);
  OMPRTL__kmpc_taskloop,
  // Call to void __kmpc_doacross_init(ident_t *loc, kmp_int32 gtid, kmp_int32
  // num_dims, struct kmp_dim *dims);
  OMPRTL__kmpc_doacross_init,
  // Call to void __kmpc_doacross_fini(ident_t *loc, kmp_int32 gtid);
  OMPRTL__kmpc_doacross_fini,
  // Call to void __kmpc_doacross_post(ident_t *loc, kmp_int32 gtid, kmp_int64
  // *vec);
  OMPRTL__kmpc_doacross_post,
  // Call to void __kmpc_doacross_wait(ident_t *loc, kmp_int32 gtid, kmp_int64
  // *vec);
  OMPRTL__kmpc_doacross_wait,
  // Call to void *__kmpc_task_reduction_init(int gtid, int num_data, void
  // *data);
  OMPRTL__kmpc_task_reduction_init,
  // Call to void *__kmpc_task_reduction_get_th_data(int gtid, void *tg, void
  // *d);
  OMPRTL__kmpc_task_reduction_get_th_data,

  //
  // Offloading related calls
  //
  // Call to int32_t __tgt_target(int64_t device_id, void *host_ptr, int32_t
  // arg_num, void** args_base, void **args, size_t *arg_sizes, int64_t
  // *arg_types);
  OMPRTL__tgt_target,
  // Call to int32_t __tgt_target_nowait(int64_t device_id, void *host_ptr,
  // int32_t arg_num, void** args_base, void **args, size_t *arg_sizes, int64_t
  // *arg_types);
  OMPRTL__tgt_target_nowait,
  // Call to int32_t __tgt_target_teams(int64_t device_id, void *host_ptr,
  // int32_t arg_num, void** args_base, void **args, size_t *arg_sizes, int64_t
  // *arg_types, int32_t num_teams, int32_t thread_limit);
  OMPRTL__tgt_target_teams,
  // Call to int32_t __tgt_target_teams_nowait(int64_t device_id, void
  // *host_ptr, int32_t arg_num, void** args_base, void **args, size_t
  // *arg_sizes, int64_t *arg_types, int32_t num_teams, int32_t thread_limit);
  OMPRTL__tgt_target_teams_nowait,
  // Call to void __tgt_register_lib(__tgt_bin_desc *desc);
  OMPRTL__tgt_register_lib,
  // Call to void __tgt_unregister_lib(__tgt_bin_desc *desc);
  OMPRTL__tgt_unregister_lib,
  // Call to void __tgt_target_data_begin(int64_t device_id, int32_t arg_num,
  // void** args_base, void **args, size_t *arg_sizes, int64_t *arg_types);
  OMPRTL__tgt_target_data_begin,
  // Call to void __tgt_target_data_begin_nowait(int64_t device_id, int32_t
  // arg_num, void** args_base, void **args, size_t *arg_sizes, int64_t
  // *arg_types);
  OMPRTL__tgt_target_data_begin_nowait,
  // Call to void __tgt_target_data_end(int64_t device_id, int32_t arg_num,
  // void** args_base, void **args, size_t *arg_sizes, int64_t *arg_types);
  OMPRTL__tgt_target_data_end,
  // Call to void __tgt_target_data_end_nowait(int64_t device_id, int32_t
  // arg_num, void** args_base, void **args, size_t *arg_sizes, int64_t
  // *arg_types);
  OMPRTL__tgt_target_data_end_nowait,
  // Call to void __tgt_target_data_update(int64_t device_id, int32_t arg_num,
  // void** args_base, void **args, size_t *arg_sizes, int64_t *arg_types);
  OMPRTL__tgt_target_data_update,
  // Call to void __tgt_target_data_update_nowait(int64_t device_id, int32_t
  // arg_num, void** args_base, void **args, size_t *arg_sizes, int64_t
  // *arg_types);
  OMPRTL__tgt_target_data_update_nowait,
};
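
// Illustrative usage sketch (not part of the original source): each
// enumerator is resolved to a callee through createRuntimeFunction() below
// and typically invoked with an update location and a thread id, e.g.
//   llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc),
//                          getThreadID(CGF, Loc)};
//   CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_barrier), Args);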

/// A basic class for pre|post-action for advanced codegen sequence for OpenMP
/// region.
class CleanupTy final : public EHScopeStack::Cleanup {
  PrePostActionTy *Action;

public:
  explicit CleanupTy(PrePostActionTy *Action) : Action(Action) {}
  void Emit(CodeGenFunction &CGF, Flags /*flags*/) override {
    if (!CGF.HaveInsertPoint())
      return;
    Action->Exit(CGF);
  }
};

} // anonymous namespace

void RegionCodeGenTy::operator()(CodeGenFunction &CGF) const {
  CodeGenFunction::RunCleanupsScope Scope(CGF);
  if (PrePostAction) {
    CGF.EHStack.pushCleanup<CleanupTy>(NormalAndEHCleanup, PrePostAction);
    Callback(CodeGen, CGF, *PrePostAction);
  } else {
    PrePostActionTy Action;
    Callback(CodeGen, CGF, Action);
  }
}

/// Check if the combiner is a call to a UDR combiner and, if so, return the
/// UDR decl used for the reduction.
static const OMPDeclareReductionDecl *
getReductionInit(const Expr *ReductionOp) {
  if (const auto *CE = dyn_cast<CallExpr>(ReductionOp))
    if (const auto *OVE = dyn_cast<OpaqueValueExpr>(CE->getCallee()))
      if (const auto *DRE =
              dyn_cast<DeclRefExpr>(OVE->getSourceExpr()->IgnoreImpCasts()))
        if (const auto *DRD = dyn_cast<OMPDeclareReductionDecl>(DRE->getDecl()))
          return DRD;
  return nullptr;
}

static void emitInitWithReductionInitializer(CodeGenFunction &CGF,
                                             const OMPDeclareReductionDecl *DRD,
                                             const Expr *InitOp,
                                             Address Private, Address Original,
                                             QualType Ty) {
  if (DRD->getInitializer()) {
    std::pair<llvm::Function *, llvm::Function *> Reduction =
        CGF.CGM.getOpenMPRuntime().getUserDefinedReduction(DRD);
    const auto *CE = cast<CallExpr>(InitOp);
    const auto *OVE = cast<OpaqueValueExpr>(CE->getCallee());
    const Expr *LHS = CE->getArg(/*Arg=*/0)->IgnoreParenImpCasts();
    const Expr *RHS = CE->getArg(/*Arg=*/1)->IgnoreParenImpCasts();
    const auto *LHSDRE =
        cast<DeclRefExpr>(cast<UnaryOperator>(LHS)->getSubExpr());
    const auto *RHSDRE =
        cast<DeclRefExpr>(cast<UnaryOperator>(RHS)->getSubExpr());
    CodeGenFunction::OMPPrivateScope PrivateScope(CGF);
    PrivateScope.addPrivate(cast<VarDecl>(LHSDRE->getDecl()),
                            [=]() { return Private; });
    PrivateScope.addPrivate(cast<VarDecl>(RHSDRE->getDecl()),
                            [=]() { return Original; });
    (void)PrivateScope.Privatize();
    RValue Func = RValue::get(Reduction.second);
    CodeGenFunction::OpaqueValueMapping Map(CGF, OVE, Func);
    CGF.EmitIgnoredExpr(InitOp);
  } else {
    llvm::Constant *Init = CGF.CGM.EmitNullConstant(Ty);
    std::string Name = CGF.CGM.getOpenMPRuntime().getName({"init"});
    auto *GV = new llvm::GlobalVariable(
        CGF.CGM.getModule(), Init->getType(), /*isConstant=*/true,
        llvm::GlobalValue::PrivateLinkage, Init, Name);
    LValue LV = CGF.MakeNaturalAlignAddrLValue(GV, Ty);
    RValue InitRVal;
    switch (CGF.getEvaluationKind(Ty)) {
    case TEK_Scalar:
      InitRVal = CGF.EmitLoadOfLValue(LV, DRD->getLocation());
      break;
    case TEK_Complex:
      InitRVal =
          RValue::getComplex(CGF.EmitLoadOfComplex(LV, DRD->getLocation()));
      break;
    case TEK_Aggregate:
      InitRVal = RValue::getAggregate(LV.getAddress());
      break;
    }
    OpaqueValueExpr OVE(DRD->getLocation(), Ty, VK_RValue);
    CodeGenFunction::OpaqueValueMapping OpaqueMap(CGF, &OVE, InitRVal);
    CGF.EmitAnyExprToMem(&OVE, Private, Ty.getQualifiers(),
                         /*IsInitializer=*/false);
  }
}

/// Emit initialization of arrays of complex types.
/// \param DestAddr Address of the array.
/// \param Type Type of array.
/// \param EmitDeclareReductionInit If true, initialize with the user-defined
/// reduction initializer instead of the default initializer.
/// \param Init Initial expression of array.
/// \param DRD Declare reduction declaration used for the reduction, if any.
/// \param SrcAddr Address of the original array.
static void EmitOMPAggregateInit(CodeGenFunction &CGF, Address DestAddr,
                                 QualType Type, bool EmitDeclareReductionInit,
                                 const Expr *Init,
                                 const OMPDeclareReductionDecl *DRD,
                                 Address SrcAddr = Address::invalid()) {
  // Perform element-by-element initialization.
  QualType ElementTy;

  // Drill down to the base element type on both arrays.
  const ArrayType *ArrayTy = Type->getAsArrayTypeUnsafe();
  llvm::Value *NumElements = CGF.emitArrayLength(ArrayTy, ElementTy, DestAddr);
  DestAddr =
      CGF.Builder.CreateElementBitCast(DestAddr, DestAddr.getElementType());
  if (DRD)
    SrcAddr =
        CGF.Builder.CreateElementBitCast(SrcAddr, DestAddr.getElementType());

  llvm::Value *SrcBegin = nullptr;
  if (DRD)
    SrcBegin = SrcAddr.getPointer();
  llvm::Value *DestBegin = DestAddr.getPointer();
  // Cast from pointer to array type to pointer to single element.
  llvm::Value *DestEnd = CGF.Builder.CreateGEP(DestBegin, NumElements);
  // The basic structure here is a while-do loop.
  llvm::BasicBlock *BodyBB = CGF.createBasicBlock("omp.arrayinit.body");
  llvm::BasicBlock *DoneBB = CGF.createBasicBlock("omp.arrayinit.done");
  llvm::Value *IsEmpty =
      CGF.Builder.CreateICmpEQ(DestBegin, DestEnd, "omp.arrayinit.isempty");
  CGF.Builder.CreateCondBr(IsEmpty, DoneBB, BodyBB);

  // Enter the loop body, making that address the current address.
  llvm::BasicBlock *EntryBB = CGF.Builder.GetInsertBlock();
  CGF.EmitBlock(BodyBB);

  CharUnits ElementSize = CGF.getContext().getTypeSizeInChars(ElementTy);

  llvm::PHINode *SrcElementPHI = nullptr;
  Address SrcElementCurrent = Address::invalid();
  if (DRD) {
    SrcElementPHI = CGF.Builder.CreatePHI(SrcBegin->getType(), 2,
                                          "omp.arraycpy.srcElementPast");
    SrcElementPHI->addIncoming(SrcBegin, EntryBB);
    SrcElementCurrent =
        Address(SrcElementPHI,
                SrcAddr.getAlignment().alignmentOfArrayElement(ElementSize));
  }
  llvm::PHINode *DestElementPHI = CGF.Builder.CreatePHI(
      DestBegin->getType(), 2, "omp.arraycpy.destElementPast");
  DestElementPHI->addIncoming(DestBegin, EntryBB);
  Address DestElementCurrent =
      Address(DestElementPHI,
              DestAddr.getAlignment().alignmentOfArrayElement(ElementSize));

  // Emit copy.
  {
    CodeGenFunction::RunCleanupsScope InitScope(CGF);
    if (EmitDeclareReductionInit) {
      emitInitWithReductionInitializer(CGF, DRD, Init, DestElementCurrent,
                                       SrcElementCurrent, ElementTy);
    } else
      CGF.EmitAnyExprToMem(Init, DestElementCurrent, ElementTy.getQualifiers(),
                           /*IsInitializer=*/false);
  }

  if (DRD) {
    // Shift the address forward by one element.
    llvm::Value *SrcElementNext = CGF.Builder.CreateConstGEP1_32(
        SrcElementPHI, /*Idx0=*/1, "omp.arraycpy.dest.element");
    SrcElementPHI->addIncoming(SrcElementNext, CGF.Builder.GetInsertBlock());
  }

  // Shift the address forward by one element.
  llvm::Value *DestElementNext = CGF.Builder.CreateConstGEP1_32(
      DestElementPHI, /*Idx0=*/1, "omp.arraycpy.dest.element");
  // Check whether we've reached the end.
  llvm::Value *Done =
      CGF.Builder.CreateICmpEQ(DestElementNext, DestEnd, "omp.arraycpy.done");
  CGF.Builder.CreateCondBr(Done, DoneBB, BodyBB);
  DestElementPHI->addIncoming(DestElementNext, CGF.Builder.GetInsertBlock());

  // Done.
  CGF.EmitBlock(DoneBB, /*IsFinished=*/true);
}
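
// Illustrative sketch (not part of the original source) of the IR loop shape
// emitted above for a destination array of N elements:
//
//   %isempty = icmp eq %dest.begin, %dest.end
//   br i1 %isempty, label %omp.arrayinit.done, label %omp.arrayinit.body
// omp.arrayinit.body:
//   %dest.cur = phi [ %dest.begin, %entry ], [ %dest.next, %body ]
//   ; ...emit Init (or the UDR initializer) into %dest.cur...
//   %dest.next = getelementptr %dest.cur, 1
//   %done = icmp eq %dest.next, %dest.end
//   br i1 %done, label %omp.arrayinit.done, label %omp.arrayinit.body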

LValue ReductionCodeGen::emitSharedLValue(CodeGenFunction &CGF, const Expr *E) {
  return CGF.EmitOMPSharedLValue(E);
}

LValue ReductionCodeGen::emitSharedLValueUB(CodeGenFunction &CGF,
                                            const Expr *E) {
  if (const auto *OASE = dyn_cast<OMPArraySectionExpr>(E))
    return CGF.EmitOMPArraySectionExpr(OASE, /*IsLowerBound=*/false);
  return LValue();
}

void ReductionCodeGen::emitAggregateInitialization(
    CodeGenFunction &CGF, unsigned N, Address PrivateAddr, LValue SharedLVal,
    const OMPDeclareReductionDecl *DRD) {
  // Emit VarDecl with copy init for arrays.
  // Get the address of the original variable captured in current
  // captured region.
  const auto *PrivateVD =
      cast<VarDecl>(cast<DeclRefExpr>(ClausesData[N].Private)->getDecl());
  bool EmitDeclareReductionInit =
      DRD && (DRD->getInitializer() || !PrivateVD->hasInit());
  EmitOMPAggregateInit(CGF, PrivateAddr, PrivateVD->getType(),
                       EmitDeclareReductionInit,
                       EmitDeclareReductionInit ? ClausesData[N].ReductionOp
                                                : PrivateVD->getInit(),
                       DRD, SharedLVal.getAddress());
}

ReductionCodeGen::ReductionCodeGen(ArrayRef<const Expr *> Shareds,
                                   ArrayRef<const Expr *> Privates,
                                   ArrayRef<const Expr *> ReductionOps) {
  ClausesData.reserve(Shareds.size());
  SharedAddresses.reserve(Shareds.size());
  Sizes.reserve(Shareds.size());
  BaseDecls.reserve(Shareds.size());
  auto IPriv = Privates.begin();
  auto IRed = ReductionOps.begin();
  for (const Expr *Ref : Shareds) {
    ClausesData.emplace_back(Ref, *IPriv, *IRed);
    std::advance(IPriv, 1);
    std::advance(IRed, 1);
  }
}

void ReductionCodeGen::emitSharedLValue(CodeGenFunction &CGF, unsigned N) {
  assert(SharedAddresses.size() == N &&
         "Number of generated lvalues must be exactly N.");
  LValue First = emitSharedLValue(CGF, ClausesData[N].Ref);
  LValue Second = emitSharedLValueUB(CGF, ClausesData[N].Ref);
  SharedAddresses.emplace_back(First, Second);
}

void ReductionCodeGen::emitAggregateType(CodeGenFunction &CGF, unsigned N) {
  const auto *PrivateVD =
      cast<VarDecl>(cast<DeclRefExpr>(ClausesData[N].Private)->getDecl());
  QualType PrivateType = PrivateVD->getType();
  bool AsArraySection = isa<OMPArraySectionExpr>(ClausesData[N].Ref);
  if (!PrivateType->isVariablyModifiedType()) {
    Sizes.emplace_back(
        CGF.getTypeSize(
            SharedAddresses[N].first.getType().getNonReferenceType()),
        nullptr);
    return;
  }
  llvm::Value *Size;
  llvm::Value *SizeInChars;
  auto *ElemType =
      cast<llvm::PointerType>(SharedAddresses[N].first.getPointer()->getType())
          ->getElementType();
  auto *ElemSizeOf = llvm::ConstantExpr::getSizeOf(ElemType);
  if (AsArraySection) {
    Size = CGF.Builder.CreatePtrDiff(SharedAddresses[N].second.getPointer(),
                                     SharedAddresses[N].first.getPointer());
    Size = CGF.Builder.CreateNUWAdd(
        Size, llvm::ConstantInt::get(Size->getType(), /*V=*/1));
    SizeInChars = CGF.Builder.CreateNUWMul(Size, ElemSizeOf);
  } else {
    SizeInChars = CGF.getTypeSize(
        SharedAddresses[N].first.getType().getNonReferenceType());
    Size = CGF.Builder.CreateExactUDiv(SizeInChars, ElemSizeOf);
  }
  Sizes.emplace_back(SizeInChars, Size);
  CodeGenFunction::OpaqueValueMapping OpaqueMap(
      CGF,
      cast<OpaqueValueExpr>(
          CGF.getContext().getAsVariableArrayType(PrivateType)->getSizeExpr()),
      RValue::get(Size));
  CGF.EmitVariablyModifiedType(PrivateType);
}

void ReductionCodeGen::emitAggregateType(CodeGenFunction &CGF, unsigned N,
                                         llvm::Value *Size) {
  const auto *PrivateVD =
      cast<VarDecl>(cast<DeclRefExpr>(ClausesData[N].Private)->getDecl());
  QualType PrivateType = PrivateVD->getType();
  if (!PrivateType->isVariablyModifiedType()) {
    assert(!Size && !Sizes[N].second &&
           "Size should be nullptr for non-variably modified reduction "
           "items.");
    return;
  }
  CodeGenFunction::OpaqueValueMapping OpaqueMap(
      CGF,
      cast<OpaqueValueExpr>(
          CGF.getContext().getAsVariableArrayType(PrivateType)->getSizeExpr()),
      RValue::get(Size));
  CGF.EmitVariablyModifiedType(PrivateType);
}

void ReductionCodeGen::emitInitialization(
    CodeGenFunction &CGF, unsigned N, Address PrivateAddr, LValue SharedLVal,
    llvm::function_ref<bool(CodeGenFunction &)> DefaultInit) {
  assert(SharedAddresses.size() > N && "No variable was generated");
  const auto *PrivateVD =
      cast<VarDecl>(cast<DeclRefExpr>(ClausesData[N].Private)->getDecl());
  const OMPDeclareReductionDecl *DRD =
      getReductionInit(ClausesData[N].ReductionOp);
  QualType PrivateType = PrivateVD->getType();
  PrivateAddr = CGF.Builder.CreateElementBitCast(
      PrivateAddr, CGF.ConvertTypeForMem(PrivateType));
  QualType SharedType = SharedAddresses[N].first.getType();
  SharedLVal = CGF.MakeAddrLValue(
      CGF.Builder.CreateElementBitCast(SharedLVal.getAddress(),
                                       CGF.ConvertTypeForMem(SharedType)),
      SharedType, SharedAddresses[N].first.getBaseInfo(),
      CGF.CGM.getTBAAInfoForSubobject(SharedAddresses[N].first, SharedType));
  if (CGF.getContext().getAsArrayType(PrivateVD->getType())) {
    emitAggregateInitialization(CGF, N, PrivateAddr, SharedLVal, DRD);
  } else if (DRD && (DRD->getInitializer() || !PrivateVD->hasInit())) {
    emitInitWithReductionInitializer(CGF, DRD, ClausesData[N].ReductionOp,
                                     PrivateAddr, SharedLVal.getAddress(),
                                     SharedLVal.getType());
  } else if (!DefaultInit(CGF) && PrivateVD->hasInit() &&
             !CGF.isTrivialInitializer(PrivateVD->getInit())) {
    CGF.EmitAnyExprToMem(PrivateVD->getInit(), PrivateAddr,
                         PrivateVD->getType().getQualifiers(),
                         /*IsInitializer=*/false);
  }
}

bool ReductionCodeGen::needCleanups(unsigned N) {
  const auto *PrivateVD =
      cast<VarDecl>(cast<DeclRefExpr>(ClausesData[N].Private)->getDecl());
  QualType PrivateType = PrivateVD->getType();
  QualType::DestructionKind DTorKind = PrivateType.isDestructedType();
  return DTorKind != QualType::DK_none;
}

void ReductionCodeGen::emitCleanups(CodeGenFunction &CGF, unsigned N,
                                    Address PrivateAddr) {
  const auto *PrivateVD =
      cast<VarDecl>(cast<DeclRefExpr>(ClausesData[N].Private)->getDecl());
  QualType PrivateType = PrivateVD->getType();
  QualType::DestructionKind DTorKind = PrivateType.isDestructedType();
  if (needCleanups(N)) {
    PrivateAddr = CGF.Builder.CreateElementBitCast(
        PrivateAddr, CGF.ConvertTypeForMem(PrivateType));
    CGF.pushDestroy(DTorKind, PrivateAddr, PrivateType);
  }
}

static LValue loadToBegin(CodeGenFunction &CGF, QualType BaseTy, QualType ElTy,
                          LValue BaseLV) {
  BaseTy = BaseTy.getNonReferenceType();
  while ((BaseTy->isPointerType() || BaseTy->isReferenceType()) &&
         !CGF.getContext().hasSameType(BaseTy, ElTy)) {
    if (const auto *PtrTy = BaseTy->getAs<PointerType>()) {
      BaseLV = CGF.EmitLoadOfPointerLValue(BaseLV.getAddress(), PtrTy);
    } else {
      LValue RefLVal = CGF.MakeAddrLValue(BaseLV.getAddress(), BaseTy);
      BaseLV = CGF.EmitLoadOfReferenceLValue(RefLVal);
    }
    BaseTy = BaseTy->getPointeeType();
  }
  return CGF.MakeAddrLValue(
      CGF.Builder.CreateElementBitCast(BaseLV.getAddress(),
                                       CGF.ConvertTypeForMem(ElTy)),
      BaseLV.getType(), BaseLV.getBaseInfo(),
      CGF.CGM.getTBAAInfoForSubobject(BaseLV, BaseLV.getType()));
}

static Address castToBase(CodeGenFunction &CGF, QualType BaseTy, QualType ElTy,
                          llvm::Type *BaseLVType, CharUnits BaseLVAlignment,
                          llvm::Value *Addr) {
  Address Tmp = Address::invalid();
  Address TopTmp = Address::invalid();
  Address MostTopTmp = Address::invalid();
  BaseTy = BaseTy.getNonReferenceType();
  while ((BaseTy->isPointerType() || BaseTy->isReferenceType()) &&
         !CGF.getContext().hasSameType(BaseTy, ElTy)) {
    Tmp = CGF.CreateMemTemp(BaseTy);
    if (TopTmp.isValid())
      CGF.Builder.CreateStore(Tmp.getPointer(), TopTmp);
    else
      MostTopTmp = Tmp;
    TopTmp = Tmp;
    BaseTy = BaseTy->getPointeeType();
  }
  llvm::Type *Ty = BaseLVType;
  if (Tmp.isValid())
    Ty = Tmp.getElementType();
  Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(Addr, Ty);
  if (Tmp.isValid()) {
    CGF.Builder.CreateStore(Addr, Tmp);
    return MostTopTmp;
  }
  return Address(Addr, BaseLVAlignment);
}

static const VarDecl *getBaseDecl(const Expr *Ref, const DeclRefExpr *&DE) {
  const VarDecl *OrigVD = nullptr;
  if (const auto *OASE = dyn_cast<OMPArraySectionExpr>(Ref)) {
    const Expr *Base = OASE->getBase()->IgnoreParenImpCasts();
    while (const auto *TempOASE = dyn_cast<OMPArraySectionExpr>(Base))
      Base = TempOASE->getBase()->IgnoreParenImpCasts();
    while (const auto *TempASE = dyn_cast<ArraySubscriptExpr>(Base))
      Base = TempASE->getBase()->IgnoreParenImpCasts();
    DE = cast<DeclRefExpr>(Base);
    OrigVD = cast<VarDecl>(DE->getDecl());
  } else if (const auto *ASE = dyn_cast<ArraySubscriptExpr>(Ref)) {
    const Expr *Base = ASE->getBase()->IgnoreParenImpCasts();
    while (const auto *TempASE = dyn_cast<ArraySubscriptExpr>(Base))
      Base = TempASE->getBase()->IgnoreParenImpCasts();
    DE = cast<DeclRefExpr>(Base);
    OrigVD = cast<VarDecl>(DE->getDecl());
  }
  return OrigVD;
}

Address ReductionCodeGen::adjustPrivateAddress(CodeGenFunction &CGF, unsigned N,
                                               Address PrivateAddr) {
  const DeclRefExpr *DE;
  if (const VarDecl *OrigVD = ::getBaseDecl(ClausesData[N].Ref, DE)) {
    BaseDecls.emplace_back(OrigVD);
    LValue OriginalBaseLValue = CGF.EmitLValue(DE);
    LValue BaseLValue =
        loadToBegin(CGF, OrigVD->getType(), SharedAddresses[N].first.getType(),
                    OriginalBaseLValue);
    llvm::Value *Adjustment = CGF.Builder.CreatePtrDiff(
        BaseLValue.getPointer(), SharedAddresses[N].first.getPointer());
    llvm::Value *PrivatePointer =
        CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
            PrivateAddr.getPointer(),
            SharedAddresses[N].first.getAddress().getType());
    llvm::Value *Ptr = CGF.Builder.CreateGEP(PrivatePointer, Adjustment);
    return castToBase(CGF, OrigVD->getType(),
                      SharedAddresses[N].first.getType(),
                      OriginalBaseLValue.getAddress().getType(),
                      OriginalBaseLValue.getAlignment(), Ptr);
  }
  BaseDecls.emplace_back(
      cast<VarDecl>(cast<DeclRefExpr>(ClausesData[N].Ref)->getDecl()));
  return PrivateAddr;
}
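
// Illustrative example (not part of the original source): given
//   int a[100];
//   #pragma omp parallel for reduction(+ : a[10:20])
// the shared address points at &a[10] while the base declaration is 'a', so
// Adjustment above evaluates to -10 elements; adding it to the private copy
// yields a pointer that can stand in for 'a' inside the outlined region.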

bool ReductionCodeGen::usesReductionInitializer(unsigned N) const {
  const OMPDeclareReductionDecl *DRD =
      getReductionInit(ClausesData[N].ReductionOp);
  return DRD && DRD->getInitializer();
}

LValue CGOpenMPRegionInfo::getThreadIDVariableLValue(CodeGenFunction &CGF) {
  return CGF.EmitLoadOfPointerLValue(
      CGF.GetAddrOfLocalVar(getThreadIDVariable()),
      getThreadIDVariable()->getType()->castAs<PointerType>());
}

void CGOpenMPRegionInfo::EmitBody(CodeGenFunction &CGF, const Stmt * /*S*/) {
  if (!CGF.HaveInsertPoint())
    return;
  // 1.2.2 OpenMP Language Terminology
  // Structured block - An executable statement with a single entry at the
  // top and a single exit at the bottom.
  // The point of exit cannot be a branch out of the structured block.
  // longjmp() and throw() must not violate the entry/exit criteria.
  CGF.EHStack.pushTerminate();
  CodeGen(CGF);
  CGF.EHStack.popTerminate();
}

LValue CGOpenMPTaskOutlinedRegionInfo::getThreadIDVariableLValue(
    CodeGenFunction &CGF) {
  return CGF.MakeAddrLValue(CGF.GetAddrOfLocalVar(getThreadIDVariable()),
                            getThreadIDVariable()->getType(),
                            AlignmentSource::Decl);
}

static FieldDecl *addFieldToRecordDecl(ASTContext &C, DeclContext *DC,
                                       QualType FieldTy) {
  auto *Field = FieldDecl::Create(
      C, DC, SourceLocation(), SourceLocation(), /*Id=*/nullptr, FieldTy,
      C.getTrivialTypeSourceInfo(FieldTy, SourceLocation()),
      /*BW=*/nullptr, /*Mutable=*/false, /*InitStyle=*/ICIS_NoInit);
  Field->setAccess(AS_public);
  DC->addDecl(Field);
  return Field;
}

CGOpenMPRuntime::CGOpenMPRuntime(CodeGenModule &CGM, StringRef FirstSeparator,
                                 StringRef Separator)
    : CGM(CGM), FirstSeparator(FirstSeparator), Separator(Separator),
      OffloadEntriesInfoManager(CGM) {
  ASTContext &C = CGM.getContext();
  RecordDecl *RD = C.buildImplicitRecord("ident_t");
  QualType KmpInt32Ty = C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1);
  RD->startDefinition();
  // reserved_1
  addFieldToRecordDecl(C, RD, KmpInt32Ty);
  // flags
  addFieldToRecordDecl(C, RD, KmpInt32Ty);
  // reserved_2
  addFieldToRecordDecl(C, RD, KmpInt32Ty);
  // reserved_3
  addFieldToRecordDecl(C, RD, KmpInt32Ty);
  // psource
  addFieldToRecordDecl(C, RD, C.VoidPtrTy);
  RD->completeDefinition();
  IdentQTy = C.getRecordType(RD);
  IdentTy = CGM.getTypes().ConvertRecordDeclType(RD);
  KmpCriticalNameTy = llvm::ArrayType::get(CGM.Int32Ty, /*NumElements*/ 8);

  loadOffloadInfoMetadata();
}

void CGOpenMPRuntime::clear() {
  InternalVars.clear();
  // Clean non-target variable declarations possibly used only in debug info.
  for (const auto &Data : EmittedNonTargetVariables) {
    if (!Data.getValue().pointsToAliveValue())
      continue;
    auto *GV = dyn_cast<llvm::GlobalVariable>(Data.getValue());
    if (!GV)
      continue;
    if (!GV->isDeclaration() || GV->getNumUses() > 0)
      continue;
    GV->eraseFromParent();
  }
}

std::string CGOpenMPRuntime::getName(ArrayRef<StringRef> Parts) const {
  SmallString<128> Buffer;
  llvm::raw_svector_ostream OS(Buffer);
  StringRef Sep = FirstSeparator;
  for (StringRef Part : Parts) {
    OS << Sep << Part;
    Sep = Separator;
  }
  return OS.str();
}
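
// Illustrative example (not part of the original source), assuming both
// separators are "." as for the host runtime: getName({"omp_combiner", ""})
// produces ".omp_combiner." -- each part is preceded by a separator, and the
// trailing empty part appends one more, keeping such helper names out of the
// user namespace.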

static llvm::Function *
emitCombinerOrInitializer(CodeGenModule &CGM, QualType Ty,
                          const Expr *CombinerInitializer, const VarDecl *In,
                          const VarDecl *Out, bool IsCombiner) {
  // void .omp_combiner.(Ty *in, Ty *out);
  ASTContext &C = CGM.getContext();
  QualType PtrTy = C.getPointerType(Ty).withRestrict();
  FunctionArgList Args;
  ImplicitParamDecl OmpOutParm(C, /*DC=*/nullptr, Out->getLocation(),
                               /*Id=*/nullptr, PtrTy, ImplicitParamDecl::Other);
  ImplicitParamDecl OmpInParm(C, /*DC=*/nullptr, In->getLocation(),
                              /*Id=*/nullptr, PtrTy, ImplicitParamDecl::Other);
  Args.push_back(&OmpOutParm);
  Args.push_back(&OmpInParm);
  const CGFunctionInfo &FnInfo =
      CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
  llvm::FunctionType *FnTy = CGM.getTypes().GetFunctionType(FnInfo);
  std::string Name = CGM.getOpenMPRuntime().getName(
      {IsCombiner ? "omp_combiner" : "omp_initializer", ""});
  auto *Fn = llvm::Function::Create(FnTy, llvm::GlobalValue::InternalLinkage,
                                    Name, &CGM.getModule());
  CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, FnInfo);
  Fn->removeFnAttr(llvm::Attribute::NoInline);
  Fn->removeFnAttr(llvm::Attribute::OptimizeNone);
  Fn->addFnAttr(llvm::Attribute::AlwaysInline);
  CodeGenFunction CGF(CGM);
  // Map "T omp_in;" variable to "*omp_in_parm" value in all expressions.
  // Map "T omp_out;" variable to "*omp_out_parm" value in all expressions.
  CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, FnInfo, Args, In->getLocation(),
                    Out->getLocation());
  CodeGenFunction::OMPPrivateScope Scope(CGF);
  Address AddrIn = CGF.GetAddrOfLocalVar(&OmpInParm);
  Scope.addPrivate(In, [&CGF, AddrIn, PtrTy]() {
    return CGF.EmitLoadOfPointerLValue(AddrIn, PtrTy->castAs<PointerType>())
        .getAddress();
  });
  Address AddrOut = CGF.GetAddrOfLocalVar(&OmpOutParm);
  Scope.addPrivate(Out, [&CGF, AddrOut, PtrTy]() {
    return CGF.EmitLoadOfPointerLValue(AddrOut, PtrTy->castAs<PointerType>())
        .getAddress();
  });
  (void)Scope.Privatize();
  if (!IsCombiner && Out->hasInit() &&
      !CGF.isTrivialInitializer(Out->getInit())) {
    CGF.EmitAnyExprToMem(Out->getInit(), CGF.GetAddrOfLocalVar(Out),
                         Out->getType().getQualifiers(),
                         /*IsInitializer=*/true);
  }
  if (CombinerInitializer)
    CGF.EmitIgnoredExpr(CombinerInitializer);
  Scope.ForceCleanup();
  CGF.FinishFunction();
  return Fn;
}

void CGOpenMPRuntime::emitUserDefinedReduction(
    CodeGenFunction *CGF, const OMPDeclareReductionDecl *D) {
  if (UDRMap.count(D) > 0)
    return;
  llvm::Function *Combiner = emitCombinerOrInitializer(
      CGM, D->getType(), D->getCombiner(),
      cast<VarDecl>(cast<DeclRefExpr>(D->getCombinerIn())->getDecl()),
      cast<VarDecl>(cast<DeclRefExpr>(D->getCombinerOut())->getDecl()),
      /*IsCombiner=*/true);
  llvm::Function *Initializer = nullptr;
  if (const Expr *Init = D->getInitializer()) {
    Initializer = emitCombinerOrInitializer(
        CGM, D->getType(),
        D->getInitializerKind() == OMPDeclareReductionDecl::CallInit
            ? Init
            : nullptr,
        cast<VarDecl>(cast<DeclRefExpr>(D->getInitOrig())->getDecl()),
        cast<VarDecl>(cast<DeclRefExpr>(D->getInitPriv())->getDecl()),
        /*IsCombiner=*/false);
  }
  UDRMap.try_emplace(D, Combiner, Initializer);
  if (CGF) {
    auto &Decls = FunctionUDRMap.FindAndConstruct(CGF->CurFn);
    Decls.second.push_back(D);
  }
}
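
// Illustrative source-level example (not part of the original file): a
// directive such as
//   #pragma omp declare reduction(mymax : int : omp_out = omp_out > omp_in
//       ? omp_out : omp_in) initializer(omp_priv = 0)
// makes this routine emit one combiner and one initializer helper through
// emitCombinerOrInitializer() above (named ".omp_combiner." and
// ".omp_initializer." under the default "." separators).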

std::pair<llvm::Function *, llvm::Function *>
CGOpenMPRuntime::getUserDefinedReduction(const OMPDeclareReductionDecl *D) {
  auto I = UDRMap.find(D);
  if (I != UDRMap.end())
    return I->second;
  emitUserDefinedReduction(/*CGF=*/nullptr, D);
  return UDRMap.lookup(D);
}

static llvm::Value *emitParallelOrTeamsOutlinedFunction(
    CodeGenModule &CGM, const OMPExecutableDirective &D, const CapturedStmt *CS,
    const VarDecl *ThreadIDVar, OpenMPDirectiveKind InnermostKind,
    const StringRef OutlinedHelperName, const RegionCodeGenTy &CodeGen) {
  assert(ThreadIDVar->getType()->isPointerType() &&
         "thread id variable must be of type kmp_int32 *");
  CodeGenFunction CGF(CGM, true);
  bool HasCancel = false;
  if (const auto *OPD = dyn_cast<OMPParallelDirective>(&D))
    HasCancel = OPD->hasCancel();
  else if (const auto *OPSD = dyn_cast<OMPParallelSectionsDirective>(&D))
    HasCancel = OPSD->hasCancel();
  else if (const auto *OPFD = dyn_cast<OMPParallelForDirective>(&D))
    HasCancel = OPFD->hasCancel();
  else if (const auto *OPFD = dyn_cast<OMPTargetParallelForDirective>(&D))
    HasCancel = OPFD->hasCancel();
  else if (const auto *OPFD = dyn_cast<OMPDistributeParallelForDirective>(&D))
    HasCancel = OPFD->hasCancel();
  else if (const auto *OPFD =
               dyn_cast<OMPTeamsDistributeParallelForDirective>(&D))
    HasCancel = OPFD->hasCancel();
  else if (const auto *OPFD =
               dyn_cast<OMPTargetTeamsDistributeParallelForDirective>(&D))
    HasCancel = OPFD->hasCancel();
  CGOpenMPOutlinedRegionInfo CGInfo(*CS, ThreadIDVar, CodeGen, InnermostKind,
                                    HasCancel, OutlinedHelperName);
  CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CGInfo);
  return CGF.GenerateOpenMPCapturedStmtFunction(*CS);
}

llvm::Value *CGOpenMPRuntime::emitParallelOutlinedFunction(
    const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
    OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen) {
  const CapturedStmt *CS = D.getCapturedStmt(OMPD_parallel);
  return emitParallelOrTeamsOutlinedFunction(
      CGM, D, CS, ThreadIDVar, InnermostKind, getOutlinedHelperName(), CodeGen);
}

llvm::Value *CGOpenMPRuntime::emitTeamsOutlinedFunction(
    const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
    OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen) {
  const CapturedStmt *CS = D.getCapturedStmt(OMPD_teams);
  return emitParallelOrTeamsOutlinedFunction(
      CGM, D, CS, ThreadIDVar, InnermostKind, getOutlinedHelperName(), CodeGen);
}

llvm::Value *CGOpenMPRuntime::emitTaskOutlinedFunction(
    const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
    const VarDecl *PartIDVar, const VarDecl *TaskTVar,
    OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen,
    bool Tied, unsigned &NumberOfParts) {
  auto &&UntiedCodeGen = [this, &D, TaskTVar](CodeGenFunction &CGF,
                                              PrePostActionTy &) {
    llvm::Value *ThreadID = getThreadID(CGF, D.getBeginLoc());
    llvm::Value *UpLoc = emitUpdateLocation(CGF, D.getBeginLoc());
    llvm::Value *TaskArgs[] = {
        UpLoc, ThreadID,
        CGF.EmitLoadOfPointerLValue(CGF.GetAddrOfLocalVar(TaskTVar),
                                    TaskTVar->getType()->castAs<PointerType>())
            .getPointer()};
    CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_omp_task), TaskArgs);
  };
  CGOpenMPTaskOutlinedRegionInfo::UntiedTaskActionTy Action(Tied, PartIDVar,
                                                            UntiedCodeGen);
  CodeGen.setAction(Action);
  assert(!ThreadIDVar->getType()->isPointerType() &&
         "thread id variable must be of type kmp_int32 for tasks");
  const OpenMPDirectiveKind Region =
      isOpenMPTaskLoopDirective(D.getDirectiveKind()) ? OMPD_taskloop
                                                      : OMPD_task;
  const CapturedStmt *CS = D.getCapturedStmt(Region);
  const auto *TD = dyn_cast<OMPTaskDirective>(&D);
  CodeGenFunction CGF(CGM, true);
  CGOpenMPTaskOutlinedRegionInfo CGInfo(*CS, ThreadIDVar, CodeGen,
                                        InnermostKind,
                                        TD ? TD->hasCancel() : false, Action);
  CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CGInfo);
  llvm::Value *Res = CGF.GenerateCapturedStmtFunction(*CS);
  if (!Tied)
    NumberOfParts = Action.getNumberOfParts();
  return Res;
}
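
// Illustrative note (not part of the original source): for untied tasks the
// UntiedCodeGen callback above re-enqueues the task with __kmpc_omp_task so
// the remaining parts can run later, and NumberOfParts reports how many such
// parts the outlined body was split into.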

static void buildStructValue(ConstantStructBuilder &Fields, CodeGenModule &CGM,
                             const RecordDecl *RD, const CGRecordLayout &RL,
                             ArrayRef<llvm::Constant *> Data) {
  llvm::StructType *StructTy = RL.getLLVMType();
  unsigned PrevIdx = 0;
  auto DI = Data.begin();
  for (const FieldDecl *FD : RD->fields()) {
    unsigned Idx = RL.getLLVMFieldNo(FD);
    // Zero-fill any padding elements between the previous and current field.
    for (unsigned I = PrevIdx; I < Idx; ++I)
      Fields.add(llvm::Constant::getNullValue(StructTy->getElementType(I)));
    PrevIdx = Idx + 1;
    Fields.add(*DI);
    ++DI;
  }
}

template <class... As>
static llvm::GlobalVariable *
createGlobalStruct(CodeGenModule &CGM, QualType Ty, bool IsConstant,
                   ArrayRef<llvm::Constant *> Data, const Twine &Name,
                   As &&... Args) {
  const auto *RD = cast<RecordDecl>(Ty->getAsTagDecl());
  const CGRecordLayout &RL = CGM.getTypes().getCGRecordLayout(RD);
  ConstantInitBuilder CIBuilder(CGM);
  ConstantStructBuilder Fields = CIBuilder.beginStruct(RL.getLLVMType());
  buildStructValue(Fields, CGM, RD, RL, Data);
  return Fields.finishAndCreateGlobal(
      Name, CGM.getContext().getAlignOfGlobalVarInChars(Ty), IsConstant,
      std::forward<As>(Args)...);
}

template <typename T>
static void
createConstantGlobalStructAndAddToParent(CodeGenModule &CGM, QualType Ty,
                                         ArrayRef<llvm::Constant *> Data,
                                         T &Parent) {
  const auto *RD = cast<RecordDecl>(Ty->getAsTagDecl());
  const CGRecordLayout &RL = CGM.getTypes().getCGRecordLayout(RD);
  ConstantStructBuilder Fields = Parent.beginStruct(RL.getLLVMType());
  buildStructValue(Fields, CGM, RD, RL, Data);
  Fields.finishAndAddTo(Parent);
}

Address CGOpenMPRuntime::getOrCreateDefaultLocation(unsigned Flags) {
  CharUnits Align = CGM.getContext().getTypeAlignInChars(IdentQTy);
  unsigned Reserved2Flags = getDefaultLocationReserved2Flags();
  FlagsTy FlagsKey(Flags, Reserved2Flags);
  llvm::Value *Entry = OpenMPDefaultLocMap.lookup(FlagsKey);
  if (!Entry) {
    if (!DefaultOpenMPPSource) {
      // Initialize default location for psource field of ident_t structure of
      // all ident_t objects. Format is ";file;function;line;column;;".
      // Taken from
      // http://llvm.org/svn/llvm-project/openmp/trunk/runtime/src/kmp_str.c
      DefaultOpenMPPSource =
          CGM.GetAddrOfConstantCString(";unknown;unknown;0;0;;").getPointer();
      DefaultOpenMPPSource =
          llvm::ConstantExpr::getBitCast(DefaultOpenMPPSource, CGM.Int8PtrTy);
    }

    llvm::Constant *Data[] = {
        llvm::ConstantInt::getNullValue(CGM.Int32Ty),
        llvm::ConstantInt::get(CGM.Int32Ty, Flags),
        llvm::ConstantInt::get(CGM.Int32Ty, Reserved2Flags),
        llvm::ConstantInt::getNullValue(CGM.Int32Ty), DefaultOpenMPPSource};
    llvm::GlobalValue *DefaultOpenMPLocation =
        createGlobalStruct(CGM, IdentQTy, isDefaultLocationConstant(), Data, "",
                           llvm::GlobalValue::PrivateLinkage);
    DefaultOpenMPLocation->setUnnamedAddr(
        llvm::GlobalValue::UnnamedAddr::Global);
    OpenMPDefaultLocMap[FlagsKey] = Entry = DefaultOpenMPLocation;
  }
  return Address(Entry, Align);
}
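
// Illustrative example (not part of the original source): for
// Flags == OMP_IDENT_KMPC and a zero reserved_2 value, the cached private
// global is roughly equivalent to
//   ident_t { 0, /*flags=*/2, 0, 0, ";unknown;unknown;0;0;;" }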

void CGOpenMPRuntime::setLocThreadIdInsertPt(CodeGenFunction &CGF,
                                             bool AtCurrentPoint) {
  auto &Elem = OpenMPLocThreadIDMap.FindAndConstruct(CGF.CurFn);
  assert(!Elem.second.ServiceInsertPt && "Insert point is set already.");

  llvm::Value *Undef = llvm::UndefValue::get(CGF.Int32Ty);
  if (AtCurrentPoint) {
    Elem.second.ServiceInsertPt = new llvm::BitCastInst(
        Undef, CGF.Int32Ty, "svcpt", CGF.Builder.GetInsertBlock());
  } else {
    Elem.second.ServiceInsertPt =
        new llvm::BitCastInst(Undef, CGF.Int32Ty, "svcpt");
    Elem.second.ServiceInsertPt->insertAfter(CGF.AllocaInsertPt);
  }
}

void CGOpenMPRuntime::clearLocThreadIdInsertPt(CodeGenFunction &CGF) {
  auto &Elem = OpenMPLocThreadIDMap.FindAndConstruct(CGF.CurFn);
  if (Elem.second.ServiceInsertPt) {
    llvm::Instruction *Ptr = Elem.second.ServiceInsertPt;
    Elem.second.ServiceInsertPt = nullptr;
    Ptr->eraseFromParent();
  }
}

llvm::Value *CGOpenMPRuntime::emitUpdateLocation(CodeGenFunction &CGF,
                                                 SourceLocation Loc,
                                                 unsigned Flags) {
  Flags |= OMP_IDENT_KMPC;
  // If no debug info is generated, return the global default location.
  if (CGM.getCodeGenOpts().getDebugInfo() == codegenoptions::NoDebugInfo ||
      Loc.isInvalid())
    return getOrCreateDefaultLocation(Flags).getPointer();

  assert(CGF.CurFn && "No function in current CodeGenFunction.");

  CharUnits Align = CGM.getContext().getTypeAlignInChars(IdentQTy);
  Address LocValue = Address::invalid();
  auto I = OpenMPLocThreadIDMap.find(CGF.CurFn);
  if (I != OpenMPLocThreadIDMap.end())
    LocValue = Address(I->second.DebugLoc, Align);

  // OpenMPLocThreadIDMap may have null DebugLoc and non-null ThreadID, if
  // getThreadID was called before this routine.
  if (!LocValue.isValid()) {
    // Generate "ident_t .kmpc_loc.addr;"
    Address AI = CGF.CreateMemTemp(IdentQTy, ".kmpc_loc.addr");
    auto &Elem = OpenMPLocThreadIDMap.FindAndConstruct(CGF.CurFn);
    Elem.second.DebugLoc = AI.getPointer();
    LocValue = AI;

    if (!Elem.second.ServiceInsertPt)
      setLocThreadIdInsertPt(CGF);
    CGBuilderTy::InsertPointGuard IPG(CGF.Builder);
    CGF.Builder.SetInsertPoint(Elem.second.ServiceInsertPt);
    CGF.Builder.CreateMemCpy(LocValue, getOrCreateDefaultLocation(Flags),
                             CGF.getTypeSize(IdentQTy));
  }

  // char **psource = &.kmpc_loc_<flags>.addr.psource;
  LValue Base = CGF.MakeAddrLValue(LocValue, IdentQTy);
  auto Fields = cast<RecordDecl>(IdentQTy->getAsTagDecl())->field_begin();
  LValue PSource =
      CGF.EmitLValueForField(Base, *std::next(Fields, IdentField_PSource));

  llvm::Value *OMPDebugLoc = OpenMPDebugLocMap.lookup(Loc.getRawEncoding());
  if (OMPDebugLoc == nullptr) {
    SmallString<128> Buffer2;
    llvm::raw_svector_ostream OS2(Buffer2);
    // Build debug location
    PresumedLoc PLoc = CGF.getContext().getSourceManager().getPresumedLoc(Loc);
    OS2 << ";" << PLoc.getFilename() << ";";
    if (const auto *FD = dyn_cast_or_null<FunctionDecl>(CGF.CurFuncDecl))
      OS2 << FD->getQualifiedNameAsString();
    OS2 << ";" << PLoc.getLine() << ";" << PLoc.getColumn() << ";;";
    OMPDebugLoc = CGF.Builder.CreateGlobalStringPtr(OS2.str());
    OpenMPDebugLocMap[Loc.getRawEncoding()] = OMPDebugLoc;
  }
  // *psource = ";<File>;<Function>;<Line>;<Column>;;";
  CGF.EmitStoreOfScalar(OMPDebugLoc, PSource);

  // Our callers always pass this to a runtime function, so for
  // convenience, go ahead and return a naked pointer.
  return LocValue.getPointer();
}

llvm::Value *CGOpenMPRuntime::getThreadID(CodeGenFunction &CGF,
                                          SourceLocation Loc) {
  assert(CGF.CurFn && "No function in current CodeGenFunction.");

  llvm::Value *ThreadID = nullptr;
  // Check whether we've already cached a load of the thread id in this
  // function.
  auto I = OpenMPLocThreadIDMap.find(CGF.CurFn);
  if (I != OpenMPLocThreadIDMap.end()) {
    ThreadID = I->second.ThreadID;
    if (ThreadID != nullptr)
      return ThreadID;
  }
  // If exceptions are enabled, do not use parameter to avoid possible crash.
  if (!CGF.EHStack.requiresLandingPad() || !CGF.getLangOpts().Exceptions ||
      !CGF.getLangOpts().CXXExceptions ||
      CGF.Builder.GetInsertBlock() == CGF.AllocaInsertPt->getParent()) {
    if (auto *OMPRegionInfo =
            dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo)) {
      if (OMPRegionInfo->getThreadIDVariable()) {
        // Check if this is an outlined function with thread id passed as
        // argument.
        LValue LVal = OMPRegionInfo->getThreadIDVariableLValue(CGF);
        ThreadID = CGF.EmitLoadOfScalar(LVal, Loc);
        // If value loaded in entry block, cache it and use it everywhere in
        // function.
        if (CGF.Builder.GetInsertBlock() == CGF.AllocaInsertPt->getParent()) {
          auto &Elem = OpenMPLocThreadIDMap.FindAndConstruct(CGF.CurFn);
          Elem.second.ThreadID = ThreadID;
        }
        return ThreadID;
      }
    }
  }

  // This is not an outlined function region - need to call kmp_int32
  // __kmpc_global_thread_num(ident_t *loc).
  // Generate thread id value and cache this value for use across the
  // function.
  auto &Elem = OpenMPLocThreadIDMap.FindAndConstruct(CGF.CurFn);
  if (!Elem.second.ServiceInsertPt)
    setLocThreadIdInsertPt(CGF);
  CGBuilderTy::InsertPointGuard IPG(CGF.Builder);
  CGF.Builder.SetInsertPoint(Elem.second.ServiceInsertPt);
  llvm::CallInst *Call = CGF.Builder.CreateCall(
      createRuntimeFunction(OMPRTL__kmpc_global_thread_num),
      emitUpdateLocation(CGF, Loc));
  Call->setCallingConv(CGF.getRuntimeCC());
  Elem.second.ThreadID = Call;
  return Call;
}

void CGOpenMPRuntime::functionFinished(CodeGenFunction &CGF) {
  assert(CGF.CurFn && "No function in current CodeGenFunction.");
  if (OpenMPLocThreadIDMap.count(CGF.CurFn)) {
    clearLocThreadIdInsertPt(CGF);
    OpenMPLocThreadIDMap.erase(CGF.CurFn);
  }
  if (FunctionUDRMap.count(CGF.CurFn) > 0) {
    for (auto *D : FunctionUDRMap[CGF.CurFn])
      UDRMap.erase(D);
    FunctionUDRMap.erase(CGF.CurFn);
  }
}

llvm::Type *CGOpenMPRuntime::getIdentTyPointerTy() {
  return IdentTy->getPointerTo();
}

llvm::Type *CGOpenMPRuntime::getKmpc_MicroPointerTy() {
  if (!Kmpc_MicroTy) {
    // Build void (*kmpc_micro)(kmp_int32 *global_tid, kmp_int32 *bound_tid,...)
    llvm::Type *MicroParams[] = {llvm::PointerType::getUnqual(CGM.Int32Ty),
                                 llvm::PointerType::getUnqual(CGM.Int32Ty)};
    Kmpc_MicroTy = llvm::FunctionType::get(CGM.VoidTy, MicroParams, true);
  }
  return llvm::PointerType::getUnqual(Kmpc_MicroTy);
}

llvm::Constant *CGOpenMPRuntime::createRuntimeFunction(unsigned Function) {
  llvm::Constant *RTLFn = nullptr;
  switch (static_cast<OpenMPRTLFunction>(Function)) {
  case OMPRTL__kmpc_fork_call: {
    // Build void __kmpc_fork_call(ident_t *loc, kmp_int32 argc, kmpc_micro
    // microtask, ...);
    llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
                                getKmpc_MicroPointerTy()};
    auto *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ true);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_fork_call");
    break;
  }
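  // Illustrative note (not part of the original source): a
  // '#pragma omp parallel' region is lowered to roughly
  //   __kmpc_fork_call(&loc, /*argc=*/N, (kmpc_micro)&.omp_outlined.,
  //                    <N captured arguments>);
  // where the outlined microtask receives &global_tid and &bound_tid ahead
  // of the captured arguments (see getKmpc_MicroPointerTy() above).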
  1546. case OMPRTL__kmpc_global_thread_num: {
  1547. // Build kmp_int32 __kmpc_global_thread_num(ident_t *loc);
  1548. llvm::Type *TypeParams[] = {getIdentTyPointerTy()};
  1549. auto *FnTy =
  1550. llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
  1551. RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_global_thread_num");
  1552. break;
  1553. }
  1554. case OMPRTL__kmpc_threadprivate_cached: {
  1555. // Build void *__kmpc_threadprivate_cached(ident_t *loc,
  1556. // kmp_int32 global_tid, void *data, size_t size, void ***cache);
  1557. llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
  1558. CGM.VoidPtrTy, CGM.SizeTy,
  1559. CGM.VoidPtrTy->getPointerTo()->getPointerTo()};
  1560. auto *FnTy =
  1561. llvm::FunctionType::get(CGM.VoidPtrTy, TypeParams, /*isVarArg*/ false);
  1562. RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_threadprivate_cached");
  1563. break;
  1564. }
  case OMPRTL__kmpc_critical: {
    // Build void __kmpc_critical(ident_t *loc, kmp_int32 global_tid,
    // kmp_critical_name *crit);
    llvm::Type *TypeParams[] = {
        getIdentTyPointerTy(), CGM.Int32Ty,
        llvm::PointerType::getUnqual(KmpCriticalNameTy)};
    auto *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_critical");
    break;
  }
  case OMPRTL__kmpc_critical_with_hint: {
    // Build void __kmpc_critical_with_hint(ident_t *loc, kmp_int32 global_tid,
    // kmp_critical_name *crit, uintptr_t hint);
    llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
                                llvm::PointerType::getUnqual(KmpCriticalNameTy),
                                CGM.IntPtrTy};
    auto *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_critical_with_hint");
    break;
  }
  case OMPRTL__kmpc_threadprivate_register: {
    // Build void __kmpc_threadprivate_register(ident_t *, void *data,
    // kmpc_ctor ctor, kmpc_cctor cctor, kmpc_dtor dtor);
    // typedef void *(*kmpc_ctor)(void *);
    auto *KmpcCtorTy =
        llvm::FunctionType::get(CGM.VoidPtrTy, CGM.VoidPtrTy,
                                /*isVarArg*/ false)->getPointerTo();
    // typedef void *(*kmpc_cctor)(void *, void *);
    llvm::Type *KmpcCopyCtorTyArgs[] = {CGM.VoidPtrTy, CGM.VoidPtrTy};
    auto *KmpcCopyCtorTy =
        llvm::FunctionType::get(CGM.VoidPtrTy, KmpcCopyCtorTyArgs,
                                /*isVarArg*/ false)
            ->getPointerTo();
    // typedef void (*kmpc_dtor)(void *);
    auto *KmpcDtorTy =
        llvm::FunctionType::get(CGM.VoidTy, CGM.VoidPtrTy, /*isVarArg*/ false)
            ->getPointerTo();
    llvm::Type *FnTyArgs[] = {getIdentTyPointerTy(), CGM.VoidPtrTy, KmpcCtorTy,
                              KmpcCopyCtorTy, KmpcDtorTy};
    auto *FnTy = llvm::FunctionType::get(CGM.VoidTy, FnTyArgs,
                                         /*isVarArg*/ false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_threadprivate_register");
    break;
  }
  case OMPRTL__kmpc_end_critical: {
    // Build void __kmpc_end_critical(ident_t *loc, kmp_int32 global_tid,
    // kmp_critical_name *crit);
    llvm::Type *TypeParams[] = {
        getIdentTyPointerTy(), CGM.Int32Ty,
        llvm::PointerType::getUnqual(KmpCriticalNameTy)};
    auto *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_end_critical");
    break;
  }
  case OMPRTL__kmpc_cancel_barrier: {
    // Build kmp_int32 __kmpc_cancel_barrier(ident_t *loc, kmp_int32
    // global_tid);
    llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
    auto *FnTy =
        llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name*/ "__kmpc_cancel_barrier");
    break;
  }
  case OMPRTL__kmpc_barrier: {
    // Build void __kmpc_barrier(ident_t *loc, kmp_int32 global_tid);
    llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
    auto *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name*/ "__kmpc_barrier");
    break;
  }
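  // An explicit `#pragma omp barrier` (and the implicit barrier at the end of
  // worksharing constructs) becomes, roughly:
  //   call void @__kmpc_barrier(%ident_t* %loc, i32 %gtid)
  // The cancellable variant returns a non-zero value when the binding region
  // has been cancelled, so the generated code can branch to the cleanup path.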
  case OMPRTL__kmpc_for_static_fini: {
    // Build void __kmpc_for_static_fini(ident_t *loc, kmp_int32 global_tid);
    llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
    auto *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_for_static_fini");
    break;
  }
  case OMPRTL__kmpc_push_num_threads: {
    // Build void __kmpc_push_num_threads(ident_t *loc, kmp_int32 global_tid,
    // kmp_int32 num_threads)
    llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
                                CGM.Int32Ty};
    auto *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_push_num_threads");
    break;
  }
  case OMPRTL__kmpc_serialized_parallel: {
    // Build void __kmpc_serialized_parallel(ident_t *loc, kmp_int32
    // global_tid);
    llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
    auto *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_serialized_parallel");
    break;
  }
  case OMPRTL__kmpc_end_serialized_parallel: {
    // Build void __kmpc_end_serialized_parallel(ident_t *loc, kmp_int32
    // global_tid);
    llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
    auto *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_end_serialized_parallel");
    break;
  }
  case OMPRTL__kmpc_flush: {
    // Build void __kmpc_flush(ident_t *loc);
    llvm::Type *TypeParams[] = {getIdentTyPointerTy()};
    auto *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_flush");
    break;
  }
  case OMPRTL__kmpc_master: {
    // Build kmp_int32 __kmpc_master(ident_t *loc, kmp_int32 global_tid);
    llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
    auto *FnTy =
        llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_master");
    break;
  }
  case OMPRTL__kmpc_end_master: {
    // Build void __kmpc_end_master(ident_t *loc, kmp_int32 global_tid);
    llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
    auto *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_end_master");
    break;
  }
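  // `#pragma omp master` is emitted as a region guarded by the enter call
  // (see emitMasterRegion() below), schematically:
  //   if (__kmpc_master(&loc, gtid)) {
  //     /* master region body */
  //     __kmpc_end_master(&loc, gtid);
  //   }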
  case OMPRTL__kmpc_omp_taskyield: {
    // Build kmp_int32 __kmpc_omp_taskyield(ident_t *, kmp_int32 global_tid,
    // int end_part);
    llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty, CGM.IntTy};
    auto *FnTy =
        llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_omp_taskyield");
    break;
  }
  case OMPRTL__kmpc_single: {
    // Build kmp_int32 __kmpc_single(ident_t *loc, kmp_int32 global_tid);
    llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
    auto *FnTy =
        llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_single");
    break;
  }
  case OMPRTL__kmpc_end_single: {
    // Build void __kmpc_end_single(ident_t *loc, kmp_int32 global_tid);
    llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
    auto *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_end_single");
    break;
  }
  case OMPRTL__kmpc_omp_task_alloc: {
    // Build kmp_task_t *__kmpc_omp_task_alloc(ident_t *, kmp_int32 gtid,
    // kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds,
    // kmp_routine_entry_t *task_entry);
    assert(KmpRoutineEntryPtrTy != nullptr &&
           "Type kmp_routine_entry_t must be created.");
    llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty, CGM.Int32Ty,
                                CGM.SizeTy, CGM.SizeTy, KmpRoutineEntryPtrTy};
    // Return void * and then cast to particular kmp_task_t type.
    auto *FnTy =
        llvm::FunctionType::get(CGM.VoidPtrTy, TypeParams, /*isVarArg=*/false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_omp_task_alloc");
    break;
  }
  case OMPRTL__kmpc_omp_task: {
    // Build kmp_int32 __kmpc_omp_task(ident_t *, kmp_int32 gtid, kmp_task_t
    // *new_task);
    llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
                                CGM.VoidPtrTy};
    auto *FnTy =
        llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_omp_task");
    break;
  }
  case OMPRTL__kmpc_copyprivate: {
    // Build void __kmpc_copyprivate(ident_t *loc, kmp_int32 global_tid,
    // size_t cpy_size, void *cpy_data, void(*cpy_func)(void *, void *),
    // kmp_int32 didit);
    llvm::Type *CpyTypeParams[] = {CGM.VoidPtrTy, CGM.VoidPtrTy};
    auto *CpyFnTy =
        llvm::FunctionType::get(CGM.VoidTy, CpyTypeParams, /*isVarArg=*/false);
    llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty, CGM.SizeTy,
                                CGM.VoidPtrTy, CpyFnTy->getPointerTo(),
                                CGM.Int32Ty};
    auto *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_copyprivate");
    break;
  }
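  // `#pragma omp single copyprivate(x)` broadcasts the executing thread's
  // value to the rest of the team, schematically:
  //   did_it = __kmpc_single(&loc, gtid);
  //   if (did_it) { /* single body */ __kmpc_end_single(&loc, gtid); }
  //   __kmpc_copyprivate(&loc, gtid, <size>, <data>, <copy_func>, did_it);
  // where <copy_func> is the helper built by emitCopyprivateCopyFunction()
  // below.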
  case OMPRTL__kmpc_reduce: {
    // Build kmp_int32 __kmpc_reduce(ident_t *loc, kmp_int32 global_tid,
    // kmp_int32 num_vars, size_t reduce_size, void *reduce_data, void
    // (*reduce_func)(void *lhs_data, void *rhs_data), kmp_critical_name *lck);
    llvm::Type *ReduceTypeParams[] = {CGM.VoidPtrTy, CGM.VoidPtrTy};
    auto *ReduceFnTy = llvm::FunctionType::get(CGM.VoidTy, ReduceTypeParams,
                                               /*isVarArg=*/false);
    llvm::Type *TypeParams[] = {
        getIdentTyPointerTy(), CGM.Int32Ty, CGM.Int32Ty, CGM.SizeTy,
        CGM.VoidPtrTy, ReduceFnTy->getPointerTo(),
        llvm::PointerType::getUnqual(KmpCriticalNameTy)};
    auto *FnTy =
        llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_reduce");
    break;
  }
  case OMPRTL__kmpc_reduce_nowait: {
    // Build kmp_int32 __kmpc_reduce_nowait(ident_t *loc, kmp_int32
    // global_tid, kmp_int32 num_vars, size_t reduce_size, void *reduce_data,
    // void (*reduce_func)(void *lhs_data, void *rhs_data), kmp_critical_name
    // *lck);
    llvm::Type *ReduceTypeParams[] = {CGM.VoidPtrTy, CGM.VoidPtrTy};
    auto *ReduceFnTy = llvm::FunctionType::get(CGM.VoidTy, ReduceTypeParams,
                                               /*isVarArg=*/false);
    llvm::Type *TypeParams[] = {
        getIdentTyPointerTy(), CGM.Int32Ty, CGM.Int32Ty, CGM.SizeTy,
        CGM.VoidPtrTy, ReduceFnTy->getPointerTo(),
        llvm::PointerType::getUnqual(KmpCriticalNameTy)};
    auto *FnTy =
        llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_reduce_nowait");
    break;
  }
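  // The reduction protocol, schematically: __kmpc_reduce[_nowait] returns 1
  // if this thread must combine the partial results via the generated
  // reduce_func, 2 if it should combine them with atomic operations, and 0
  // if it has nothing to do; the matching __kmpc_end_reduce[_nowait] call
  // (declared below) releases the runtime's internal lock:
  //   switch (__kmpc_reduce_nowait(&loc, gtid, n, size, data, func, &lck)) {
  //   case 1: /* reduce */ __kmpc_end_reduce_nowait(&loc, gtid, &lck); break;
  //   case 2: /* atomic reduce */ break;
  //   default: break;
  //   }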
  case OMPRTL__kmpc_end_reduce: {
    // Build void __kmpc_end_reduce(ident_t *loc, kmp_int32 global_tid,
    // kmp_critical_name *lck);
    llvm::Type *TypeParams[] = {
        getIdentTyPointerTy(), CGM.Int32Ty,
        llvm::PointerType::getUnqual(KmpCriticalNameTy)};
    auto *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_end_reduce");
    break;
  }
  case OMPRTL__kmpc_end_reduce_nowait: {
    // Build void __kmpc_end_reduce_nowait(ident_t *loc, kmp_int32 global_tid,
    // kmp_critical_name *lck);
    llvm::Type *TypeParams[] = {
        getIdentTyPointerTy(), CGM.Int32Ty,
        llvm::PointerType::getUnqual(KmpCriticalNameTy)};
    auto *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
    RTLFn =
        CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_end_reduce_nowait");
    break;
  }
  case OMPRTL__kmpc_omp_task_begin_if0: {
    // Build void __kmpc_omp_task_begin_if0(ident_t *, kmp_int32 gtid,
    // kmp_task_t *new_task);
    llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
                                CGM.VoidPtrTy};
    auto *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
    RTLFn =
        CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_omp_task_begin_if0");
    break;
  }
  case OMPRTL__kmpc_omp_task_complete_if0: {
    // Build void __kmpc_omp_task_complete_if0(ident_t *, kmp_int32 gtid,
    // kmp_task_t *new_task);
    llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
                                CGM.VoidPtrTy};
    auto *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy,
                                      /*Name=*/"__kmpc_omp_task_complete_if0");
    break;
  }
  case OMPRTL__kmpc_ordered: {
    // Build void __kmpc_ordered(ident_t *loc, kmp_int32 global_tid);
    llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
    auto *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_ordered");
    break;
  }
  case OMPRTL__kmpc_end_ordered: {
    // Build void __kmpc_end_ordered(ident_t *loc, kmp_int32 global_tid);
    llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
    auto *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_end_ordered");
    break;
  }
  case OMPRTL__kmpc_omp_taskwait: {
    // Build kmp_int32 __kmpc_omp_taskwait(ident_t *loc, kmp_int32 global_tid);
    llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
    auto *FnTy =
        llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_omp_taskwait");
    break;
  }
  case OMPRTL__kmpc_taskgroup: {
    // Build void __kmpc_taskgroup(ident_t *loc, kmp_int32 global_tid);
    llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
    auto *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_taskgroup");
    break;
  }
  case OMPRTL__kmpc_end_taskgroup: {
    // Build void __kmpc_end_taskgroup(ident_t *loc, kmp_int32 global_tid);
    llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
    auto *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_end_taskgroup");
    break;
  }
  case OMPRTL__kmpc_push_proc_bind: {
    // Build void __kmpc_push_proc_bind(ident_t *loc, kmp_int32 global_tid,
    // int proc_bind)
    llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty, CGM.IntTy};
    auto *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_push_proc_bind");
    break;
  }
  case OMPRTL__kmpc_omp_task_with_deps: {
    // Build kmp_int32 __kmpc_omp_task_with_deps(ident_t *, kmp_int32 gtid,
    // kmp_task_t *new_task, kmp_int32 ndeps, kmp_depend_info_t *dep_list,
    // kmp_int32 ndeps_noalias, kmp_depend_info_t *noalias_dep_list);
    llvm::Type *TypeParams[] = {
        getIdentTyPointerTy(), CGM.Int32Ty, CGM.VoidPtrTy, CGM.Int32Ty,
        CGM.VoidPtrTy, CGM.Int32Ty, CGM.VoidPtrTy};
    auto *FnTy =
        llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
    RTLFn =
        CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_omp_task_with_deps");
    break;
  }
  case OMPRTL__kmpc_omp_wait_deps: {
    // Build void __kmpc_omp_wait_deps(ident_t *, kmp_int32 gtid,
    // kmp_int32 ndeps, kmp_depend_info_t *dep_list, kmp_int32 ndeps_noalias,
    // kmp_depend_info_t *noalias_dep_list);
    llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
                                CGM.Int32Ty,           CGM.VoidPtrTy,
                                CGM.Int32Ty,           CGM.VoidPtrTy};
    auto *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_omp_wait_deps");
    break;
  }
  case OMPRTL__kmpc_cancellationpoint: {
    // Build kmp_int32 __kmpc_cancellationpoint(ident_t *loc, kmp_int32
    // global_tid, kmp_int32 cncl_kind)
    llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty, CGM.IntTy};
    auto *FnTy =
        llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_cancellationpoint");
    break;
  }
  case OMPRTL__kmpc_cancel: {
    // Build kmp_int32 __kmpc_cancel(ident_t *loc, kmp_int32 global_tid,
    // kmp_int32 cncl_kind)
    llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty, CGM.IntTy};
    auto *FnTy =
        llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_cancel");
    break;
  }
  case OMPRTL__kmpc_push_num_teams: {
    // Build void __kmpc_push_num_teams(ident_t *loc, kmp_int32 global_tid,
    // kmp_int32 num_teams, kmp_int32 num_threads)
    llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty, CGM.Int32Ty,
                                CGM.Int32Ty};
    auto *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_push_num_teams");
    break;
  }
  case OMPRTL__kmpc_fork_teams: {
    // Build void __kmpc_fork_teams(ident_t *loc, kmp_int32 argc, kmpc_micro
    // microtask, ...);
    llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
                                getKmpc_MicroPointerTy()};
    auto *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ true);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_fork_teams");
    break;
  }
  case OMPRTL__kmpc_taskloop: {
    // Build void __kmpc_taskloop(ident_t *loc, int gtid, kmp_task_t *task, int
    // if_val, kmp_uint64 *lb, kmp_uint64 *ub, kmp_int64 st, int nogroup, int
    // sched, kmp_uint64 grainsize, void *task_dup);
    llvm::Type *TypeParams[] = {getIdentTyPointerTy(),
                                CGM.IntTy,
                                CGM.VoidPtrTy,
                                CGM.IntTy,
                                CGM.Int64Ty->getPointerTo(),
                                CGM.Int64Ty->getPointerTo(),
                                CGM.Int64Ty,
                                CGM.IntTy,
                                CGM.IntTy,
                                CGM.Int64Ty,
                                CGM.VoidPtrTy};
    auto *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_taskloop");
    break;
  }
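  // For `#pragma omp taskloop`, codegen first allocates the task with
  // __kmpc_omp_task_alloc and then hands it to __kmpc_taskloop together with
  // the loop bounds, the step, the grainsize/num_tasks value selected by the
  // `sched` argument, and an optional task-duplication routine.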
  case OMPRTL__kmpc_doacross_init: {
    // Build void __kmpc_doacross_init(ident_t *loc, kmp_int32 gtid, kmp_int32
    // num_dims, struct kmp_dim *dims);
    llvm::Type *TypeParams[] = {getIdentTyPointerTy(),
                                CGM.Int32Ty,
                                CGM.Int32Ty,
                                CGM.VoidPtrTy};
    auto *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_doacross_init");
    break;
  }
  case OMPRTL__kmpc_doacross_fini: {
    // Build void __kmpc_doacross_fini(ident_t *loc, kmp_int32 gtid);
    llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
    auto *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_doacross_fini");
    break;
  }
  case OMPRTL__kmpc_doacross_post: {
    // Build void __kmpc_doacross_post(ident_t *loc, kmp_int32 gtid, kmp_int64
    // *vec);
    llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
                                CGM.Int64Ty->getPointerTo()};
    auto *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_doacross_post");
    break;
  }
  case OMPRTL__kmpc_doacross_wait: {
    // Build void __kmpc_doacross_wait(ident_t *loc, kmp_int32 gtid, kmp_int64
    // *vec);
    llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
                                CGM.Int64Ty->getPointerTo()};
    auto *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_doacross_wait");
    break;
  }
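  // These four entry points implement doacross loops (`ordered(n)` on the
  // loop directive): the loop is set up with __kmpc_doacross_init; each
  // `ordered depend(source)` posts the current iteration vector via
  // __kmpc_doacross_post; each `ordered depend(sink : vec)` blocks in
  // __kmpc_doacross_wait; and the loop is torn down with
  // __kmpc_doacross_fini.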
  case OMPRTL__kmpc_task_reduction_init: {
    // Build void *__kmpc_task_reduction_init(int gtid, int num_data, void
    // *data);
    llvm::Type *TypeParams[] = {CGM.IntTy, CGM.IntTy, CGM.VoidPtrTy};
    auto *FnTy =
        llvm::FunctionType::get(CGM.VoidPtrTy, TypeParams, /*isVarArg=*/false);
    RTLFn =
        CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_task_reduction_init");
    break;
  }
  case OMPRTL__kmpc_task_reduction_get_th_data: {
    // Build void *__kmpc_task_reduction_get_th_data(int gtid, void *tg, void
    // *d);
    llvm::Type *TypeParams[] = {CGM.IntTy, CGM.VoidPtrTy, CGM.VoidPtrTy};
    auto *FnTy =
        llvm::FunctionType::get(CGM.VoidPtrTy, TypeParams, /*isVarArg=*/false);
    RTLFn = CGM.CreateRuntimeFunction(
        FnTy, /*Name=*/"__kmpc_task_reduction_get_th_data");
    break;
  }
  case OMPRTL__tgt_target: {
    // Build int32_t __tgt_target(int64_t device_id, void *host_ptr, int32_t
    // arg_num, void** args_base, void **args, size_t *arg_sizes, int64_t
    // *arg_types);
    llvm::Type *TypeParams[] = {CGM.Int64Ty,
                                CGM.VoidPtrTy,
                                CGM.Int32Ty,
                                CGM.VoidPtrPtrTy,
                                CGM.VoidPtrPtrTy,
                                CGM.SizeTy->getPointerTo(),
                                CGM.Int64Ty->getPointerTo()};
    auto *FnTy =
        llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_target");
    break;
  }
  case OMPRTL__tgt_target_nowait: {
    // Build int32_t __tgt_target_nowait(int64_t device_id, void *host_ptr,
    // int32_t arg_num, void** args_base, void **args, size_t *arg_sizes,
    // int64_t *arg_types);
    llvm::Type *TypeParams[] = {CGM.Int64Ty,
                                CGM.VoidPtrTy,
                                CGM.Int32Ty,
                                CGM.VoidPtrPtrTy,
                                CGM.VoidPtrPtrTy,
                                CGM.SizeTy->getPointerTo(),
                                CGM.Int64Ty->getPointerTo()};
    auto *FnTy =
        llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_target_nowait");
    break;
  }
  case OMPRTL__tgt_target_teams: {
    // Build int32_t __tgt_target_teams(int64_t device_id, void *host_ptr,
    // int32_t arg_num, void** args_base, void **args, size_t *arg_sizes,
    // int64_t *arg_types, int32_t num_teams, int32_t thread_limit);
    llvm::Type *TypeParams[] = {CGM.Int64Ty,
                                CGM.VoidPtrTy,
                                CGM.Int32Ty,
                                CGM.VoidPtrPtrTy,
                                CGM.VoidPtrPtrTy,
                                CGM.SizeTy->getPointerTo(),
                                CGM.Int64Ty->getPointerTo(),
                                CGM.Int32Ty,
                                CGM.Int32Ty};
    auto *FnTy =
        llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_target_teams");
    break;
  }
  case OMPRTL__tgt_target_teams_nowait: {
    // Build int32_t __tgt_target_teams_nowait(int64_t device_id, void
    // *host_ptr, int32_t arg_num, void** args_base, void **args, size_t
    // *arg_sizes, int64_t *arg_types, int32_t num_teams, int32_t thread_limit);
    llvm::Type *TypeParams[] = {CGM.Int64Ty,
                                CGM.VoidPtrTy,
                                CGM.Int32Ty,
                                CGM.VoidPtrPtrTy,
                                CGM.VoidPtrPtrTy,
                                CGM.SizeTy->getPointerTo(),
                                CGM.Int64Ty->getPointerTo(),
                                CGM.Int32Ty,
                                CGM.Int32Ty};
    auto *FnTy =
        llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_target_teams_nowait");
    break;
  }
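  // All __tgt_target* entry points receive parallel arrays describing the
  // data to map: for each of the arg_num items, args_base[i] and args[i] give
  // the base and begin addresses, arg_sizes[i] the size in bytes, and
  // arg_types[i] the OpenMP map-type flags. Roughly, a non-zero return value
  // means the offload could not be performed and the host fallback version of
  // the region must be run instead.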
  case OMPRTL__tgt_register_lib: {
    // Build void __tgt_register_lib(__tgt_bin_desc *desc);
    QualType ParamTy =
        CGM.getContext().getPointerType(getTgtBinaryDescriptorQTy());
    llvm::Type *TypeParams[] = {CGM.getTypes().ConvertTypeForMem(ParamTy)};
    auto *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_register_lib");
    break;
  }
  case OMPRTL__tgt_unregister_lib: {
    // Build void __tgt_unregister_lib(__tgt_bin_desc *desc);
    QualType ParamTy =
        CGM.getContext().getPointerType(getTgtBinaryDescriptorQTy());
    llvm::Type *TypeParams[] = {CGM.getTypes().ConvertTypeForMem(ParamTy)};
    auto *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_unregister_lib");
    break;
  }
  case OMPRTL__tgt_target_data_begin: {
    // Build void __tgt_target_data_begin(int64_t device_id, int32_t arg_num,
    // void** args_base, void **args, size_t *arg_sizes, int64_t *arg_types);
    llvm::Type *TypeParams[] = {CGM.Int64Ty,
                                CGM.Int32Ty,
                                CGM.VoidPtrPtrTy,
                                CGM.VoidPtrPtrTy,
                                CGM.SizeTy->getPointerTo(),
                                CGM.Int64Ty->getPointerTo()};
    auto *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_target_data_begin");
    break;
  }
  case OMPRTL__tgt_target_data_begin_nowait: {
    // Build void __tgt_target_data_begin_nowait(int64_t device_id, int32_t
    // arg_num, void** args_base, void **args, size_t *arg_sizes, int64_t
    // *arg_types);
    llvm::Type *TypeParams[] = {CGM.Int64Ty,
                                CGM.Int32Ty,
                                CGM.VoidPtrPtrTy,
                                CGM.VoidPtrPtrTy,
                                CGM.SizeTy->getPointerTo(),
                                CGM.Int64Ty->getPointerTo()};
    auto *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_target_data_begin_nowait");
    break;
  }
  case OMPRTL__tgt_target_data_end: {
    // Build void __tgt_target_data_end(int64_t device_id, int32_t arg_num,
    // void** args_base, void **args, size_t *arg_sizes, int64_t *arg_types);
    llvm::Type *TypeParams[] = {CGM.Int64Ty,
                                CGM.Int32Ty,
                                CGM.VoidPtrPtrTy,
                                CGM.VoidPtrPtrTy,
                                CGM.SizeTy->getPointerTo(),
                                CGM.Int64Ty->getPointerTo()};
    auto *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_target_data_end");
    break;
  }
  case OMPRTL__tgt_target_data_end_nowait: {
    // Build void __tgt_target_data_end_nowait(int64_t device_id, int32_t
    // arg_num, void** args_base, void **args, size_t *arg_sizes, int64_t
    // *arg_types);
    llvm::Type *TypeParams[] = {CGM.Int64Ty,
                                CGM.Int32Ty,
                                CGM.VoidPtrPtrTy,
                                CGM.VoidPtrPtrTy,
                                CGM.SizeTy->getPointerTo(),
                                CGM.Int64Ty->getPointerTo()};
    auto *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_target_data_end_nowait");
    break;
  }
  case OMPRTL__tgt_target_data_update: {
    // Build void __tgt_target_data_update(int64_t device_id, int32_t arg_num,
    // void** args_base, void **args, size_t *arg_sizes, int64_t *arg_types);
    llvm::Type *TypeParams[] = {CGM.Int64Ty,
                                CGM.Int32Ty,
                                CGM.VoidPtrPtrTy,
                                CGM.VoidPtrPtrTy,
                                CGM.SizeTy->getPointerTo(),
                                CGM.Int64Ty->getPointerTo()};
    auto *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_target_data_update");
    break;
  }
  case OMPRTL__tgt_target_data_update_nowait: {
    // Build void __tgt_target_data_update_nowait(int64_t device_id, int32_t
    // arg_num, void** args_base, void **args, size_t *arg_sizes, int64_t
    // *arg_types);
    llvm::Type *TypeParams[] = {CGM.Int64Ty,
                                CGM.Int32Ty,
                                CGM.VoidPtrPtrTy,
                                CGM.VoidPtrPtrTy,
                                CGM.SizeTy->getPointerTo(),
                                CGM.Int64Ty->getPointerTo()};
    auto *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_target_data_update_nowait");
    break;
  }
  }
  assert(RTLFn && "Unable to find OpenMP runtime function");
  return RTLFn;
}

llvm::Constant *CGOpenMPRuntime::createForStaticInitFunction(unsigned IVSize,
                                                             bool IVSigned) {
  assert((IVSize == 32 || IVSize == 64) &&
         "IV size is not compatible with the omp runtime");
  StringRef Name = IVSize == 32 ? (IVSigned ? "__kmpc_for_static_init_4"
                                            : "__kmpc_for_static_init_4u")
                                : (IVSigned ? "__kmpc_for_static_init_8"
                                            : "__kmpc_for_static_init_8u");
  llvm::Type *ITy = IVSize == 32 ? CGM.Int32Ty : CGM.Int64Ty;
  auto *PtrTy = llvm::PointerType::getUnqual(ITy);
  llvm::Type *TypeParams[] = {
    getIdentTyPointerTy(),                     // loc
    CGM.Int32Ty,                               // tid
    CGM.Int32Ty,                               // schedtype
    llvm::PointerType::getUnqual(CGM.Int32Ty), // p_lastiter
    PtrTy,                                     // p_lower
    PtrTy,                                     // p_upper
    PtrTy,                                     // p_stride
    ITy,                                       // incr
    ITy                                        // chunk
  };
  auto *FnTy =
      llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
  return CGM.CreateRuntimeFunction(FnTy, Name);
}

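// A statically scheduled worksharing loop then lowers to, roughly:
//   __kmpc_for_static_init_4(&loc, gtid, schedtype, &lastiter, &lb, &ub,
//                            &stride, incr, chunk);
//   for (i = lb; i <= ub; i += incr) { /* loop body */ }
//   __kmpc_for_static_fini(&loc, gtid);
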
llvm::Constant *CGOpenMPRuntime::createDispatchInitFunction(unsigned IVSize,
                                                            bool IVSigned) {
  assert((IVSize == 32 || IVSize == 64) &&
         "IV size is not compatible with the omp runtime");
  StringRef Name =
      IVSize == 32
          ? (IVSigned ? "__kmpc_dispatch_init_4" : "__kmpc_dispatch_init_4u")
          : (IVSigned ? "__kmpc_dispatch_init_8" : "__kmpc_dispatch_init_8u");
  llvm::Type *ITy = IVSize == 32 ? CGM.Int32Ty : CGM.Int64Ty;
  llvm::Type *TypeParams[] = { getIdentTyPointerTy(), // loc
                               CGM.Int32Ty,           // tid
                               CGM.Int32Ty,           // schedtype
                               ITy,                   // lower
                               ITy,                   // upper
                               ITy,                   // stride
                               ITy                    // chunk
  };
  auto *FnTy =
      llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
  return CGM.CreateRuntimeFunction(FnTy, Name);
}

llvm::Constant *CGOpenMPRuntime::createDispatchFiniFunction(unsigned IVSize,
                                                            bool IVSigned) {
  assert((IVSize == 32 || IVSize == 64) &&
         "IV size is not compatible with the omp runtime");
  StringRef Name =
      IVSize == 32
          ? (IVSigned ? "__kmpc_dispatch_fini_4" : "__kmpc_dispatch_fini_4u")
          : (IVSigned ? "__kmpc_dispatch_fini_8" : "__kmpc_dispatch_fini_8u");
  llvm::Type *TypeParams[] = {
      getIdentTyPointerTy(), // loc
      CGM.Int32Ty,           // tid
  };
  auto *FnTy =
      llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
  return CGM.CreateRuntimeFunction(FnTy, Name);
}

llvm::Constant *CGOpenMPRuntime::createDispatchNextFunction(unsigned IVSize,
                                                            bool IVSigned) {
  assert((IVSize == 32 || IVSize == 64) &&
         "IV size is not compatible with the omp runtime");
  StringRef Name =
      IVSize == 32
          ? (IVSigned ? "__kmpc_dispatch_next_4" : "__kmpc_dispatch_next_4u")
          : (IVSigned ? "__kmpc_dispatch_next_8" : "__kmpc_dispatch_next_8u");
  llvm::Type *ITy = IVSize == 32 ? CGM.Int32Ty : CGM.Int64Ty;
  auto *PtrTy = llvm::PointerType::getUnqual(ITy);
  llvm::Type *TypeParams[] = {
    getIdentTyPointerTy(),                     // loc
    CGM.Int32Ty,                               // tid
    llvm::PointerType::getUnqual(CGM.Int32Ty), // p_lastiter
    PtrTy,                                     // p_lower
    PtrTy,                                     // p_upper
    PtrTy                                      // p_stride
  };
  auto *FnTy =
      llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
  return CGM.CreateRuntimeFunction(FnTy, Name);
}

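// A dynamically scheduled loop instead polls the runtime for chunks, roughly:
//   __kmpc_dispatch_init_4(&loc, gtid, schedtype, lb, ub, stride, chunk);
//   while (__kmpc_dispatch_next_4(&loc, gtid, &lastiter, &lo, &hi, &st))
//     for (i = lo; i <= hi; i += st) { /* loop body */ }
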
Address CGOpenMPRuntime::getAddrOfDeclareTargetLink(const VarDecl *VD) {
  if (CGM.getLangOpts().OpenMPSimd)
    return Address::invalid();
  llvm::Optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
      OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD);
  if (Res && *Res == OMPDeclareTargetDeclAttr::MT_Link) {
    SmallString<64> PtrName;
    {
      llvm::raw_svector_ostream OS(PtrName);
      OS << CGM.getMangledName(GlobalDecl(VD)) << "_decl_tgt_link_ptr";
    }
    llvm::Value *Ptr = CGM.getModule().getNamedValue(PtrName);
    if (!Ptr) {
      QualType PtrTy = CGM.getContext().getPointerType(VD->getType());
      Ptr = getOrCreateInternalVariable(CGM.getTypes().ConvertTypeForMem(PtrTy),
                                        PtrName);
      if (!CGM.getLangOpts().OpenMPIsDevice) {
        auto *GV = cast<llvm::GlobalVariable>(Ptr);
        GV->setLinkage(llvm::GlobalValue::ExternalLinkage);
        GV->setInitializer(CGM.GetAddrOfGlobal(VD));
      }
      CGM.addUsedGlobal(cast<llvm::GlobalValue>(Ptr));
      registerTargetGlobalVariable(VD, cast<llvm::Constant>(Ptr));
    }
    return Address(Ptr, CGM.getContext().getDeclAlign(VD));
  }
  return Address::invalid();
}

llvm::Constant *
CGOpenMPRuntime::getOrCreateThreadPrivateCache(const VarDecl *VD) {
  assert(!CGM.getLangOpts().OpenMPUseTLS ||
         !CGM.getContext().getTargetInfo().isTLSSupported());
  // Lookup the entry, lazily creating it if necessary.
  std::string Suffix = getName({"cache", ""});
  return getOrCreateInternalVariable(
      CGM.Int8PtrPtrTy, Twine(CGM.getMangledName(VD)).concat(Suffix));
}

Address CGOpenMPRuntime::getAddrOfThreadPrivate(CodeGenFunction &CGF,
                                                const VarDecl *VD,
                                                Address VDAddr,
                                                SourceLocation Loc) {
  if (CGM.getLangOpts().OpenMPUseTLS &&
      CGM.getContext().getTargetInfo().isTLSSupported())
    return VDAddr;

  llvm::Type *VarTy = VDAddr.getElementType();
  llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
                         CGF.Builder.CreatePointerCast(VDAddr.getPointer(),
                                                       CGM.Int8PtrTy),
                         CGM.getSize(CGM.GetTargetTypeStoreSize(VarTy)),
                         getOrCreateThreadPrivateCache(VD)};
  return Address(CGF.EmitRuntimeCall(
      createRuntimeFunction(OMPRTL__kmpc_threadprivate_cached), Args),
                 VDAddr.getAlignment());
}

void CGOpenMPRuntime::emitThreadPrivateVarInit(
    CodeGenFunction &CGF, Address VDAddr, llvm::Value *Ctor,
    llvm::Value *CopyCtor, llvm::Value *Dtor, SourceLocation Loc) {
  // Call kmp_int32 __kmpc_global_thread_num(&loc) to init OpenMP runtime
  // library.
  llvm::Value *OMPLoc = emitUpdateLocation(CGF, Loc);
  CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_global_thread_num),
                      OMPLoc);
  // Call __kmpc_threadprivate_register(&loc, &var, ctor, cctor/*NULL*/, dtor)
  // to register constructor/destructor for variable.
  llvm::Value *Args[] = {
      OMPLoc, CGF.Builder.CreatePointerCast(VDAddr.getPointer(), CGM.VoidPtrTy),
      Ctor, CopyCtor, Dtor};
  CGF.EmitRuntimeCall(
      createRuntimeFunction(OMPRTL__kmpc_threadprivate_register), Args);
}

llvm::Function *CGOpenMPRuntime::emitThreadPrivateVarDefinition(
    const VarDecl *VD, Address VDAddr, SourceLocation Loc,
    bool PerformInit, CodeGenFunction *CGF) {
  if (CGM.getLangOpts().OpenMPUseTLS &&
      CGM.getContext().getTargetInfo().isTLSSupported())
    return nullptr;

  VD = VD->getDefinition(CGM.getContext());
  if (VD && ThreadPrivateWithDefinition.insert(CGM.getMangledName(VD)).second) {
    QualType ASTTy = VD->getType();

    llvm::Value *Ctor = nullptr, *CopyCtor = nullptr, *Dtor = nullptr;
    const Expr *Init = VD->getAnyInitializer();
    if (CGM.getLangOpts().CPlusPlus && PerformInit) {
      // Generate function that re-emits the declaration's initializer into the
      // threadprivate copy of the variable VD
      CodeGenFunction CtorCGF(CGM);
      FunctionArgList Args;
      ImplicitParamDecl Dst(CGM.getContext(), /*DC=*/nullptr, Loc,
                            /*Id=*/nullptr, CGM.getContext().VoidPtrTy,
                            ImplicitParamDecl::Other);
      Args.push_back(&Dst);

      const auto &FI = CGM.getTypes().arrangeBuiltinFunctionDeclaration(
          CGM.getContext().VoidPtrTy, Args);
      llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FI);
      std::string Name = getName({"__kmpc_global_ctor_", ""});
      llvm::Function *Fn =
          CGM.CreateGlobalInitOrDestructFunction(FTy, Name, FI, Loc);
      CtorCGF.StartFunction(GlobalDecl(), CGM.getContext().VoidPtrTy, Fn, FI,
                            Args, Loc, Loc);
      llvm::Value *ArgVal = CtorCGF.EmitLoadOfScalar(
          CtorCGF.GetAddrOfLocalVar(&Dst), /*Volatile=*/false,
          CGM.getContext().VoidPtrTy, Dst.getLocation());
      Address Arg = Address(ArgVal, VDAddr.getAlignment());
      Arg = CtorCGF.Builder.CreateElementBitCast(
          Arg, CtorCGF.ConvertTypeForMem(ASTTy));
      CtorCGF.EmitAnyExprToMem(Init, Arg, Init->getType().getQualifiers(),
                               /*IsInitializer=*/true);
      ArgVal = CtorCGF.EmitLoadOfScalar(
          CtorCGF.GetAddrOfLocalVar(&Dst), /*Volatile=*/false,
          CGM.getContext().VoidPtrTy, Dst.getLocation());
      CtorCGF.Builder.CreateStore(ArgVal, CtorCGF.ReturnValue);
      CtorCGF.FinishFunction();
      Ctor = Fn;
    }
    if (VD->getType().isDestructedType() != QualType::DK_none) {
      // Generate function that emits destructor call for the threadprivate
      // copy of the variable VD
      CodeGenFunction DtorCGF(CGM);
      FunctionArgList Args;
      ImplicitParamDecl Dst(CGM.getContext(), /*DC=*/nullptr, Loc,
                            /*Id=*/nullptr, CGM.getContext().VoidPtrTy,
                            ImplicitParamDecl::Other);
      Args.push_back(&Dst);

      const auto &FI = CGM.getTypes().arrangeBuiltinFunctionDeclaration(
          CGM.getContext().VoidTy, Args);
      llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FI);
      std::string Name = getName({"__kmpc_global_dtor_", ""});
      llvm::Function *Fn =
          CGM.CreateGlobalInitOrDestructFunction(FTy, Name, FI, Loc);
      auto NL = ApplyDebugLocation::CreateEmpty(DtorCGF);
      DtorCGF.StartFunction(GlobalDecl(), CGM.getContext().VoidTy, Fn, FI, Args,
                            Loc, Loc);
      // Create a scope with an artificial location for the body of this
      // function.
      auto AL = ApplyDebugLocation::CreateArtificial(DtorCGF);
      llvm::Value *ArgVal = DtorCGF.EmitLoadOfScalar(
          DtorCGF.GetAddrOfLocalVar(&Dst),
          /*Volatile=*/false, CGM.getContext().VoidPtrTy, Dst.getLocation());
      DtorCGF.emitDestroy(Address(ArgVal, VDAddr.getAlignment()), ASTTy,
                          DtorCGF.getDestroyer(ASTTy.isDestructedType()),
                          DtorCGF.needsEHCleanup(ASTTy.isDestructedType()));
      DtorCGF.FinishFunction();
      Dtor = Fn;
    }
    // Do not emit init function if it is not required.
    if (!Ctor && !Dtor)
      return nullptr;

    llvm::Type *CopyCtorTyArgs[] = {CGM.VoidPtrTy, CGM.VoidPtrTy};
    auto *CopyCtorTy = llvm::FunctionType::get(CGM.VoidPtrTy, CopyCtorTyArgs,
                                               /*isVarArg=*/false)
                           ->getPointerTo();
    // Copying constructor for the threadprivate variable.
    // Must be NULL: the parameter is reserved by the runtime, which currently
    // asserts that it is always NULL.
    CopyCtor = llvm::Constant::getNullValue(CopyCtorTy);
    if (Ctor == nullptr) {
      auto *CtorTy = llvm::FunctionType::get(CGM.VoidPtrTy, CGM.VoidPtrTy,
                                             /*isVarArg=*/false)
                         ->getPointerTo();
      Ctor = llvm::Constant::getNullValue(CtorTy);
    }
    if (Dtor == nullptr) {
      auto *DtorTy = llvm::FunctionType::get(CGM.VoidTy, CGM.VoidPtrTy,
                                             /*isVarArg=*/false)
                         ->getPointerTo();
      Dtor = llvm::Constant::getNullValue(DtorTy);
    }
    if (!CGF) {
      auto *InitFunctionTy =
          llvm::FunctionType::get(CGM.VoidTy, /*isVarArg*/ false);
      std::string Name = getName({"__omp_threadprivate_init_", ""});
      llvm::Function *InitFunction = CGM.CreateGlobalInitOrDestructFunction(
          InitFunctionTy, Name, CGM.getTypes().arrangeNullaryFunction());
      CodeGenFunction InitCGF(CGM);
      FunctionArgList ArgList;
      InitCGF.StartFunction(GlobalDecl(), CGM.getContext().VoidTy, InitFunction,
                            CGM.getTypes().arrangeNullaryFunction(), ArgList,
                            Loc, Loc);
      emitThreadPrivateVarInit(InitCGF, VDAddr, Ctor, CopyCtor, Dtor, Loc);
      InitCGF.FinishFunction();
      return InitFunction;
    }
    emitThreadPrivateVarInit(*CGF, VDAddr, Ctor, CopyCtor, Dtor, Loc);
  }
  return nullptr;
}

/// Obtain information that uniquely identifies a target entry. This
/// consists of the file and device IDs as well as line number associated with
/// the relevant entry source location.
static void getTargetEntryUniqueInfo(ASTContext &C, SourceLocation Loc,
                                     unsigned &DeviceID, unsigned &FileID,
                                     unsigned &LineNum) {
  SourceManager &SM = C.getSourceManager();
  // The loc must always be valid and have a file ID (the user cannot use
  // #pragma directives in macros).
  assert(Loc.isValid() && "Source location is expected to be always valid.");

  PresumedLoc PLoc = SM.getPresumedLoc(Loc);
  assert(PLoc.isValid() && "Source location is expected to be always valid.");

  llvm::sys::fs::UniqueID ID;
  if (auto EC = llvm::sys::fs::getUniqueID(PLoc.getFilename(), ID))
    SM.getDiagnostics().Report(diag::err_cannot_open_file)
        << PLoc.getFilename() << EC.message();

  DeviceID = ID.getDevice();
  FileID = ID.getFile();
  LineNum = PLoc.getLine();
}

bool CGOpenMPRuntime::emitDeclareTargetVarDefinition(const VarDecl *VD,
                                                     llvm::GlobalVariable *Addr,
                                                     bool PerformInit) {
  Optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
      OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD);
  if (!Res || *Res == OMPDeclareTargetDeclAttr::MT_Link)
    return CGM.getLangOpts().OpenMPIsDevice;
  VD = VD->getDefinition(CGM.getContext());
  if (VD && !DeclareTargetWithDefinition.insert(CGM.getMangledName(VD)).second)
    return CGM.getLangOpts().OpenMPIsDevice;

  QualType ASTTy = VD->getType();

  SourceLocation Loc = VD->getCanonicalDecl()->getBeginLoc();
  // Produce the unique prefix to identify the new target regions. We use
  // the source location of the variable declaration which we know to not
  // conflict with any target region.
  unsigned DeviceID;
  unsigned FileID;
  unsigned Line;
  getTargetEntryUniqueInfo(CGM.getContext(), Loc, DeviceID, FileID, Line);
  SmallString<128> Buffer, Out;
  {
    llvm::raw_svector_ostream OS(Buffer);
    OS << "__omp_offloading_" << llvm::format("_%x", DeviceID)
       << llvm::format("_%x_", FileID) << VD->getName() << "_l" << Line;
  }

  const Expr *Init = VD->getAnyInitializer();
  if (CGM.getLangOpts().CPlusPlus && PerformInit) {
    llvm::Constant *Ctor;
    llvm::Constant *ID;
    if (CGM.getLangOpts().OpenMPIsDevice) {
      // Generate function that re-emits the declaration's initializer into
      // the threadprivate copy of the variable VD
      CodeGenFunction CtorCGF(CGM);

      const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
      llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FI);
      llvm::Function *Fn = CGM.CreateGlobalInitOrDestructFunction(
          FTy, Twine(Buffer, "_ctor"), FI, Loc);
      auto NL = ApplyDebugLocation::CreateEmpty(CtorCGF);
      CtorCGF.StartFunction(GlobalDecl(), CGM.getContext().VoidTy, Fn, FI,
                            FunctionArgList(), Loc, Loc);
      auto AL = ApplyDebugLocation::CreateArtificial(CtorCGF);
      CtorCGF.EmitAnyExprToMem(Init,
                               Address(Addr, CGM.getContext().getDeclAlign(VD)),
                               Init->getType().getQualifiers(),
                               /*IsInitializer=*/true);
      CtorCGF.FinishFunction();
      Ctor = Fn;
      ID = llvm::ConstantExpr::getBitCast(Fn, CGM.Int8PtrTy);
      CGM.addUsedGlobal(cast<llvm::GlobalValue>(Ctor));
    } else {
      Ctor = new llvm::GlobalVariable(
          CGM.getModule(), CGM.Int8Ty, /*isConstant=*/true,
          llvm::GlobalValue::PrivateLinkage,
          llvm::Constant::getNullValue(CGM.Int8Ty), Twine(Buffer, "_ctor"));
      ID = Ctor;
    }

    // Register the information for the entry associated with the constructor.
    Out.clear();
    OffloadEntriesInfoManager.registerTargetRegionEntryInfo(
        DeviceID, FileID, Twine(Buffer, "_ctor").toStringRef(Out), Line, Ctor,
        ID, OffloadEntriesInfoManagerTy::OMPTargetRegionEntryCtor);
  }
  if (VD->getType().isDestructedType() != QualType::DK_none) {
    llvm::Constant *Dtor;
    llvm::Constant *ID;
    if (CGM.getLangOpts().OpenMPIsDevice) {
      // Generate function that emits destructor call for the threadprivate
      // copy of the variable VD
      CodeGenFunction DtorCGF(CGM);

      const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
      llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FI);
      llvm::Function *Fn = CGM.CreateGlobalInitOrDestructFunction(
          FTy, Twine(Buffer, "_dtor"), FI, Loc);
      auto NL = ApplyDebugLocation::CreateEmpty(DtorCGF);
      DtorCGF.StartFunction(GlobalDecl(), CGM.getContext().VoidTy, Fn, FI,
                            FunctionArgList(), Loc, Loc);
      // Create a scope with an artificial location for the body of this
      // function.
      auto AL = ApplyDebugLocation::CreateArtificial(DtorCGF);
      DtorCGF.emitDestroy(Address(Addr, CGM.getContext().getDeclAlign(VD)),
                          ASTTy, DtorCGF.getDestroyer(ASTTy.isDestructedType()),
                          DtorCGF.needsEHCleanup(ASTTy.isDestructedType()));
      DtorCGF.FinishFunction();
      Dtor = Fn;
      ID = llvm::ConstantExpr::getBitCast(Fn, CGM.Int8PtrTy);
      CGM.addUsedGlobal(cast<llvm::GlobalValue>(Dtor));
    } else {
      Dtor = new llvm::GlobalVariable(
          CGM.getModule(), CGM.Int8Ty, /*isConstant=*/true,
          llvm::GlobalValue::PrivateLinkage,
          llvm::Constant::getNullValue(CGM.Int8Ty), Twine(Buffer, "_dtor"));
      ID = Dtor;
    }
    // Register the information for the entry associated with the destructor.
    Out.clear();
    OffloadEntriesInfoManager.registerTargetRegionEntryInfo(
        DeviceID, FileID, Twine(Buffer, "_dtor").toStringRef(Out), Line, Dtor,
        ID, OffloadEntriesInfoManagerTy::OMPTargetRegionEntryDtor);
  }
  return CGM.getLangOpts().OpenMPIsDevice;
}

Address CGOpenMPRuntime::getAddrOfArtificialThreadPrivate(CodeGenFunction &CGF,
                                                          QualType VarType,
                                                          StringRef Name) {
  std::string Suffix = getName({"artificial", ""});
  std::string CacheSuffix = getName({"cache", ""});
  llvm::Type *VarLVType = CGF.ConvertTypeForMem(VarType);
  llvm::Value *GAddr =
      getOrCreateInternalVariable(VarLVType, Twine(Name).concat(Suffix));
  llvm::Value *Args[] = {
      emitUpdateLocation(CGF, SourceLocation()),
      getThreadID(CGF, SourceLocation()),
      CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(GAddr, CGM.VoidPtrTy),
      CGF.Builder.CreateIntCast(CGF.getTypeSize(VarType), CGM.SizeTy,
                                /*IsSigned=*/false),
      getOrCreateInternalVariable(
          CGM.VoidPtrPtrTy, Twine(Name).concat(Suffix).concat(CacheSuffix))};
  return Address(
      CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
          CGF.EmitRuntimeCall(
              createRuntimeFunction(OMPRTL__kmpc_threadprivate_cached), Args),
          VarLVType->getPointerTo(/*AddrSpace=*/0)),
      CGM.getPointerAlign());
}

void CGOpenMPRuntime::emitOMPIfClause(CodeGenFunction &CGF, const Expr *Cond,
                                      const RegionCodeGenTy &ThenGen,
                                      const RegionCodeGenTy &ElseGen) {
  CodeGenFunction::LexicalScope ConditionScope(CGF, Cond->getSourceRange());

  // If the condition constant folds and can be elided, try to avoid emitting
  // the condition and the dead arm of the if/else.
  bool CondConstant;
  if (CGF.ConstantFoldsToSimpleInteger(Cond, CondConstant)) {
    if (CondConstant)
      ThenGen(CGF);
    else
      ElseGen(CGF);
    return;
  }

  // Otherwise, the condition did not fold, or we couldn't elide it. Just
  // emit the conditional branch.
  llvm::BasicBlock *ThenBlock = CGF.createBasicBlock("omp_if.then");
  llvm::BasicBlock *ElseBlock = CGF.createBasicBlock("omp_if.else");
  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("omp_if.end");
  CGF.EmitBranchOnBoolExpr(Cond, ThenBlock, ElseBlock, /*TrueCount=*/0);

  // Emit the 'then' code.
  CGF.EmitBlock(ThenBlock);
  ThenGen(CGF);
  CGF.EmitBranch(ContBlock);
  // Emit the 'else' code if present.
  // There is no need to emit line number for unconditional branch.
  (void)ApplyDebugLocation::CreateEmpty(CGF);
  CGF.EmitBlock(ElseBlock);
  ElseGen(CGF);
  // There is no need to emit line number for unconditional branch.
  (void)ApplyDebugLocation::CreateEmpty(CGF);
  CGF.EmitBranch(ContBlock);
  // Emit the continuation block for code after the if.
  CGF.EmitBlock(ContBlock, /*IsFinished=*/true);
}

void CGOpenMPRuntime::emitParallelCall(CodeGenFunction &CGF, SourceLocation Loc,
                                       llvm::Value *OutlinedFn,
                                       ArrayRef<llvm::Value *> CapturedVars,
                                       const Expr *IfCond) {
  if (!CGF.HaveInsertPoint())
    return;
  llvm::Value *RTLoc = emitUpdateLocation(CGF, Loc);
  auto &&ThenGen = [OutlinedFn, CapturedVars, RTLoc](CodeGenFunction &CGF,
                                                     PrePostActionTy &) {
    // Build call __kmpc_fork_call(loc, n, microtask, var1, .., varn);
    CGOpenMPRuntime &RT = CGF.CGM.getOpenMPRuntime();
    llvm::Value *Args[] = {
        RTLoc,
        CGF.Builder.getInt32(CapturedVars.size()), // Number of captured vars
        CGF.Builder.CreateBitCast(OutlinedFn, RT.getKmpc_MicroPointerTy())};
    llvm::SmallVector<llvm::Value *, 16> RealArgs;
    RealArgs.append(std::begin(Args), std::end(Args));
    RealArgs.append(CapturedVars.begin(), CapturedVars.end());

    llvm::Value *RTLFn = RT.createRuntimeFunction(OMPRTL__kmpc_fork_call);
    CGF.EmitRuntimeCall(RTLFn, RealArgs);
  };
  auto &&ElseGen = [OutlinedFn, CapturedVars, RTLoc, Loc](CodeGenFunction &CGF,
                                                          PrePostActionTy &) {
    CGOpenMPRuntime &RT = CGF.CGM.getOpenMPRuntime();
    llvm::Value *ThreadID = RT.getThreadID(CGF, Loc);
    // Build calls:
    // __kmpc_serialized_parallel(&Loc, GTid);
    llvm::Value *Args[] = {RTLoc, ThreadID};
    CGF.EmitRuntimeCall(
        RT.createRuntimeFunction(OMPRTL__kmpc_serialized_parallel), Args);

    // OutlinedFn(&GTid, &zero, CapturedStruct);
    Address ZeroAddr = CGF.CreateDefaultAlignTempAlloca(CGF.Int32Ty,
                                                        /*Name*/ ".zero.addr");
    CGF.InitTempAlloca(ZeroAddr, CGF.Builder.getInt32(/*C*/ 0));
    llvm::SmallVector<llvm::Value *, 16> OutlinedFnArgs;
    // ThreadId for serialized parallels is 0.
    OutlinedFnArgs.push_back(ZeroAddr.getPointer());
    OutlinedFnArgs.push_back(ZeroAddr.getPointer());
    OutlinedFnArgs.append(CapturedVars.begin(), CapturedVars.end());
    RT.emitOutlinedFunctionCall(CGF, Loc, OutlinedFn, OutlinedFnArgs);

    // __kmpc_end_serialized_parallel(&Loc, GTid);
    llvm::Value *EndArgs[] = {RT.emitUpdateLocation(CGF, Loc), ThreadID};
    CGF.EmitRuntimeCall(
        RT.createRuntimeFunction(OMPRTL__kmpc_end_serialized_parallel),
        EndArgs);
  };
  if (IfCond) {
    emitOMPIfClause(CGF, IfCond, ThenGen, ElseGen);
  } else {
    RegionCodeGenTy ThenRCG(ThenGen);
    ThenRCG(CGF);
  }
}

// If we're inside an (outlined) parallel region, use the region info's
// thread-ID variable (it is passed as the first argument of the outlined
// function as "kmp_int32 *gtid"). Otherwise, if we're not inside a parallel
// region but in regular serial code, get the thread ID by calling kmp_int32
// __kmpc_global_thread_num(ident_t *loc), stash this thread ID in a temporary
// and return the address of that temp.
Address CGOpenMPRuntime::emitThreadIDAddress(CodeGenFunction &CGF,
                                             SourceLocation Loc) {
  if (auto *OMPRegionInfo =
          dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo))
    if (OMPRegionInfo->getThreadIDVariable())
      return OMPRegionInfo->getThreadIDVariableLValue(CGF).getAddress();

  llvm::Value *ThreadID = getThreadID(CGF, Loc);
  QualType Int32Ty =
      CGF.getContext().getIntTypeForBitwidth(/*DestWidth*/ 32, /*Signed*/ true);
  Address ThreadIDTemp = CGF.CreateMemTemp(Int32Ty, /*Name*/ ".threadid_temp.");
  CGF.EmitStoreOfScalar(ThreadID,
                        CGF.MakeAddrLValue(ThreadIDTemp, Int32Ty));

  return ThreadIDTemp;
}

llvm::Constant *
CGOpenMPRuntime::getOrCreateInternalVariable(llvm::Type *Ty,
                                             const llvm::Twine &Name) {
  SmallString<256> Buffer;
  llvm::raw_svector_ostream Out(Buffer);
  Out << Name;
  StringRef RuntimeName = Out.str();
  auto &Elem = *InternalVars.try_emplace(RuntimeName, nullptr).first;
  if (Elem.second) {
    assert(Elem.second->getType()->getPointerElementType() == Ty &&
           "OMP internal variable has different type than requested");
    return &*Elem.second;
  }

  return Elem.second = new llvm::GlobalVariable(
             CGM.getModule(), Ty, /*IsConstant*/ false,
             llvm::GlobalValue::CommonLinkage, llvm::Constant::getNullValue(Ty),
             Elem.first());
}

llvm::Value *CGOpenMPRuntime::getCriticalRegionLock(StringRef CriticalName) {
  std::string Prefix = Twine("gomp_critical_user_", CriticalName).str();
  std::string Name = getName({Prefix, "var"});
  return getOrCreateInternalVariable(KmpCriticalNameTy, Name);
}

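// For `#pragma omp critical (foo)` this yields a zero-initialized lock
// variable with common linkage (via getOrCreateInternalVariable() above)
// named along the lines of ".gomp_critical_user_foo.var", so all translation
// units that mention the same critical name share a single lock.
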
namespace {
/// Common pre(post)-action for different OpenMP constructs.
class CommonActionTy final : public PrePostActionTy {
  llvm::Value *EnterCallee;
  ArrayRef<llvm::Value *> EnterArgs;
  llvm::Value *ExitCallee;
  ArrayRef<llvm::Value *> ExitArgs;
  bool Conditional;
  llvm::BasicBlock *ContBlock = nullptr;

public:
  CommonActionTy(llvm::Value *EnterCallee, ArrayRef<llvm::Value *> EnterArgs,
                 llvm::Value *ExitCallee, ArrayRef<llvm::Value *> ExitArgs,
                 bool Conditional = false)
      : EnterCallee(EnterCallee), EnterArgs(EnterArgs), ExitCallee(ExitCallee),
        ExitArgs(ExitArgs), Conditional(Conditional) {}
  void Enter(CodeGenFunction &CGF) override {
    llvm::Value *EnterRes = CGF.EmitRuntimeCall(EnterCallee, EnterArgs);
    if (Conditional) {
      llvm::Value *CallBool = CGF.Builder.CreateIsNotNull(EnterRes);
      auto *ThenBlock = CGF.createBasicBlock("omp_if.then");
      ContBlock = CGF.createBasicBlock("omp_if.end");
      // Generate the branch (If-stmt)
      CGF.Builder.CreateCondBr(CallBool, ThenBlock, ContBlock);
      CGF.EmitBlock(ThenBlock);
    }
  }
  void Done(CodeGenFunction &CGF) {
    // Emit the rest of blocks/branches
    CGF.EmitBranch(ContBlock);
    CGF.EmitBlock(ContBlock, true);
  }
  void Exit(CodeGenFunction &CGF) override {
    CGF.EmitRuntimeCall(ExitCallee, ExitArgs);
  }
};
} // anonymous namespace

void CGOpenMPRuntime::emitCriticalRegion(CodeGenFunction &CGF,
                                         StringRef CriticalName,
                                         const RegionCodeGenTy &CriticalOpGen,
                                         SourceLocation Loc, const Expr *Hint) {
  // __kmpc_critical[_with_hint](ident_t *, gtid, Lock[, hint]);
  // CriticalOpGen();
  // __kmpc_end_critical(ident_t *, gtid, Lock);
  // Prepare arguments and build a call to __kmpc_critical
  if (!CGF.HaveInsertPoint())
    return;
  llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
                         getCriticalRegionLock(CriticalName)};
  llvm::SmallVector<llvm::Value *, 4> EnterArgs(std::begin(Args),
                                                std::end(Args));
  if (Hint) {
    EnterArgs.push_back(CGF.Builder.CreateIntCast(
        CGF.EmitScalarExpr(Hint), CGM.IntPtrTy, /*isSigned=*/false));
  }
  CommonActionTy Action(
      createRuntimeFunction(Hint ? OMPRTL__kmpc_critical_with_hint
                                 : OMPRTL__kmpc_critical),
      EnterArgs, createRuntimeFunction(OMPRTL__kmpc_end_critical), Args);
  CriticalOpGen.setAction(Action);
  emitInlinedDirective(CGF, OMPD_critical, CriticalOpGen);
}


void CGOpenMPRuntime::emitMasterRegion(CodeGenFunction &CGF,
                                       const RegionCodeGenTy &MasterOpGen,
                                       SourceLocation Loc) {
  if (!CGF.HaveInsertPoint())
    return;
  // if(__kmpc_master(ident_t *, gtid)) {
  //   MasterOpGen();
  //   __kmpc_end_master(ident_t *, gtid);
  // }
  // Prepare arguments and build a call to __kmpc_master.
  llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc)};
  CommonActionTy Action(createRuntimeFunction(OMPRTL__kmpc_master), Args,
                        createRuntimeFunction(OMPRTL__kmpc_end_master), Args,
                        /*Conditional=*/true);
  MasterOpGen.setAction(Action);
  emitInlinedDirective(CGF, OMPD_master, MasterOpGen);
  Action.Done(CGF);
}
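
// Illustrative sketch: a '#pragma omp master' region lowers, via the
// conditional CommonActionTy above, to
//
//   if (__kmpc_master(&loc, gtid)) {
//     <master region body>
//     __kmpc_end_master(&loc, gtid);
//   }
//
// so only the thread for which __kmpc_master returns nonzero runs the body;
// no implicit barrier is emitted here.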

void CGOpenMPRuntime::emitTaskyieldCall(CodeGenFunction &CGF,
                                        SourceLocation Loc) {
  if (!CGF.HaveInsertPoint())
    return;
  // Build call __kmpc_omp_taskyield(loc, thread_id, 0);
  llvm::Value *Args[] = {
      emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
      llvm::ConstantInt::get(CGM.IntTy, /*V=*/0, /*isSigned=*/true)};
  CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_omp_taskyield), Args);
  if (auto *Region = dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo))
    Region->emitUntiedSwitch(CGF);
}

void CGOpenMPRuntime::emitTaskgroupRegion(CodeGenFunction &CGF,
                                          const RegionCodeGenTy &TaskgroupOpGen,
                                          SourceLocation Loc) {
  if (!CGF.HaveInsertPoint())
    return;
  // __kmpc_taskgroup(ident_t *, gtid);
  // TaskgroupOpGen();
  // __kmpc_end_taskgroup(ident_t *, gtid);
  // Prepare arguments and build a call to __kmpc_taskgroup.
  llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc)};
  CommonActionTy Action(createRuntimeFunction(OMPRTL__kmpc_taskgroup), Args,
                        createRuntimeFunction(OMPRTL__kmpc_end_taskgroup),
                        Args);
  TaskgroupOpGen.setAction(Action);
  emitInlinedDirective(CGF, OMPD_taskgroup, TaskgroupOpGen);
}

/// Given an array of pointers to variables, project the address of a
/// given variable.
static Address emitAddrOfVarFromArray(CodeGenFunction &CGF, Address Array,
                                      unsigned Index, const VarDecl *Var) {
  // Pull out the pointer to the variable.
  Address PtrAddr =
      CGF.Builder.CreateConstArrayGEP(Array, Index, CGF.getPointerSize());
  llvm::Value *Ptr = CGF.Builder.CreateLoad(PtrAddr);
  Address Addr = Address(Ptr, CGF.getContext().getDeclAlign(Var));
  Addr = CGF.Builder.CreateElementBitCast(
      Addr, CGF.ConvertTypeForMem(Var->getType()));
  return Addr;
}

static llvm::Value *emitCopyprivateCopyFunction(
    CodeGenModule &CGM, llvm::Type *ArgsType,
    ArrayRef<const Expr *> CopyprivateVars, ArrayRef<const Expr *> DestExprs,
    ArrayRef<const Expr *> SrcExprs, ArrayRef<const Expr *> AssignmentOps,
    SourceLocation Loc) {
  ASTContext &C = CGM.getContext();
  // void copy_func(void *LHSArg, void *RHSArg);
  FunctionArgList Args;
  ImplicitParamDecl LHSArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.VoidPtrTy,
                           ImplicitParamDecl::Other);
  ImplicitParamDecl RHSArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.VoidPtrTy,
                           ImplicitParamDecl::Other);
  Args.push_back(&LHSArg);
  Args.push_back(&RHSArg);
  const auto &CGFI =
      CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
  std::string Name =
      CGM.getOpenMPRuntime().getName({"omp", "copyprivate", "copy_func"});
  auto *Fn = llvm::Function::Create(CGM.getTypes().GetFunctionType(CGFI),
                                    llvm::GlobalValue::InternalLinkage, Name,
                                    &CGM.getModule());
  CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
  Fn->setDoesNotRecurse();
  CodeGenFunction CGF(CGM);
  CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc);
  // Dest = (void*[n])(LHSArg);
  // Src = (void*[n])(RHSArg);
  Address LHS(CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
                  CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(&LHSArg)),
                  ArgsType),
              CGF.getPointerAlign());
  Address RHS(CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
                  CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(&RHSArg)),
                  ArgsType),
              CGF.getPointerAlign());
  // *(Type0*)Dst[0] = *(Type0*)Src[0];
  // *(Type1*)Dst[1] = *(Type1*)Src[1];
  // ...
  // *(Typen*)Dst[n] = *(Typen*)Src[n];
  for (unsigned I = 0, E = AssignmentOps.size(); I < E; ++I) {
    const auto *DestVar =
        cast<VarDecl>(cast<DeclRefExpr>(DestExprs[I])->getDecl());
    Address DestAddr = emitAddrOfVarFromArray(CGF, LHS, I, DestVar);
    const auto *SrcVar =
        cast<VarDecl>(cast<DeclRefExpr>(SrcExprs[I])->getDecl());
    Address SrcAddr = emitAddrOfVarFromArray(CGF, RHS, I, SrcVar);
    const auto *VD = cast<DeclRefExpr>(CopyprivateVars[I])->getDecl();
    QualType Type = VD->getType();
    CGF.EmitOMPCopy(Type, DestAddr, SrcAddr, DestVar, SrcVar, AssignmentOps[I]);
  }
  CGF.FinishFunction();
  return Fn;
}

void CGOpenMPRuntime::emitSingleRegion(CodeGenFunction &CGF,
                                       const RegionCodeGenTy &SingleOpGen,
                                       SourceLocation Loc,
                                       ArrayRef<const Expr *> CopyprivateVars,
                                       ArrayRef<const Expr *> SrcExprs,
                                       ArrayRef<const Expr *> DstExprs,
                                       ArrayRef<const Expr *> AssignmentOps) {
  if (!CGF.HaveInsertPoint())
    return;
  assert(CopyprivateVars.size() == SrcExprs.size() &&
         CopyprivateVars.size() == DstExprs.size() &&
         CopyprivateVars.size() == AssignmentOps.size());
  ASTContext &C = CGM.getContext();
  // int32 did_it = 0;
  // if(__kmpc_single(ident_t *, gtid)) {
  //   SingleOpGen();
  //   __kmpc_end_single(ident_t *, gtid);
  //   did_it = 1;
  // }
  // call __kmpc_copyprivate(ident_t *, gtid, <buf_size>, <copyprivate list>,
  //                         <copy_func>, did_it);
  Address DidIt = Address::invalid();
  if (!CopyprivateVars.empty()) {
    // int32 did_it = 0;
    QualType KmpInt32Ty =
        C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1);
    DidIt = CGF.CreateMemTemp(KmpInt32Ty, ".omp.copyprivate.did_it");
    CGF.Builder.CreateStore(CGF.Builder.getInt32(0), DidIt);
  }
  // Prepare arguments and build a call to __kmpc_single.
  llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc)};
  CommonActionTy Action(createRuntimeFunction(OMPRTL__kmpc_single), Args,
                        createRuntimeFunction(OMPRTL__kmpc_end_single), Args,
                        /*Conditional=*/true);
  SingleOpGen.setAction(Action);
  emitInlinedDirective(CGF, OMPD_single, SingleOpGen);
  if (DidIt.isValid()) {
    // did_it = 1;
    CGF.Builder.CreateStore(CGF.Builder.getInt32(1), DidIt);
  }
  Action.Done(CGF);
  // call __kmpc_copyprivate(ident_t *, gtid, <buf_size>, <copyprivate list>,
  //                         <copy_func>, did_it);
  if (DidIt.isValid()) {
    llvm::APInt ArraySize(/*unsigned int numBits=*/32, CopyprivateVars.size());
    QualType CopyprivateArrayTy =
        C.getConstantArrayType(C.VoidPtrTy, ArraySize, ArrayType::Normal,
                               /*IndexTypeQuals=*/0);
    // Create a list of all private variables for copyprivate.
    Address CopyprivateList =
        CGF.CreateMemTemp(CopyprivateArrayTy, ".omp.copyprivate.cpr_list");
    for (unsigned I = 0, E = CopyprivateVars.size(); I < E; ++I) {
      Address Elem = CGF.Builder.CreateConstArrayGEP(
          CopyprivateList, I, CGF.getPointerSize());
      CGF.Builder.CreateStore(
          CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
              CGF.EmitLValue(CopyprivateVars[I]).getPointer(), CGF.VoidPtrTy),
          Elem);
    }
    // Build function that copies private values from the single region to all
    // other threads in the corresponding parallel region.
    llvm::Value *CpyFn = emitCopyprivateCopyFunction(
        CGM, CGF.ConvertTypeForMem(CopyprivateArrayTy)->getPointerTo(),
        CopyprivateVars, SrcExprs, DstExprs, AssignmentOps, Loc);
    llvm::Value *BufSize = CGF.getTypeSize(CopyprivateArrayTy);
    Address CL =
        CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(CopyprivateList,
                                                        CGF.VoidPtrTy);
    llvm::Value *DidItVal = CGF.Builder.CreateLoad(DidIt);
    llvm::Value *Args[] = {
        emitUpdateLocation(CGF, Loc), // ident_t *<loc>
        getThreadID(CGF, Loc),        // i32 <gtid>
        BufSize,                      // size_t <buf_size>
        CL.getPointer(),              // void *<copyprivate list>
        CpyFn,                        // void (*) (void *, void *) <copy_func>
        DidItVal                      // i32 did_it
    };
    CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_copyprivate), Args);
  }
}
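
// Illustrative sketch (names are illustrative): for
//
//   #pragma omp single copyprivate(x)
//   x = compute();
//
// the emission above is roughly
//
//   kmp_int32 did_it = 0;
//   if (__kmpc_single(&loc, gtid)) {
//     x = compute();
//     __kmpc_end_single(&loc, gtid);
//     did_it = 1;
//   }
//   void *cpr_list[1] = {&x};
//   __kmpc_copyprivate(&loc, gtid, sizeof cpr_list, cpr_list,
//                      .omp.copyprivate.copy_func, did_it);
//
// where the generated copy function broadcasts the single thread's value of
// x into every other thread's copy.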

void CGOpenMPRuntime::emitOrderedRegion(CodeGenFunction &CGF,
                                        const RegionCodeGenTy &OrderedOpGen,
                                        SourceLocation Loc, bool IsThreads) {
  if (!CGF.HaveInsertPoint())
    return;
  // __kmpc_ordered(ident_t *, gtid);
  // OrderedOpGen();
  // __kmpc_end_ordered(ident_t *, gtid);
  // Prepare arguments and build a call to __kmpc_ordered.
  if (IsThreads) {
    llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc)};
    CommonActionTy Action(createRuntimeFunction(OMPRTL__kmpc_ordered), Args,
                          createRuntimeFunction(OMPRTL__kmpc_end_ordered),
                          Args);
    OrderedOpGen.setAction(Action);
    emitInlinedDirective(CGF, OMPD_ordered, OrderedOpGen);
    return;
  }
  emitInlinedDirective(CGF, OMPD_ordered, OrderedOpGen);
}

void CGOpenMPRuntime::emitBarrierCall(CodeGenFunction &CGF, SourceLocation Loc,
                                      OpenMPDirectiveKind Kind, bool EmitChecks,
                                      bool ForceSimpleCall) {
  if (!CGF.HaveInsertPoint())
    return;
  // Build call __kmpc_cancel_barrier(loc, thread_id);
  // Build call __kmpc_barrier(loc, thread_id);
  unsigned Flags;
  if (Kind == OMPD_for)
    Flags = OMP_IDENT_BARRIER_IMPL_FOR;
  else if (Kind == OMPD_sections)
    Flags = OMP_IDENT_BARRIER_IMPL_SECTIONS;
  else if (Kind == OMPD_single)
    Flags = OMP_IDENT_BARRIER_IMPL_SINGLE;
  else if (Kind == OMPD_barrier)
    Flags = OMP_IDENT_BARRIER_EXPL;
  else
    Flags = OMP_IDENT_BARRIER_IMPL;
  // Build call __kmpc_cancel_barrier(loc, thread_id) or __kmpc_barrier(loc,
  // thread_id);
  llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc, Flags),
                         getThreadID(CGF, Loc)};
  if (auto *OMPRegionInfo =
          dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo)) {
    if (!ForceSimpleCall && OMPRegionInfo->hasCancel()) {
      llvm::Value *Result = CGF.EmitRuntimeCall(
          createRuntimeFunction(OMPRTL__kmpc_cancel_barrier), Args);
      if (EmitChecks) {
        // if (__kmpc_cancel_barrier()) {
        //   exit from construct;
        // }
        llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".cancel.exit");
        llvm::BasicBlock *ContBB = CGF.createBasicBlock(".cancel.continue");
        llvm::Value *Cmp = CGF.Builder.CreateIsNotNull(Result);
        CGF.Builder.CreateCondBr(Cmp, ExitBB, ContBB);
        CGF.EmitBlock(ExitBB);
        // exit from construct;
        CodeGenFunction::JumpDest CancelDestination =
            CGF.getOMPCancelDestination(OMPRegionInfo->getDirectiveKind());
        CGF.EmitBranchThroughCleanup(CancelDestination);
        CGF.EmitBlock(ContBB, /*IsFinished=*/true);
      }
      return;
    }
  }
  CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_barrier), Args);
}
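
// Illustrative sketch: in a region that may be cancelled, the barrier above
// is emitted as a cancellation point,
//
//   if (__kmpc_cancel_barrier(&loc, gtid) != 0)
//     <branch through cleanups to the cancellation exit of the construct>
//
// while a region without cancellation (or with ForceSimpleCall) gets a plain
// __kmpc_barrier(&loc, gtid), with the barrier kind encoded in the location
// flags (OMP_IDENT_BARRIER_IMPL_FOR, ..., OMP_IDENT_BARRIER_EXPL).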

/// Map the OpenMP loop schedule to the runtime enumeration.
static OpenMPSchedType getRuntimeSchedule(OpenMPScheduleClauseKind ScheduleKind,
                                          bool Chunked, bool Ordered) {
  switch (ScheduleKind) {
  case OMPC_SCHEDULE_static:
    return Chunked ? (Ordered ? OMP_ord_static_chunked : OMP_sch_static_chunked)
                   : (Ordered ? OMP_ord_static : OMP_sch_static);
  case OMPC_SCHEDULE_dynamic:
    return Ordered ? OMP_ord_dynamic_chunked : OMP_sch_dynamic_chunked;
  case OMPC_SCHEDULE_guided:
    return Ordered ? OMP_ord_guided_chunked : OMP_sch_guided_chunked;
  case OMPC_SCHEDULE_runtime:
    return Ordered ? OMP_ord_runtime : OMP_sch_runtime;
  case OMPC_SCHEDULE_auto:
    return Ordered ? OMP_ord_auto : OMP_sch_auto;
  case OMPC_SCHEDULE_unknown:
    assert(!Chunked && "chunk was specified but schedule kind not known");
    return Ordered ? OMP_ord_static : OMP_sch_static;
  }
  llvm_unreachable("Unexpected runtime schedule");
}
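
// Illustrative examples of the mapping implemented above:
//   schedule(static)            -> OMP_sch_static
//   schedule(static, n)         -> OMP_sch_static_chunked
//   schedule(dynamic[, n])      -> OMP_sch_dynamic_chunked
//   schedule(guided[, n])       -> OMP_sch_guided_chunked
//   ordered + schedule(dynamic) -> OMP_ord_dynamic_chunked
// Note that dynamic and guided are always treated as chunked; callers pass a
// default chunk of 1 when no chunk expression was given.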

/// Map the OpenMP distribute schedule to the runtime enumeration.
static OpenMPSchedType
getRuntimeSchedule(OpenMPDistScheduleClauseKind ScheduleKind, bool Chunked) {
  // Only static is allowed for dist_schedule.
  return Chunked ? OMP_dist_sch_static_chunked : OMP_dist_sch_static;
}

bool CGOpenMPRuntime::isStaticNonchunked(OpenMPScheduleClauseKind ScheduleKind,
                                         bool Chunked) const {
  OpenMPSchedType Schedule =
      getRuntimeSchedule(ScheduleKind, Chunked, /*Ordered=*/false);
  return Schedule == OMP_sch_static;
}

bool CGOpenMPRuntime::isStaticNonchunked(
    OpenMPDistScheduleClauseKind ScheduleKind, bool Chunked) const {
  OpenMPSchedType Schedule = getRuntimeSchedule(ScheduleKind, Chunked);
  return Schedule == OMP_dist_sch_static;
}

bool CGOpenMPRuntime::isStaticChunked(OpenMPScheduleClauseKind ScheduleKind,
                                      bool Chunked) const {
  OpenMPSchedType Schedule =
      getRuntimeSchedule(ScheduleKind, Chunked, /*Ordered=*/false);
  return Schedule == OMP_sch_static_chunked;
}

bool CGOpenMPRuntime::isStaticChunked(
    OpenMPDistScheduleClauseKind ScheduleKind, bool Chunked) const {
  OpenMPSchedType Schedule = getRuntimeSchedule(ScheduleKind, Chunked);
  return Schedule == OMP_dist_sch_static_chunked;
}

bool CGOpenMPRuntime::isDynamic(OpenMPScheduleClauseKind ScheduleKind) const {
  OpenMPSchedType Schedule =
      getRuntimeSchedule(ScheduleKind, /*Chunked=*/false, /*Ordered=*/false);
  assert(Schedule != OMP_sch_static_chunked && "cannot be chunked here");
  return Schedule != OMP_sch_static;
}

static int addMonoNonMonoModifier(OpenMPSchedType Schedule,
                                  OpenMPScheduleClauseModifier M1,
                                  OpenMPScheduleClauseModifier M2) {
  int Modifier = 0;
  switch (M1) {
  case OMPC_SCHEDULE_MODIFIER_monotonic:
    Modifier = OMP_sch_modifier_monotonic;
    break;
  case OMPC_SCHEDULE_MODIFIER_nonmonotonic:
    Modifier = OMP_sch_modifier_nonmonotonic;
    break;
  case OMPC_SCHEDULE_MODIFIER_simd:
    if (Schedule == OMP_sch_static_chunked)
      Schedule = OMP_sch_static_balanced_chunked;
    break;
  case OMPC_SCHEDULE_MODIFIER_last:
  case OMPC_SCHEDULE_MODIFIER_unknown:
    break;
  }
  switch (M2) {
  case OMPC_SCHEDULE_MODIFIER_monotonic:
    Modifier = OMP_sch_modifier_monotonic;
    break;
  case OMPC_SCHEDULE_MODIFIER_nonmonotonic:
    Modifier = OMP_sch_modifier_nonmonotonic;
    break;
  case OMPC_SCHEDULE_MODIFIER_simd:
    if (Schedule == OMP_sch_static_chunked)
      Schedule = OMP_sch_static_balanced_chunked;
    break;
  case OMPC_SCHEDULE_MODIFIER_last:
  case OMPC_SCHEDULE_MODIFIER_unknown:
    break;
  }
  return Schedule | Modifier;
}
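
// Illustrative sketch: the modifier is OR'ed into the schedule constant that
// is passed to the runtime, e.g. schedule(monotonic: dynamic) yields
//
//   addMonoNonMonoModifier(OMP_sch_dynamic_chunked,
//                          OMPC_SCHEDULE_MODIFIER_monotonic,
//                          OMPC_SCHEDULE_MODIFIER_unknown)
//       == (OMP_sch_dynamic_chunked | OMP_sch_modifier_monotonic)
//
// while schedule(simd: static, n) rewrites the schedule itself to
// OMP_sch_static_balanced_chunked and sets no modifier bit.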

void CGOpenMPRuntime::emitForDispatchInit(
    CodeGenFunction &CGF, SourceLocation Loc,
    const OpenMPScheduleTy &ScheduleKind, unsigned IVSize, bool IVSigned,
    bool Ordered, const DispatchRTInput &DispatchValues) {
  if (!CGF.HaveInsertPoint())
    return;
  OpenMPSchedType Schedule = getRuntimeSchedule(
      ScheduleKind.Schedule, DispatchValues.Chunk != nullptr, Ordered);
  assert(Ordered ||
         (Schedule != OMP_sch_static && Schedule != OMP_sch_static_chunked &&
          Schedule != OMP_ord_static && Schedule != OMP_ord_static_chunked &&
          Schedule != OMP_sch_static_balanced_chunked));
  // Call __kmpc_dispatch_init(
  //     ident_t *loc, kmp_int32 tid, kmp_int32 schedule,
  //     kmp_int[32|64] lower, kmp_int[32|64] upper,
  //     kmp_int[32|64] stride, kmp_int[32|64] chunk);
  // If the Chunk was not specified in the clause, use the default value 1.
  llvm::Value *Chunk = DispatchValues.Chunk ? DispatchValues.Chunk
                                            : CGF.Builder.getIntN(IVSize, 1);
  llvm::Value *Args[] = {
      emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
      CGF.Builder.getInt32(addMonoNonMonoModifier(
          Schedule, ScheduleKind.M1, ScheduleKind.M2)), // Schedule type
      DispatchValues.LB,                                // Lower
      DispatchValues.UB,                                // Upper
      CGF.Builder.getIntN(IVSize, 1),                   // Stride
      Chunk                                             // Chunk
  };
  CGF.EmitRuntimeCall(createDispatchInitFunction(IVSize, IVSigned), Args);
}
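
// Illustrative sketch (not part of the emitted code): for
//
//   #pragma omp for schedule(dynamic, 4)
//   for (int i = 0; i < N; ++i) ...
//
// with a 32-bit signed induction variable this emits roughly
//
//   __kmpc_dispatch_init_4(&loc, gtid, OMP_sch_dynamic_chunked,
//                          /*lower=*/0, /*upper=*/N - 1, /*stride=*/1,
//                          /*chunk=*/4);
//
// after which the loop repeatedly calls the matching __kmpc_dispatch_next_4
// (see emitForNext below) until no chunks remain.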

static void emitForStaticInitCall(
    CodeGenFunction &CGF, llvm::Value *UpdateLocation, llvm::Value *ThreadId,
    llvm::Constant *ForStaticInitFunction, OpenMPSchedType Schedule,
    OpenMPScheduleClauseModifier M1, OpenMPScheduleClauseModifier M2,
    const CGOpenMPRuntime::StaticRTInput &Values) {
  if (!CGF.HaveInsertPoint())
    return;
  assert(!Values.Ordered);
  assert(Schedule == OMP_sch_static || Schedule == OMP_sch_static_chunked ||
         Schedule == OMP_sch_static_balanced_chunked ||
         Schedule == OMP_ord_static || Schedule == OMP_ord_static_chunked ||
         Schedule == OMP_dist_sch_static ||
         Schedule == OMP_dist_sch_static_chunked);
  // Call __kmpc_for_static_init(
  //     ident_t *loc, kmp_int32 tid, kmp_int32 schedtype,
  //     kmp_int32 *p_lastiter, kmp_int[32|64] *p_lower,
  //     kmp_int[32|64] *p_upper, kmp_int[32|64] *p_stride,
  //     kmp_int[32|64] incr, kmp_int[32|64] chunk);
  llvm::Value *Chunk = Values.Chunk;
  if (Chunk == nullptr) {
    assert((Schedule == OMP_sch_static || Schedule == OMP_ord_static ||
            Schedule == OMP_dist_sch_static) &&
           "expected static non-chunked schedule");
    // If the Chunk was not specified in the clause, use the default value 1.
    Chunk = CGF.Builder.getIntN(Values.IVSize, 1);
  } else {
    assert((Schedule == OMP_sch_static_chunked ||
            Schedule == OMP_sch_static_balanced_chunked ||
            Schedule == OMP_ord_static_chunked ||
            Schedule == OMP_dist_sch_static_chunked) &&
           "expected static chunked schedule");
  }
  llvm::Value *Args[] = {
      UpdateLocation,
      ThreadId,
      CGF.Builder.getInt32(addMonoNonMonoModifier(Schedule, M1,
                                                  M2)), // Schedule type
      Values.IL.getPointer(),                           // &isLastIter
      Values.LB.getPointer(),                           // &LB
      Values.UB.getPointer(),                           // &UB
      Values.ST.getPointer(),                           // &Stride
      CGF.Builder.getIntN(Values.IVSize, 1),            // Incr
      Chunk                                             // Chunk
  };
  CGF.EmitRuntimeCall(ForStaticInitFunction, Args);
}

void CGOpenMPRuntime::emitForStaticInit(CodeGenFunction &CGF,
                                        SourceLocation Loc,
                                        OpenMPDirectiveKind DKind,
                                        const OpenMPScheduleTy &ScheduleKind,
                                        const StaticRTInput &Values) {
  OpenMPSchedType ScheduleNum = getRuntimeSchedule(
      ScheduleKind.Schedule, Values.Chunk != nullptr, Values.Ordered);
  assert(isOpenMPWorksharingDirective(DKind) &&
         "Expected loop-based or sections-based directive.");
  llvm::Value *UpdatedLocation =
      emitUpdateLocation(CGF, Loc,
                         isOpenMPLoopDirective(DKind) ? OMP_IDENT_WORK_LOOP
                                                      : OMP_IDENT_WORK_SECTIONS);
  llvm::Value *ThreadId = getThreadID(CGF, Loc);
  llvm::Constant *StaticInitFunction =
      createForStaticInitFunction(Values.IVSize, Values.IVSigned);
  emitForStaticInitCall(CGF, UpdatedLocation, ThreadId, StaticInitFunction,
                        ScheduleNum, ScheduleKind.M1, ScheduleKind.M2, Values);
}
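
// Illustrative sketch: for '#pragma omp for schedule(static)' with a 32-bit
// signed induction variable, the call built above is roughly
//
//   __kmpc_for_static_init_4(&loc, gtid, OMP_sch_static, &isLast, &LB, &UB,
//                            &Stride, /*incr=*/1, /*chunk=*/1);
//
// The runtime rewrites LB/UB in place to the calling thread's subrange; the
// loop body then runs over [LB, UB], followed by the matching
// __kmpc_for_static_fini emitted by emitForStaticFinish below.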

void CGOpenMPRuntime::emitDistributeStaticInit(
    CodeGenFunction &CGF, SourceLocation Loc,
    OpenMPDistScheduleClauseKind SchedKind,
    const CGOpenMPRuntime::StaticRTInput &Values) {
  OpenMPSchedType ScheduleNum =
      getRuntimeSchedule(SchedKind, Values.Chunk != nullptr);
  llvm::Value *UpdatedLocation =
      emitUpdateLocation(CGF, Loc, OMP_IDENT_WORK_DISTRIBUTE);
  llvm::Value *ThreadId = getThreadID(CGF, Loc);
  llvm::Constant *StaticInitFunction =
      createForStaticInitFunction(Values.IVSize, Values.IVSigned);
  emitForStaticInitCall(CGF, UpdatedLocation, ThreadId, StaticInitFunction,
                        ScheduleNum, OMPC_SCHEDULE_MODIFIER_unknown,
                        OMPC_SCHEDULE_MODIFIER_unknown, Values);
}

void CGOpenMPRuntime::emitForStaticFinish(CodeGenFunction &CGF,
                                          SourceLocation Loc,
                                          OpenMPDirectiveKind DKind) {
  if (!CGF.HaveInsertPoint())
    return;
  // Call __kmpc_for_static_fini(ident_t *loc, kmp_int32 tid);
  llvm::Value *Args[] = {
      emitUpdateLocation(CGF, Loc,
                         isOpenMPDistributeDirective(DKind)
                             ? OMP_IDENT_WORK_DISTRIBUTE
                             : isOpenMPLoopDirective(DKind)
                                   ? OMP_IDENT_WORK_LOOP
                                   : OMP_IDENT_WORK_SECTIONS),
      getThreadID(CGF, Loc)};
  CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_for_static_fini),
                      Args);
}

void CGOpenMPRuntime::emitForOrderedIterationEnd(CodeGenFunction &CGF,
                                                 SourceLocation Loc,
                                                 unsigned IVSize,
                                                 bool IVSigned) {
  if (!CGF.HaveInsertPoint())
    return;
  // Call __kmpc_dispatch_fini_(4|8)[u](ident_t *loc, kmp_int32 tid);
  llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc)};
  CGF.EmitRuntimeCall(createDispatchFiniFunction(IVSize, IVSigned), Args);
}

llvm::Value *CGOpenMPRuntime::emitForNext(CodeGenFunction &CGF,
                                          SourceLocation Loc, unsigned IVSize,
                                          bool IVSigned, Address IL,
                                          Address LB, Address UB,
                                          Address ST) {
  // Call __kmpc_dispatch_next(
  //     ident_t *loc, kmp_int32 tid, kmp_int32 *p_lastiter,
  //     kmp_int[32|64] *p_lower, kmp_int[32|64] *p_upper,
  //     kmp_int[32|64] *p_stride);
  llvm::Value *Args[] = {
      emitUpdateLocation(CGF, Loc),
      getThreadID(CGF, Loc),
      IL.getPointer(), // &isLastIter
      LB.getPointer(), // &Lower
      UB.getPointer(), // &Upper
      ST.getPointer()  // &Stride
  };
  llvm::Value *Call =
      CGF.EmitRuntimeCall(createDispatchNextFunction(IVSize, IVSigned), Args);
  return CGF.EmitScalarConversion(
      Call, CGF.getContext().getIntTypeForBitwidth(32, /*Signed=*/1),
      CGF.getContext().BoolTy, Loc);
}
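
// Illustrative sketch of the dispatch loop that the caller builds around
// emitForNext (the i1 produced above is exactly its controlling condition):
//
//   while (__kmpc_dispatch_next_4(&loc, gtid, &isLast, &LB, &UB, &ST)) {
//     for (IV = LB; IV <= UB; ++IV)
//       <loop body>
//   }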

void CGOpenMPRuntime::emitNumThreadsClause(CodeGenFunction &CGF,
                                           llvm::Value *NumThreads,
                                           SourceLocation Loc) {
  if (!CGF.HaveInsertPoint())
    return;
  // Build call __kmpc_push_num_threads(&loc, global_tid, num_threads).
  llvm::Value *Args[] = {
      emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
      CGF.Builder.CreateIntCast(NumThreads, CGF.Int32Ty, /*isSigned*/ true)};
  CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_push_num_threads),
                      Args);
}
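
// Illustrative sketch (the placement note is an assumption for illustration):
// for '#pragma omp parallel num_threads(n)' this emits
//
//   __kmpc_push_num_threads(&loc, gtid, (kmp_int32)n);
//
// shortly before the runtime call that forks the parallel region, so the
// request only affects the next region started by this thread.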

void CGOpenMPRuntime::emitProcBindClause(CodeGenFunction &CGF,
                                         OpenMPProcBindClauseKind ProcBind,
                                         SourceLocation Loc) {
  if (!CGF.HaveInsertPoint())
    return;
  // Constants for proc bind value accepted by the runtime.
  enum ProcBindTy {
    ProcBindFalse = 0,
    ProcBindTrue,
    ProcBindMaster,
    ProcBindClose,
    ProcBindSpread,
    ProcBindIntel,
    ProcBindDefault
  } RuntimeProcBind;
  switch (ProcBind) {
  case OMPC_PROC_BIND_master:
    RuntimeProcBind = ProcBindMaster;
    break;
  case OMPC_PROC_BIND_close:
    RuntimeProcBind = ProcBindClose;
    break;
  case OMPC_PROC_BIND_spread:
    RuntimeProcBind = ProcBindSpread;
    break;
  case OMPC_PROC_BIND_unknown:
    llvm_unreachable("Unsupported proc_bind value.");
  }
  // Build call __kmpc_push_proc_bind(&loc, global_tid, proc_bind).
  llvm::Value *Args[] = {
      emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
      llvm::ConstantInt::get(CGM.IntTy, RuntimeProcBind, /*isSigned=*/true)};
  CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_push_proc_bind), Args);
}

void CGOpenMPRuntime::emitFlush(CodeGenFunction &CGF, ArrayRef<const Expr *>,
                                SourceLocation Loc) {
  if (!CGF.HaveInsertPoint())
    return;
  // Build call void __kmpc_flush(ident_t *loc).
  CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_flush),
                      emitUpdateLocation(CGF, Loc));
}
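
// Illustrative sketch: '#pragma omp flush' (the optional variable list is
// accepted but ignored here) lowers to a single call:
//
//   __kmpc_flush(&loc);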

namespace {
/// Indexes of fields for type kmp_task_t.
enum KmpTaskTFields {
  /// List of shared variables.
  KmpTaskTShareds,
  /// Task routine.
  KmpTaskTRoutine,
  /// Partition id for the untied tasks.
  KmpTaskTPartId,
  /// Function with call of destructors for private variables.
  Data1,
  /// Task priority.
  Data2,
  /// (Taskloops only) Lower bound.
  KmpTaskTLowerBound,
  /// (Taskloops only) Upper bound.
  KmpTaskTUpperBound,
  /// (Taskloops only) Stride.
  KmpTaskTStride,
  /// (Taskloops only) Is last iteration flag.
  KmpTaskTLastIter,
  /// (Taskloops only) Reduction data.
  KmpTaskTReductions,
};
} // anonymous namespace

bool CGOpenMPRuntime::OffloadEntriesInfoManagerTy::empty() const {
  return OffloadEntriesTargetRegion.empty() &&
         OffloadEntriesDeviceGlobalVar.empty();
}

/// Initialize target region entry.
void CGOpenMPRuntime::OffloadEntriesInfoManagerTy::
    initializeTargetRegionEntryInfo(unsigned DeviceID, unsigned FileID,
                                    StringRef ParentName, unsigned LineNum,
                                    unsigned Order) {
  assert(CGM.getLangOpts().OpenMPIsDevice && "Initialization of entries is "
                                             "only required for the device "
                                             "code generation.");
  OffloadEntriesTargetRegion[DeviceID][FileID][ParentName][LineNum] =
      OffloadEntryInfoTargetRegion(Order, /*Addr=*/nullptr, /*ID=*/nullptr,
                                   OMPTargetRegionEntryTargetRegion);
  ++OffloadingEntriesNum;
}

void CGOpenMPRuntime::OffloadEntriesInfoManagerTy::
    registerTargetRegionEntryInfo(unsigned DeviceID, unsigned FileID,
                                  StringRef ParentName, unsigned LineNum,
                                  llvm::Constant *Addr, llvm::Constant *ID,
                                  OMPTargetRegionEntryKind Flags) {
  // If we are emitting code for a target, the entry is already initialized
  // and only has to be registered.
  if (CGM.getLangOpts().OpenMPIsDevice) {
    if (!hasTargetRegionEntryInfo(DeviceID, FileID, ParentName, LineNum)) {
      unsigned DiagID = CGM.getDiags().getCustomDiagID(
          DiagnosticsEngine::Error,
          "Unable to find target region on line '%0' in the device code.");
      CGM.getDiags().Report(DiagID) << LineNum;
      return;
    }
    auto &Entry =
        OffloadEntriesTargetRegion[DeviceID][FileID][ParentName][LineNum];
    assert(Entry.isValid() && "Entry not initialized!");
    Entry.setAddress(Addr);
    Entry.setID(ID);
    Entry.setFlags(Flags);
  } else {
    OffloadEntryInfoTargetRegion Entry(OffloadingEntriesNum, Addr, ID, Flags);
    OffloadEntriesTargetRegion[DeviceID][FileID][ParentName][LineNum] = Entry;
    ++OffloadingEntriesNum;
  }
}

bool CGOpenMPRuntime::OffloadEntriesInfoManagerTy::hasTargetRegionEntryInfo(
    unsigned DeviceID, unsigned FileID, StringRef ParentName,
    unsigned LineNum) const {
  auto PerDevice = OffloadEntriesTargetRegion.find(DeviceID);
  if (PerDevice == OffloadEntriesTargetRegion.end())
    return false;
  auto PerFile = PerDevice->second.find(FileID);
  if (PerFile == PerDevice->second.end())
    return false;
  auto PerParentName = PerFile->second.find(ParentName);
  if (PerParentName == PerFile->second.end())
    return false;
  auto PerLine = PerParentName->second.find(LineNum);
  if (PerLine == PerParentName->second.end())
    return false;
  // Fail if this entry is already registered.
  if (PerLine->second.getAddress() || PerLine->second.getID())
    return false;
  return true;
}

void CGOpenMPRuntime::OffloadEntriesInfoManagerTy::actOnTargetRegionEntriesInfo(
    const OffloadTargetRegionEntryInfoActTy &Action) {
  // Scan all target region entries and perform the provided action.
  for (const auto &D : OffloadEntriesTargetRegion)
    for (const auto &F : D.second)
      for (const auto &P : F.second)
        for (const auto &L : P.second)
          Action(D.first, F.first, P.first(), L.first, L.second);
}

void CGOpenMPRuntime::OffloadEntriesInfoManagerTy::
    initializeDeviceGlobalVarEntryInfo(StringRef Name,
                                       OMPTargetGlobalVarEntryKind Flags,
                                       unsigned Order) {
  assert(CGM.getLangOpts().OpenMPIsDevice && "Initialization of entries is "
                                             "only required for the device "
                                             "code generation.");
  OffloadEntriesDeviceGlobalVar.try_emplace(Name, Order, Flags);
  ++OffloadingEntriesNum;
}

void CGOpenMPRuntime::OffloadEntriesInfoManagerTy::
    registerDeviceGlobalVarEntryInfo(StringRef VarName, llvm::Constant *Addr,
                                     CharUnits VarSize,
                                     OMPTargetGlobalVarEntryKind Flags,
                                     llvm::GlobalValue::LinkageTypes Linkage) {
  if (CGM.getLangOpts().OpenMPIsDevice) {
    auto &Entry = OffloadEntriesDeviceGlobalVar[VarName];
    assert(Entry.isValid() && Entry.getFlags() == Flags &&
           "Entry not initialized!");
    assert((!Entry.getAddress() || Entry.getAddress() == Addr) &&
           "Resetting with the new address.");
    if (Entry.getAddress() && hasDeviceGlobalVarEntryInfo(VarName))
      return;
    Entry.setAddress(Addr);
    Entry.setVarSize(VarSize);
    Entry.setLinkage(Linkage);
  } else {
    if (hasDeviceGlobalVarEntryInfo(VarName))
      return;
    OffloadEntriesDeviceGlobalVar.try_emplace(
        VarName, OffloadingEntriesNum, Addr, VarSize, Flags, Linkage);
    ++OffloadingEntriesNum;
  }
}

void CGOpenMPRuntime::OffloadEntriesInfoManagerTy::
    actOnDeviceGlobalVarEntriesInfo(
        const OffloadDeviceGlobalVarEntryInfoActTy &Action) {
  // Scan all device global variable entries and perform the provided action.
  for (const auto &E : OffloadEntriesDeviceGlobalVar)
    Action(E.getKey(), E.getValue());
}

llvm::Function *
CGOpenMPRuntime::createOffloadingBinaryDescriptorRegistration() {
  // If we don't have entries or if we are emitting code for the device, we
  // don't need to do anything.
  if (CGM.getLangOpts().OpenMPIsDevice || OffloadEntriesInfoManager.empty())
    return nullptr;

  llvm::Module &M = CGM.getModule();
  ASTContext &C = CGM.getContext();

  // Get the list of devices we care about.
  const std::vector<llvm::Triple> &Devices = CGM.getLangOpts().OMPTargetTriples;

  // We should be creating an offloading descriptor only if there are devices
  // specified.
  assert(!Devices.empty() && "No OpenMP offloading devices specified!");

  // Create the external variables that will point to the begin and end of the
  // host entries section. These will be defined by the linker.
  llvm::Type *OffloadEntryTy =
      CGM.getTypes().ConvertTypeForMem(getTgtOffloadEntryQTy());
  std::string EntriesBeginName = getName({"omp_offloading", "entries_begin"});
  auto *HostEntriesBegin = new llvm::GlobalVariable(
      M, OffloadEntryTy, /*isConstant=*/true,
      llvm::GlobalValue::ExternalLinkage, /*Initializer=*/nullptr,
      EntriesBeginName);
  std::string EntriesEndName = getName({"omp_offloading", "entries_end"});
  auto *HostEntriesEnd =
      new llvm::GlobalVariable(M, OffloadEntryTy, /*isConstant=*/true,
                               llvm::GlobalValue::ExternalLinkage,
                               /*Initializer=*/nullptr, EntriesEndName);

  // Create all device images.
  auto *DeviceImageTy = cast<llvm::StructType>(
      CGM.getTypes().ConvertTypeForMem(getTgtDeviceImageQTy()));
  ConstantInitBuilder DeviceImagesBuilder(CGM);
  ConstantArrayBuilder DeviceImagesEntries =
      DeviceImagesBuilder.beginArray(DeviceImageTy);

  for (const llvm::Triple &Device : Devices) {
    StringRef T = Device.getTriple();
    std::string BeginName = getName({"omp_offloading", "img_start", ""});
    auto *ImgBegin = new llvm::GlobalVariable(
        M, CGM.Int8Ty, /*isConstant=*/true,
        llvm::GlobalValue::ExternalWeakLinkage,
        /*Initializer=*/nullptr, Twine(BeginName).concat(T));
    std::string EndName = getName({"omp_offloading", "img_end", ""});
    auto *ImgEnd = new llvm::GlobalVariable(
        M, CGM.Int8Ty, /*isConstant=*/true,
        llvm::GlobalValue::ExternalWeakLinkage,
        /*Initializer=*/nullptr, Twine(EndName).concat(T));

    llvm::Constant *Data[] = {ImgBegin, ImgEnd, HostEntriesBegin,
                              HostEntriesEnd};
    createConstantGlobalStructAndAddToParent(CGM, getTgtDeviceImageQTy(), Data,
                                             DeviceImagesEntries);
  }

  // Create the device images global array.
  std::string ImagesName = getName({"omp_offloading", "device_images"});
  llvm::GlobalVariable *DeviceImages =
      DeviceImagesEntries.finishAndCreateGlobal(ImagesName,
                                                CGM.getPointerAlign(),
                                                /*isConstant=*/true);
  DeviceImages->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);

  // This is a zero array used in the creation of the constant expressions.
  llvm::Constant *Index[] = {llvm::Constant::getNullValue(CGM.Int32Ty),
                             llvm::Constant::getNullValue(CGM.Int32Ty)};

  // Create the target region descriptor.
  llvm::Constant *Data[] = {
      llvm::ConstantInt::get(CGM.Int32Ty, Devices.size()),
      llvm::ConstantExpr::getGetElementPtr(DeviceImages->getValueType(),
                                           DeviceImages, Index),
      HostEntriesBegin, HostEntriesEnd};
  std::string Descriptor = getName({"omp_offloading", "descriptor"});
  llvm::GlobalVariable *Desc = createGlobalStruct(
      CGM, getTgtBinaryDescriptorQTy(), /*IsConstant=*/true, Data, Descriptor);

  // Emit code to register or unregister the descriptor at execution
  // startup or closing, respectively.

  llvm::Function *UnRegFn;
  {
    FunctionArgList Args;
    ImplicitParamDecl DummyPtr(C, C.VoidPtrTy, ImplicitParamDecl::Other);
    Args.push_back(&DummyPtr);

    CodeGenFunction CGF(CGM);
    // Disable debug info for global (de-)initializers because they are not
    // part of some particular construct.
    CGF.disableDebugInfo();
    const auto &FI =
        CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
    llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FI);
    std::string UnregName = getName({"omp_offloading", "descriptor_unreg"});
    UnRegFn = CGM.CreateGlobalInitOrDestructFunction(FTy, UnregName, FI);
    CGF.StartFunction(GlobalDecl(), C.VoidTy, UnRegFn, FI, Args);
    CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__tgt_unregister_lib),
                        Desc);
    CGF.FinishFunction();
  }
  llvm::Function *RegFn;
  {
    CodeGenFunction CGF(CGM);
    // Disable debug info for global (de-)initializers because they are not
    // part of some particular construct.
    CGF.disableDebugInfo();
    const auto &FI = CGM.getTypes().arrangeNullaryFunction();
    llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FI);

    // Encode offload target triples into the registration function name. It
    // will serve as a comdat key for the registration/unregistration code for
    // this particular combination of offloading targets.
    SmallVector<StringRef, 4U> RegFnNameParts(Devices.size() + 2U);
    RegFnNameParts[0] = "omp_offloading";
    RegFnNameParts[1] = "descriptor_reg";
    llvm::transform(Devices, std::next(RegFnNameParts.begin(), 2),
                    [](const llvm::Triple &T) -> const std::string & {
                      return T.getTriple();
                    });
    llvm::sort(std::next(RegFnNameParts.begin(), 2), RegFnNameParts.end());
    std::string Descriptor = getName(RegFnNameParts);
    RegFn = CGM.CreateGlobalInitOrDestructFunction(FTy, Descriptor, FI);
    CGF.StartFunction(GlobalDecl(), C.VoidTy, RegFn, FI, FunctionArgList());
    CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__tgt_register_lib), Desc);
    // Create a variable to drive the registration and unregistration of the
    // descriptor, so we can reuse the logic that emits Ctors and Dtors.
    ImplicitParamDecl RegUnregVar(C, C.getTranslationUnitDecl(),
                                  SourceLocation(), nullptr, C.CharTy,
                                  ImplicitParamDecl::Other);
    CGM.getCXXABI().registerGlobalDtor(CGF, RegUnregVar, UnRegFn, Desc);
    CGF.FinishFunction();
  }
  if (CGM.supportsCOMDAT()) {
    // It is sufficient to call the registration function only once, so create
    // a COMDAT group for the registration/unregistration functions and the
    // associated data. That reduces startup time and code size. The
    // registration function serves as the COMDAT group key.
    llvm::Comdat *ComdatKey = M.getOrInsertComdat(RegFn->getName());
    RegFn->setLinkage(llvm::GlobalValue::LinkOnceAnyLinkage);
    RegFn->setVisibility(llvm::GlobalValue::HiddenVisibility);
    RegFn->setComdat(ComdatKey);
    UnRegFn->setComdat(ComdatKey);
    DeviceImages->setComdat(ComdatKey);
    Desc->setComdat(ComdatKey);
  }
  return RegFn;
}

void CGOpenMPRuntime::createOffloadEntry(
    llvm::Constant *ID, llvm::Constant *Addr, uint64_t Size, int32_t Flags,
    llvm::GlobalValue::LinkageTypes Linkage) {
  StringRef Name = Addr->getName();
  llvm::Module &M = CGM.getModule();
  llvm::LLVMContext &C = M.getContext();

  // Create a constant string with the name.
  llvm::Constant *StrPtrInit = llvm::ConstantDataArray::getString(C, Name);

  std::string StringName = getName({"omp_offloading", "entry_name"});
  auto *Str = new llvm::GlobalVariable(
      M, StrPtrInit->getType(), /*isConstant=*/true,
      llvm::GlobalValue::InternalLinkage, StrPtrInit, StringName);
  Str->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);

  llvm::Constant *Data[] = {llvm::ConstantExpr::getBitCast(ID, CGM.VoidPtrTy),
                            llvm::ConstantExpr::getBitCast(Str, CGM.Int8PtrTy),
                            llvm::ConstantInt::get(CGM.SizeTy, Size),
                            llvm::ConstantInt::get(CGM.Int32Ty, Flags),
                            llvm::ConstantInt::get(CGM.Int32Ty, 0)};
  std::string EntryName = getName({"omp_offloading", "entry", ""});
  llvm::GlobalVariable *Entry = createGlobalStruct(
      CGM, getTgtOffloadEntryQTy(), /*IsConstant=*/true, Data,
      Twine(EntryName).concat(Name), llvm::GlobalValue::WeakAnyLinkage);

  // The entry has to be created in the section the linker expects it to be.
  std::string Section = getName({"omp_offloading", "entries"});
  Entry->setSection(Section);
}
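
// Illustrative IR shape of one entry (exact mangled names are illustrative):
//
//   @.omp_offloading.entry_name = internal unnamed_addr constant [N x i8]
//       c"__omp_offloading_<...>_foo_l42\00"
//   @.omp_offloading.entry.__omp_offloading_<...>_foo_l42 = weak constant
//       %struct.__tgt_offload_entry { i8* <ID>, i8* <name>, i64 <size>,
//       i32 <flags>, i32 0 }, section "<entries section>"
//
// Placing every entry in the same section lets the linker bound the table
// with the entries_begin/entries_end symbols created in
// createOffloadingBinaryDescriptorRegistration above.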

void CGOpenMPRuntime::createOffloadEntriesAndInfoMetadata() {
  // Emit the offloading entries and metadata so that the device codegen side
  // can easily figure out what to emit. The produced metadata looks like
  // this:
  //
  // !omp_offload.info = !{!1, ...}
  //
  // Right now we only generate metadata for functions that contain target
  // regions.

  // If we do not have entries, we don't need to do anything.
  if (OffloadEntriesInfoManager.empty())
    return;

  llvm::Module &M = CGM.getModule();
  llvm::LLVMContext &C = M.getContext();
  SmallVector<const OffloadEntriesInfoManagerTy::OffloadEntryInfo *, 16>
      OrderedEntries(OffloadEntriesInfoManager.size());
  llvm::SmallVector<StringRef, 16> ParentFunctions(
      OffloadEntriesInfoManager.size());

  // Auxiliary methods to create metadata values and strings.
  auto &&GetMDInt = [this](unsigned V) {
    return llvm::ConstantAsMetadata::get(
        llvm::ConstantInt::get(CGM.Int32Ty, V));
  };

  auto &&GetMDString = [&C](StringRef V) { return llvm::MDString::get(C, V); };

  // Create the offloading info metadata node.
  llvm::NamedMDNode *MD = M.getOrInsertNamedMetadata("omp_offload.info");

  // Create function that emits metadata for each target region entry.
  auto &&TargetRegionMetadataEmitter =
      [&C, MD, &OrderedEntries, &ParentFunctions, &GetMDInt, &GetMDString](
          unsigned DeviceID, unsigned FileID, StringRef ParentName,
          unsigned Line,
          const OffloadEntriesInfoManagerTy::OffloadEntryInfoTargetRegion &E) {
        // Generate metadata for target regions. Each entry of this metadata
        // contains:
        // - Entry 0 -> Kind of this type of metadata (0).
        // - Entry 1 -> Device ID of the file where the entry was identified.
        // - Entry 2 -> File ID of the file where the entry was identified.
        // - Entry 3 -> Mangled name of the function where the entry was
        //   identified.
        // - Entry 4 -> Line in the file where the entry was identified.
        // - Entry 5 -> Order the entry was created.
        // The first element of the metadata node is the kind.
        llvm::Metadata *Ops[] = {GetMDInt(E.getKind()), GetMDInt(DeviceID),
                                 GetMDInt(FileID), GetMDString(ParentName),
                                 GetMDInt(Line), GetMDInt(E.getOrder())};

        // Save this entry in the right position of the ordered entries array.
        OrderedEntries[E.getOrder()] = &E;
        ParentFunctions[E.getOrder()] = ParentName;

        // Add metadata to the named metadata node.
        MD->addOperand(llvm::MDNode::get(C, Ops));
      };

  OffloadEntriesInfoManager.actOnTargetRegionEntriesInfo(
      TargetRegionMetadataEmitter);

  // Create function that emits metadata for each device global variable entry.
  auto &&DeviceGlobalVarMetadataEmitter =
      [&C, &OrderedEntries, &GetMDInt, &GetMDString,
       MD](StringRef MangledName,
           const OffloadEntriesInfoManagerTy::OffloadEntryInfoDeviceGlobalVar
               &E) {
        // Generate metadata for global variables. Each entry of this metadata
        // contains:
        // - Entry 0 -> Kind of this type of metadata (1).
        // - Entry 1 -> Mangled name of the variable.
        // - Entry 2 -> Declare target kind.
        // - Entry 3 -> Order the entry was created.
        // The first element of the metadata node is the kind.
        llvm::Metadata *Ops[] = {
            GetMDInt(E.getKind()), GetMDString(MangledName),
            GetMDInt(E.getFlags()), GetMDInt(E.getOrder())};

        // Save this entry in the right position of the ordered entries array.
        OrderedEntries[E.getOrder()] = &E;

        // Add metadata to the named metadata node.
        MD->addOperand(llvm::MDNode::get(C, Ops));
      };

  OffloadEntriesInfoManager.actOnDeviceGlobalVarEntriesInfo(
      DeviceGlobalVarMetadataEmitter);

  for (const auto *E : OrderedEntries) {
    assert(E && "All ordered entries must exist!");
    if (const auto *CE =
            dyn_cast<OffloadEntriesInfoManagerTy::OffloadEntryInfoTargetRegion>(
                E)) {
      if (!CE->getID() || !CE->getAddress()) {
        // Do not blame the entry if the parent function is not emitted.
        StringRef FnName = ParentFunctions[CE->getOrder()];
        if (!CGM.GetGlobalValue(FnName))
          continue;
        unsigned DiagID = CGM.getDiags().getCustomDiagID(
            DiagnosticsEngine::Error,
            "Offloading entry for target region is incorrect: either the "
            "address or the ID is invalid.");
        CGM.getDiags().Report(DiagID);
        continue;
      }
      createOffloadEntry(CE->getID(), CE->getAddress(), /*Size=*/0,
                         CE->getFlags(), llvm::GlobalValue::WeakAnyLinkage);
    } else if (const auto *CE =
                   dyn_cast<OffloadEntriesInfoManagerTy::
                                OffloadEntryInfoDeviceGlobalVar>(E)) {
      OffloadEntriesInfoManagerTy::OMPTargetGlobalVarEntryKind Flags =
          static_cast<OffloadEntriesInfoManagerTy::OMPTargetGlobalVarEntryKind>(
              CE->getFlags());
      switch (Flags) {
      case OffloadEntriesInfoManagerTy::OMPTargetGlobalVarEntryTo: {
        if (!CE->getAddress()) {
          unsigned DiagID = CGM.getDiags().getCustomDiagID(
              DiagnosticsEngine::Error,
              "Offloading entry for declare target variable is incorrect: the "
              "address is invalid.");
          CGM.getDiags().Report(DiagID);
          continue;
        }
        // The variable has no definition - no need to add the entry.
        if (CE->getVarSize().isZero())
          continue;
        break;
      }
      case OffloadEntriesInfoManagerTy::OMPTargetGlobalVarEntryLink:
        assert(((CGM.getLangOpts().OpenMPIsDevice && !CE->getAddress()) ||
                (!CGM.getLangOpts().OpenMPIsDevice && CE->getAddress())) &&
               "Declare target link address is set.");
        if (CGM.getLangOpts().OpenMPIsDevice)
          continue;
        if (!CE->getAddress()) {
          unsigned DiagID = CGM.getDiags().getCustomDiagID(
              DiagnosticsEngine::Error,
              "Offloading entry for declare target variable is incorrect: the "
              "address is invalid.");
          CGM.getDiags().Report(DiagID);
          continue;
        }
        break;
      }
      createOffloadEntry(CE->getAddress(), CE->getAddress(),
                         CE->getVarSize().getQuantity(), Flags,
                         CE->getLinkage());
    } else {
      llvm_unreachable("Unsupported entry kind.");
    }
  }
}

/// Loads all the offload entries information from the host IR
/// metadata.
void CGOpenMPRuntime::loadOffloadInfoMetadata() {
  // If we are in target mode, load the metadata from the host IR. This code
  // has to match the metadata creation in
  // createOffloadEntriesAndInfoMetadata().
  if (!CGM.getLangOpts().OpenMPIsDevice)
    return;

  if (CGM.getLangOpts().OMPHostIRFile.empty())
    return;

  auto Buf = llvm::MemoryBuffer::getFile(CGM.getLangOpts().OMPHostIRFile);
  if (auto EC = Buf.getError()) {
    CGM.getDiags().Report(diag::err_cannot_open_file)
        << CGM.getLangOpts().OMPHostIRFile << EC.message();
    return;
  }

  llvm::LLVMContext C;
  auto ME = expectedToErrorOrAndEmitErrors(
      C, llvm::parseBitcodeFile(Buf.get()->getMemBufferRef(), C));

  if (auto EC = ME.getError()) {
    unsigned DiagID = CGM.getDiags().getCustomDiagID(
        DiagnosticsEngine::Error, "Unable to parse host IR file '%0':'%1'");
    CGM.getDiags().Report(DiagID)
        << CGM.getLangOpts().OMPHostIRFile << EC.message();
    return;
  }

  llvm::NamedMDNode *MD = ME.get()->getNamedMetadata("omp_offload.info");
  if (!MD)
    return;

  for (llvm::MDNode *MN : MD->operands()) {
    auto &&GetMDInt = [MN](unsigned Idx) {
      auto *V = cast<llvm::ConstantAsMetadata>(MN->getOperand(Idx));
      return cast<llvm::ConstantInt>(V->getValue())->getZExtValue();
    };

    auto &&GetMDString = [MN](unsigned Idx) {
      auto *V = cast<llvm::MDString>(MN->getOperand(Idx));
      return V->getString();
    };

    switch (GetMDInt(0)) {
    default:
      llvm_unreachable("Unexpected metadata!");
      break;
    case OffloadEntriesInfoManagerTy::OffloadEntryInfo::
        OffloadingEntryInfoTargetRegion:
      OffloadEntriesInfoManager.initializeTargetRegionEntryInfo(
          /*DeviceID=*/GetMDInt(1), /*FileID=*/GetMDInt(2),
          /*ParentName=*/GetMDString(3), /*Line=*/GetMDInt(4),
          /*Order=*/GetMDInt(5));
      break;
    case OffloadEntriesInfoManagerTy::OffloadEntryInfo::
        OffloadingEntryInfoDeviceGlobalVar:
      OffloadEntriesInfoManager.initializeDeviceGlobalVarEntryInfo(
          /*MangledName=*/GetMDString(1),
          static_cast<OffloadEntriesInfoManagerTy::OMPTargetGlobalVarEntryKind>(
              /*Flags=*/GetMDInt(2)),
          /*Order=*/GetMDInt(3));
      break;
    }
  }
}
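
// Illustrative host-IR metadata consumed above; the shapes match the emitter
// in createOffloadEntriesAndInfoMetadata (the values are made up):
//
//   !omp_offload.info = !{!0, !1}
//   !0 = !{i32 0, i32 <DeviceID>, i32 <FileID>, !"_Z3foov", i32 42, i32 0}
//   !1 = !{i32 1, !"global_var", i32 <DeclareTargetKind>, i32 1}
//
// A leading kind of 0 initializes a target-region entry; a kind of 1
// initializes a declare-target global-variable entry.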

void CGOpenMPRuntime::emitKmpRoutineEntryT(QualType KmpInt32Ty) {
  if (!KmpRoutineEntryPtrTy) {
    // Build typedef kmp_int32 (* kmp_routine_entry_t)(kmp_int32, void *); type.
    ASTContext &C = CGM.getContext();
    QualType KmpRoutineEntryTyArgs[] = {KmpInt32Ty, C.VoidPtrTy};
    FunctionProtoType::ExtProtoInfo EPI;
    KmpRoutineEntryPtrQTy = C.getPointerType(
        C.getFunctionType(KmpInt32Ty, KmpRoutineEntryTyArgs, EPI));
    KmpRoutineEntryPtrTy = CGM.getTypes().ConvertType(KmpRoutineEntryPtrQTy);
  }
}

QualType CGOpenMPRuntime::getTgtOffloadEntryQTy() {
  // Make sure the type of the entry is already created. This is the type we
  // have to create:
  // struct __tgt_offload_entry{
  //   void    *addr;      // Pointer to the offload entry info
  //                       // (function or global).
  //   char    *name;      // Name of the function or global.
  //   size_t   size;      // Size of the entry info (0 if it is a function).
  //   int32_t  flags;     // Flags associated with the entry, e.g. 'link'.
  //   int32_t  reserved;  // Reserved, to be used by the runtime library.
  // };
  if (TgtOffloadEntryQTy.isNull()) {
    ASTContext &C = CGM.getContext();
    RecordDecl *RD = C.buildImplicitRecord("__tgt_offload_entry");
    RD->startDefinition();
    addFieldToRecordDecl(C, RD, C.VoidPtrTy);
    addFieldToRecordDecl(C, RD, C.getPointerType(C.CharTy));
    addFieldToRecordDecl(C, RD, C.getSizeType());
    addFieldToRecordDecl(
        C, RD, C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/true));
    addFieldToRecordDecl(
        C, RD, C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/true));
    RD->completeDefinition();
    RD->addAttr(PackedAttr::CreateImplicit(C));
    TgtOffloadEntryQTy = C.getRecordType(RD);
  }
  return TgtOffloadEntryQTy;
}

QualType CGOpenMPRuntime::getTgtDeviceImageQTy() {
  // These are the types we need to build:
  // struct __tgt_device_image{
  //   void *ImageStart;  // Pointer to the target code start.
  //   void *ImageEnd;    // Pointer to the target code end.
  //   // We also add the host entries to the device image, as it may be useful
  //   // for the target runtime to have access to that information.
  //   __tgt_offload_entry *EntriesBegin;  // Begin of the table with all
  //                                       // the entries.
  //   __tgt_offload_entry *EntriesEnd;    // End of the table with all the
  //                                       // entries (non inclusive).
  // };
  if (TgtDeviceImageQTy.isNull()) {
    ASTContext &C = CGM.getContext();
    RecordDecl *RD = C.buildImplicitRecord("__tgt_device_image");
    RD->startDefinition();
    addFieldToRecordDecl(C, RD, C.VoidPtrTy);
    addFieldToRecordDecl(C, RD, C.VoidPtrTy);
    addFieldToRecordDecl(C, RD, C.getPointerType(getTgtOffloadEntryQTy()));
    addFieldToRecordDecl(C, RD, C.getPointerType(getTgtOffloadEntryQTy()));
    RD->completeDefinition();
    TgtDeviceImageQTy = C.getRecordType(RD);
  }
  return TgtDeviceImageQTy;
}

QualType CGOpenMPRuntime::getTgtBinaryDescriptorQTy() {
  // struct __tgt_bin_desc{
  //   int32_t NumDevices;                 // Number of devices supported.
  //   __tgt_device_image *DeviceImages;   // Array of device images
  //                                       // (one per device).
  //   __tgt_offload_entry *EntriesBegin;  // Begin of the table with all the
  //                                       // entries.
  //   __tgt_offload_entry *EntriesEnd;    // End of the table with all the
  //                                       // entries (non inclusive).
  // };
  if (TgtBinaryDescriptorQTy.isNull()) {
    ASTContext &C = CGM.getContext();
    RecordDecl *RD = C.buildImplicitRecord("__tgt_bin_desc");
    RD->startDefinition();
    addFieldToRecordDecl(
        C, RD, C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/true));
    addFieldToRecordDecl(C, RD, C.getPointerType(getTgtDeviceImageQTy()));
    addFieldToRecordDecl(C, RD, C.getPointerType(getTgtOffloadEntryQTy()));
    addFieldToRecordDecl(C, RD, C.getPointerType(getTgtOffloadEntryQTy()));
    RD->completeDefinition();
    TgtBinaryDescriptorQTy = C.getRecordType(RD);
  }
  return TgtBinaryDescriptorQTy;
}
  3966. namespace {
  3967. struct PrivateHelpersTy {
  3968. PrivateHelpersTy(const VarDecl *Original, const VarDecl *PrivateCopy,
  3969. const VarDecl *PrivateElemInit)
  3970. : Original(Original), PrivateCopy(PrivateCopy),
  3971. PrivateElemInit(PrivateElemInit) {}
  3972. const VarDecl *Original;
  3973. const VarDecl *PrivateCopy;
  3974. const VarDecl *PrivateElemInit;
  3975. };
  3976. typedef std::pair<CharUnits /*Align*/, PrivateHelpersTy> PrivateDataTy;
  3977. } // anonymous namespace
  3978. static RecordDecl *
  3979. createPrivatesRecordDecl(CodeGenModule &CGM, ArrayRef<PrivateDataTy> Privates) {
  3980. if (!Privates.empty()) {
  3981. ASTContext &C = CGM.getContext();
  3982. // Build struct .kmp_privates_t. {
  3983. // /* private vars */
  3984. // };
  3985. RecordDecl *RD = C.buildImplicitRecord(".kmp_privates.t");
  3986. RD->startDefinition();
  3987. for (const auto &Pair : Privates) {
  3988. const VarDecl *VD = Pair.second.Original;
  3989. QualType Type = VD->getType().getNonReferenceType();
  3990. FieldDecl *FD = addFieldToRecordDecl(C, RD, Type);
  3991. if (VD->hasAttrs()) {
  3992. for (specific_attr_iterator<AlignedAttr> I(VD->getAttrs().begin()),
  3993. E(VD->getAttrs().end());
  3994. I != E; ++I)
  3995. FD->addAttr(*I);
  3996. }
  3997. }
  3998. RD->completeDefinition();
  3999. return RD;
  4000. }
  4001. return nullptr;
  4002. }
static RecordDecl *
createKmpTaskTRecordDecl(CodeGenModule &CGM, OpenMPDirectiveKind Kind,
                         QualType KmpInt32Ty,
                         QualType KmpRoutineEntryPointerQTy) {
  ASTContext &C = CGM.getContext();
  // Build struct kmp_task_t {
  //         void *              shareds;
  //         kmp_routine_entry_t routine;
  //         kmp_int32           part_id;
  //         kmp_cmplrdata_t     data1;
  //         kmp_cmplrdata_t     data2;
  // For taskloops additional fields:
  //         kmp_uint64          lb;
  //         kmp_uint64          ub;
  //         kmp_int64           st;
  //         kmp_int32           liter;
  //         void *              reductions;
  //       };
  RecordDecl *UD = C.buildImplicitRecord("kmp_cmplrdata_t", TTK_Union);
  UD->startDefinition();
  addFieldToRecordDecl(C, UD, KmpInt32Ty);
  addFieldToRecordDecl(C, UD, KmpRoutineEntryPointerQTy);
  UD->completeDefinition();
  QualType KmpCmplrdataTy = C.getRecordType(UD);
  RecordDecl *RD = C.buildImplicitRecord("kmp_task_t");
  RD->startDefinition();
  addFieldToRecordDecl(C, RD, C.VoidPtrTy);
  addFieldToRecordDecl(C, RD, KmpRoutineEntryPointerQTy);
  addFieldToRecordDecl(C, RD, KmpInt32Ty);
  addFieldToRecordDecl(C, RD, KmpCmplrdataTy);
  addFieldToRecordDecl(C, RD, KmpCmplrdataTy);
  if (isOpenMPTaskLoopDirective(Kind)) {
    QualType KmpUInt64Ty =
        CGM.getContext().getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/0);
    QualType KmpInt64Ty =
        CGM.getContext().getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/1);
    addFieldToRecordDecl(C, RD, KmpUInt64Ty);
    addFieldToRecordDecl(C, RD, KmpUInt64Ty);
    addFieldToRecordDecl(C, RD, KmpInt64Ty);
    addFieldToRecordDecl(C, RD, KmpInt32Ty);
    addFieldToRecordDecl(C, RD, C.VoidPtrTy);
  }
  RD->completeDefinition();
  return RD;
}
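// Note: fields of this record are addressed positionally throughout this file
// via std::next(field_begin(), N) with indices such as KmpTaskTShareds,
// KmpTaskTPartId, KmpTaskTLowerBound, KmpTaskTReductions, etc., so the order
// of the addFieldToRecordDecl calls above must stay in sync with those
// indices.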
static RecordDecl *
createKmpTaskTWithPrivatesRecordDecl(CodeGenModule &CGM, QualType KmpTaskTQTy,
                                     ArrayRef<PrivateDataTy> Privates) {
  ASTContext &C = CGM.getContext();
  // Build struct kmp_task_t_with_privates {
  //         kmp_task_t task_data;
  //         .kmp_privates_t. privates;
  //       };
  RecordDecl *RD = C.buildImplicitRecord("kmp_task_t_with_privates");
  RD->startDefinition();
  addFieldToRecordDecl(C, RD, KmpTaskTQTy);
  if (const RecordDecl *PrivateRD = createPrivatesRecordDecl(CGM, Privates))
    addFieldToRecordDecl(C, RD, C.getRecordType(PrivateRD));
  RD->completeDefinition();
  return RD;
}
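// kmp_task_t is deliberately the first field: a kmp_task_t_with_privates * can
// be handed to the runtime as a plain kmp_task_t *, while the privates, when
// present, sit right behind the runtime-visible header and are reached through
// the second field (see emitProxyTaskFunction below).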
/// Emit a proxy function which accepts kmp_task_t as the second
/// argument.
/// \code
/// kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) {
///   TaskFunction(gtid, tt->part_id, &tt->privates, task_privates_map, tt,
///   For taskloops:
///   tt->task_data.lb, tt->task_data.ub, tt->task_data.st, tt->task_data.liter,
///   tt->reductions, tt->shareds);
///   return 0;
/// }
/// \endcode
static llvm::Value *
emitProxyTaskFunction(CodeGenModule &CGM, SourceLocation Loc,
                      OpenMPDirectiveKind Kind, QualType KmpInt32Ty,
                      QualType KmpTaskTWithPrivatesPtrQTy,
                      QualType KmpTaskTWithPrivatesQTy, QualType KmpTaskTQTy,
                      QualType SharedsPtrTy, llvm::Value *TaskFunction,
                      llvm::Value *TaskPrivatesMap) {
  ASTContext &C = CGM.getContext();
  FunctionArgList Args;
  ImplicitParamDecl GtidArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, KmpInt32Ty,
                            ImplicitParamDecl::Other);
  ImplicitParamDecl TaskTypeArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
                                KmpTaskTWithPrivatesPtrQTy.withRestrict(),
                                ImplicitParamDecl::Other);
  Args.push_back(&GtidArg);
  Args.push_back(&TaskTypeArg);
  const auto &TaskEntryFnInfo =
      CGM.getTypes().arrangeBuiltinFunctionDeclaration(KmpInt32Ty, Args);
  llvm::FunctionType *TaskEntryTy =
      CGM.getTypes().GetFunctionType(TaskEntryFnInfo);
  std::string Name = CGM.getOpenMPRuntime().getName({"omp_task_entry", ""});
  auto *TaskEntry = llvm::Function::Create(
      TaskEntryTy, llvm::GlobalValue::InternalLinkage, Name, &CGM.getModule());
  CGM.SetInternalFunctionAttributes(GlobalDecl(), TaskEntry, TaskEntryFnInfo);
  TaskEntry->setDoesNotRecurse();
  CodeGenFunction CGF(CGM);
  CGF.StartFunction(GlobalDecl(), KmpInt32Ty, TaskEntry, TaskEntryFnInfo, Args,
                    Loc, Loc);
  // TaskFunction(gtid, tt->task_data.part_id, &tt->privates, task_privates_map,
  // tt,
  // For taskloops:
  // tt->task_data.lb, tt->task_data.ub, tt->task_data.st, tt->task_data.liter,
  // tt->task_data.shareds);
  llvm::Value *GtidParam = CGF.EmitLoadOfScalar(
      CGF.GetAddrOfLocalVar(&GtidArg), /*Volatile=*/false, KmpInt32Ty, Loc);
  LValue TDBase = CGF.EmitLoadOfPointerLValue(
      CGF.GetAddrOfLocalVar(&TaskTypeArg),
      KmpTaskTWithPrivatesPtrQTy->castAs<PointerType>());
  const auto *KmpTaskTWithPrivatesQTyRD =
      cast<RecordDecl>(KmpTaskTWithPrivatesQTy->getAsTagDecl());
  LValue Base =
      CGF.EmitLValueForField(TDBase, *KmpTaskTWithPrivatesQTyRD->field_begin());
  const auto *KmpTaskTQTyRD = cast<RecordDecl>(KmpTaskTQTy->getAsTagDecl());
  auto PartIdFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTPartId);
  LValue PartIdLVal = CGF.EmitLValueForField(Base, *PartIdFI);
  llvm::Value *PartidParam = PartIdLVal.getPointer();
  auto SharedsFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTShareds);
  LValue SharedsLVal = CGF.EmitLValueForField(Base, *SharedsFI);
  llvm::Value *SharedsParam = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
      CGF.EmitLoadOfScalar(SharedsLVal, Loc),
      CGF.ConvertTypeForMem(SharedsPtrTy));
  auto PrivatesFI = std::next(KmpTaskTWithPrivatesQTyRD->field_begin(), 1);
  llvm::Value *PrivatesParam;
  if (PrivatesFI != KmpTaskTWithPrivatesQTyRD->field_end()) {
    LValue PrivatesLVal = CGF.EmitLValueForField(TDBase, *PrivatesFI);
    PrivatesParam = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
        PrivatesLVal.getPointer(), CGF.VoidPtrTy);
  } else {
    PrivatesParam = llvm::ConstantPointerNull::get(CGF.VoidPtrTy);
  }
  llvm::Value *CommonArgs[] = {GtidParam, PartidParam, PrivatesParam,
                               TaskPrivatesMap,
                               CGF.Builder
                                   .CreatePointerBitCastOrAddrSpaceCast(
                                       TDBase.getAddress(), CGF.VoidPtrTy)
                                   .getPointer()};
  SmallVector<llvm::Value *, 16> CallArgs(std::begin(CommonArgs),
                                          std::end(CommonArgs));
  if (isOpenMPTaskLoopDirective(Kind)) {
    auto LBFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTLowerBound);
    LValue LBLVal = CGF.EmitLValueForField(Base, *LBFI);
    llvm::Value *LBParam = CGF.EmitLoadOfScalar(LBLVal, Loc);
    auto UBFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTUpperBound);
    LValue UBLVal = CGF.EmitLValueForField(Base, *UBFI);
    llvm::Value *UBParam = CGF.EmitLoadOfScalar(UBLVal, Loc);
    auto StFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTStride);
    LValue StLVal = CGF.EmitLValueForField(Base, *StFI);
    llvm::Value *StParam = CGF.EmitLoadOfScalar(StLVal, Loc);
    auto LIFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTLastIter);
    LValue LILVal = CGF.EmitLValueForField(Base, *LIFI);
    llvm::Value *LIParam = CGF.EmitLoadOfScalar(LILVal, Loc);
    auto RFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTReductions);
    LValue RLVal = CGF.EmitLValueForField(Base, *RFI);
    llvm::Value *RParam = CGF.EmitLoadOfScalar(RLVal, Loc);
    CallArgs.push_back(LBParam);
    CallArgs.push_back(UBParam);
    CallArgs.push_back(StParam);
    CallArgs.push_back(LIParam);
    CallArgs.push_back(RParam);
  }
  CallArgs.push_back(SharedsParam);
  CGM.getOpenMPRuntime().emitOutlinedFunctionCall(CGF, Loc, TaskFunction,
                                                  CallArgs);
  CGF.EmitStoreThroughLValue(RValue::get(CGF.Builder.getInt32(/*C=*/0)),
                             CGF.MakeAddrLValue(CGF.ReturnValue, KmpInt32Ty));
  CGF.FinishFunction();
  return TaskEntry;
}
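/// Emit a destructor helper which the runtime invokes to destroy the privates
/// embedded in a task. A rough sketch of the emitted function (the shape is
/// illustrative, derived from the code below):
/// \code
/// kmp_int32 .omp_task_destructor.(kmp_int32 gtid,
///                                 kmp_task_t_with_privates *tt) {
///   // run the destructor of every field of tt->privates that needs one
/// }
/// \endcode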
static llvm::Value *emitDestructorsFunction(CodeGenModule &CGM,
                                            SourceLocation Loc,
                                            QualType KmpInt32Ty,
                                            QualType KmpTaskTWithPrivatesPtrQTy,
                                            QualType KmpTaskTWithPrivatesQTy) {
  ASTContext &C = CGM.getContext();
  FunctionArgList Args;
  ImplicitParamDecl GtidArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, KmpInt32Ty,
                            ImplicitParamDecl::Other);
  ImplicitParamDecl TaskTypeArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
                                KmpTaskTWithPrivatesPtrQTy.withRestrict(),
                                ImplicitParamDecl::Other);
  Args.push_back(&GtidArg);
  Args.push_back(&TaskTypeArg);
  const auto &DestructorFnInfo =
      CGM.getTypes().arrangeBuiltinFunctionDeclaration(KmpInt32Ty, Args);
  llvm::FunctionType *DestructorFnTy =
      CGM.getTypes().GetFunctionType(DestructorFnInfo);
  std::string Name =
      CGM.getOpenMPRuntime().getName({"omp_task_destructor", ""});
  auto *DestructorFn =
      llvm::Function::Create(DestructorFnTy, llvm::GlobalValue::InternalLinkage,
                             Name, &CGM.getModule());
  CGM.SetInternalFunctionAttributes(GlobalDecl(), DestructorFn,
                                    DestructorFnInfo);
  DestructorFn->setDoesNotRecurse();
  CodeGenFunction CGF(CGM);
  CGF.StartFunction(GlobalDecl(), KmpInt32Ty, DestructorFn, DestructorFnInfo,
                    Args, Loc, Loc);
  LValue Base = CGF.EmitLoadOfPointerLValue(
      CGF.GetAddrOfLocalVar(&TaskTypeArg),
      KmpTaskTWithPrivatesPtrQTy->castAs<PointerType>());
  const auto *KmpTaskTWithPrivatesQTyRD =
      cast<RecordDecl>(KmpTaskTWithPrivatesQTy->getAsTagDecl());
  auto FI = std::next(KmpTaskTWithPrivatesQTyRD->field_begin());
  Base = CGF.EmitLValueForField(Base, *FI);
  for (const auto *Field :
       cast<RecordDecl>(FI->getType()->getAsTagDecl())->fields()) {
    if (QualType::DestructionKind DtorKind =
            Field->getType().isDestructedType()) {
      LValue FieldLValue = CGF.EmitLValueForField(Base, Field);
      CGF.pushDestroy(DtorKind, FieldLValue.getAddress(), Field->getType());
    }
  }
  CGF.FinishFunction();
  return DestructorFn;
}
/// Emit a privates mapping function for correct handling of private and
/// firstprivate variables.
/// \code
/// void .omp_task_privates_map.(const .privates. *noalias privs, <ty1>
/// **noalias priv1,..., <tyn> **noalias privn) {
///   *priv1 = &.privates.priv1;
///   ...;
///   *privn = &.privates.privn;
/// }
/// \endcode
static llvm::Value *
emitTaskPrivateMappingFunction(CodeGenModule &CGM, SourceLocation Loc,
                               ArrayRef<const Expr *> PrivateVars,
                               ArrayRef<const Expr *> FirstprivateVars,
                               ArrayRef<const Expr *> LastprivateVars,
                               QualType PrivatesQTy,
                               ArrayRef<PrivateDataTy> Privates) {
  ASTContext &C = CGM.getContext();
  FunctionArgList Args;
  ImplicitParamDecl TaskPrivatesArg(
      C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
      C.getPointerType(PrivatesQTy).withConst().withRestrict(),
      ImplicitParamDecl::Other);
  Args.push_back(&TaskPrivatesArg);
  llvm::DenseMap<const VarDecl *, unsigned> PrivateVarsPos;
  unsigned Counter = 1;
  for (const Expr *E : PrivateVars) {
    Args.push_back(ImplicitParamDecl::Create(
        C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
        C.getPointerType(C.getPointerType(E->getType()))
            .withConst()
            .withRestrict(),
        ImplicitParamDecl::Other));
    const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
    PrivateVarsPos[VD] = Counter;
    ++Counter;
  }
  for (const Expr *E : FirstprivateVars) {
    Args.push_back(ImplicitParamDecl::Create(
        C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
        C.getPointerType(C.getPointerType(E->getType()))
            .withConst()
            .withRestrict(),
        ImplicitParamDecl::Other));
    const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
    PrivateVarsPos[VD] = Counter;
    ++Counter;
  }
  for (const Expr *E : LastprivateVars) {
    Args.push_back(ImplicitParamDecl::Create(
        C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
        C.getPointerType(C.getPointerType(E->getType()))
            .withConst()
            .withRestrict(),
        ImplicitParamDecl::Other));
    const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
    PrivateVarsPos[VD] = Counter;
    ++Counter;
  }
  const auto &TaskPrivatesMapFnInfo =
      CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
  llvm::FunctionType *TaskPrivatesMapTy =
      CGM.getTypes().GetFunctionType(TaskPrivatesMapFnInfo);
  std::string Name =
      CGM.getOpenMPRuntime().getName({"omp_task_privates_map", ""});
  auto *TaskPrivatesMap = llvm::Function::Create(
      TaskPrivatesMapTy, llvm::GlobalValue::InternalLinkage, Name,
      &CGM.getModule());
  CGM.SetInternalFunctionAttributes(GlobalDecl(), TaskPrivatesMap,
                                    TaskPrivatesMapFnInfo);
  TaskPrivatesMap->removeFnAttr(llvm::Attribute::NoInline);
  TaskPrivatesMap->removeFnAttr(llvm::Attribute::OptimizeNone);
  TaskPrivatesMap->addFnAttr(llvm::Attribute::AlwaysInline);
  CodeGenFunction CGF(CGM);
  CGF.StartFunction(GlobalDecl(), C.VoidTy, TaskPrivatesMap,
                    TaskPrivatesMapFnInfo, Args, Loc, Loc);
  // *privi = &.privates.privi;
  LValue Base = CGF.EmitLoadOfPointerLValue(
      CGF.GetAddrOfLocalVar(&TaskPrivatesArg),
      TaskPrivatesArg.getType()->castAs<PointerType>());
  const auto *PrivatesQTyRD = cast<RecordDecl>(PrivatesQTy->getAsTagDecl());
  Counter = 0;
  for (const FieldDecl *Field : PrivatesQTyRD->fields()) {
    LValue FieldLVal = CGF.EmitLValueForField(Base, Field);
    const VarDecl *VD = Args[PrivateVarsPos[Privates[Counter].second.Original]];
    LValue RefLVal =
        CGF.MakeAddrLValue(CGF.GetAddrOfLocalVar(VD), VD->getType());
    LValue RefLoadLVal = CGF.EmitLoadOfPointerLValue(
        RefLVal.getAddress(), RefLVal.getType()->castAs<PointerType>());
    CGF.EmitStoreOfScalar(FieldLVal.getPointer(), RefLoadLVal);
    ++Counter;
  }
  CGF.FinishFunction();
  return TaskPrivatesMap;
}
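/// Orders two privates by decreasing alignment. Used with std::stable_sort in
/// emitTaskInit below so that the most strictly aligned privates come first in
/// .kmp_privates.t, which helps keep padding between the fields to a minimum.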
static bool stable_sort_comparator(const PrivateDataTy P1,
                                   const PrivateDataTy P2) {
  return P1.first > P2.first;
}
/// Emit initialization for private variables in task-based directives.
static void emitPrivatesInit(CodeGenFunction &CGF,
                             const OMPExecutableDirective &D,
                             Address KmpTaskSharedsPtr, LValue TDBase,
                             const RecordDecl *KmpTaskTWithPrivatesQTyRD,
                             QualType SharedsTy, QualType SharedsPtrTy,
                             const OMPTaskDataTy &Data,
                             ArrayRef<PrivateDataTy> Privates, bool ForDup) {
  ASTContext &C = CGF.getContext();
  auto FI = std::next(KmpTaskTWithPrivatesQTyRD->field_begin());
  LValue PrivatesBase = CGF.EmitLValueForField(TDBase, *FI);
  OpenMPDirectiveKind Kind = isOpenMPTaskLoopDirective(D.getDirectiveKind())
                                 ? OMPD_taskloop
                                 : OMPD_task;
  const CapturedStmt &CS = *D.getCapturedStmt(Kind);
  CodeGenFunction::CGCapturedStmtInfo CapturesInfo(CS);
  LValue SrcBase;
  bool IsTargetTask =
      isOpenMPTargetDataManagementDirective(D.getDirectiveKind()) ||
      isOpenMPTargetExecutionDirective(D.getDirectiveKind());
  // For target-based directives skip 3 firstprivate arrays BasePointersArray,
  // PointersArray and SizesArray. The original variables for these arrays are
  // not captured and we get their addresses explicitly.
  if ((!IsTargetTask && !Data.FirstprivateVars.empty()) ||
      (IsTargetTask && KmpTaskSharedsPtr.isValid())) {
    SrcBase = CGF.MakeAddrLValue(
        CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
            KmpTaskSharedsPtr, CGF.ConvertTypeForMem(SharedsPtrTy)),
        SharedsTy);
  }
  FI = cast<RecordDecl>(FI->getType()->getAsTagDecl())->field_begin();
  for (const PrivateDataTy &Pair : Privates) {
    const VarDecl *VD = Pair.second.PrivateCopy;
    const Expr *Init = VD->getAnyInitializer();
    if (Init && (!ForDup || (isa<CXXConstructExpr>(Init) &&
                             !CGF.isTrivialInitializer(Init)))) {
      LValue PrivateLValue = CGF.EmitLValueForField(PrivatesBase, *FI);
      if (const VarDecl *Elem = Pair.second.PrivateElemInit) {
        const VarDecl *OriginalVD = Pair.second.Original;
        // Check if the variable is the target-based BasePointersArray,
        // PointersArray or SizesArray.
        LValue SharedRefLValue;
        QualType Type = OriginalVD->getType();
        const FieldDecl *SharedField = CapturesInfo.lookup(OriginalVD);
        if (IsTargetTask && !SharedField) {
          assert(isa<ImplicitParamDecl>(OriginalVD) &&
                 isa<CapturedDecl>(OriginalVD->getDeclContext()) &&
                 cast<CapturedDecl>(OriginalVD->getDeclContext())
                         ->getNumParams() == 0 &&
                 isa<TranslationUnitDecl>(
                     cast<CapturedDecl>(OriginalVD->getDeclContext())
                         ->getDeclContext()) &&
                 "Expected artificial target data variable.");
          SharedRefLValue =
              CGF.MakeAddrLValue(CGF.GetAddrOfLocalVar(OriginalVD), Type);
        } else {
          SharedRefLValue = CGF.EmitLValueForField(SrcBase, SharedField);
          SharedRefLValue = CGF.MakeAddrLValue(
              Address(SharedRefLValue.getPointer(), C.getDeclAlign(OriginalVD)),
              SharedRefLValue.getType(), LValueBaseInfo(AlignmentSource::Decl),
              SharedRefLValue.getTBAAInfo());
        }
        if (Type->isArrayType()) {
          // Initialize firstprivate array.
          if (!isa<CXXConstructExpr>(Init) || CGF.isTrivialInitializer(Init)) {
            // Perform simple memcpy.
            CGF.EmitAggregateAssign(PrivateLValue, SharedRefLValue, Type);
          } else {
            // Initialize firstprivate array using element-by-element
            // initialization.
            CGF.EmitOMPAggregateAssign(
                PrivateLValue.getAddress(), SharedRefLValue.getAddress(), Type,
                [&CGF, Elem, Init, &CapturesInfo](Address DestElement,
                                                  Address SrcElement) {
                  // Clean up any temporaries needed by the initialization.
                  CodeGenFunction::OMPPrivateScope InitScope(CGF);
                  InitScope.addPrivate(
                      Elem, [SrcElement]() -> Address { return SrcElement; });
                  (void)InitScope.Privatize();
                  // Emit initialization for single element.
                  CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(
                      CGF, &CapturesInfo);
                  CGF.EmitAnyExprToMem(Init, DestElement,
                                       Init->getType().getQualifiers(),
                                       /*IsInitializer=*/false);
                });
          }
        } else {
          CodeGenFunction::OMPPrivateScope InitScope(CGF);
          InitScope.addPrivate(Elem, [SharedRefLValue]() -> Address {
            return SharedRefLValue.getAddress();
          });
          (void)InitScope.Privatize();
          CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CapturesInfo);
          CGF.EmitExprAsInit(Init, VD, PrivateLValue,
                             /*capturedByInit=*/false);
        }
      } else {
        CGF.EmitExprAsInit(Init, VD, PrivateLValue, /*capturedByInit=*/false);
      }
    }
    ++FI;
  }
}
/// Check if a task duplication function is required for taskloops: returns
/// true if any private copy is initialized by a non-trivial C++ constructor
/// call.
static bool checkInitIsRequired(CodeGenFunction &CGF,
                                ArrayRef<PrivateDataTy> Privates) {
  bool InitRequired = false;
  for (const PrivateDataTy &Pair : Privates) {
    const VarDecl *VD = Pair.second.PrivateCopy;
    const Expr *Init = VD->getAnyInitializer();
    InitRequired = InitRequired || (Init && isa<CXXConstructExpr>(Init) &&
                                    !CGF.isTrivialInitializer(Init));
    if (InitRequired)
      break;
  }
  return InitRequired;
}
/// Emit task_dup function (for initialization of
/// private/firstprivate/lastprivate vars and last_iter flag)
/// \code
/// void __task_dup_entry(kmp_task_t *task_dst, const kmp_task_t *task_src, int
/// lastpriv) {
///   // setup lastprivate flag
///   task_dst->last = lastpriv;
///   // could be constructor calls here...
/// }
/// \endcode
static llvm::Value *
emitTaskDupFunction(CodeGenModule &CGM, SourceLocation Loc,
                    const OMPExecutableDirective &D,
                    QualType KmpTaskTWithPrivatesPtrQTy,
                    const RecordDecl *KmpTaskTWithPrivatesQTyRD,
                    const RecordDecl *KmpTaskTQTyRD, QualType SharedsTy,
                    QualType SharedsPtrTy, const OMPTaskDataTy &Data,
                    ArrayRef<PrivateDataTy> Privates, bool WithLastIter) {
  ASTContext &C = CGM.getContext();
  FunctionArgList Args;
  ImplicitParamDecl DstArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
                           KmpTaskTWithPrivatesPtrQTy,
                           ImplicitParamDecl::Other);
  ImplicitParamDecl SrcArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
                           KmpTaskTWithPrivatesPtrQTy,
                           ImplicitParamDecl::Other);
  ImplicitParamDecl LastprivArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.IntTy,
                                ImplicitParamDecl::Other);
  Args.push_back(&DstArg);
  Args.push_back(&SrcArg);
  Args.push_back(&LastprivArg);
  const auto &TaskDupFnInfo =
      CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
  llvm::FunctionType *TaskDupTy = CGM.getTypes().GetFunctionType(TaskDupFnInfo);
  std::string Name = CGM.getOpenMPRuntime().getName({"omp_task_dup", ""});
  auto *TaskDup = llvm::Function::Create(
      TaskDupTy, llvm::GlobalValue::InternalLinkage, Name, &CGM.getModule());
  CGM.SetInternalFunctionAttributes(GlobalDecl(), TaskDup, TaskDupFnInfo);
  TaskDup->setDoesNotRecurse();
  CodeGenFunction CGF(CGM);
  CGF.StartFunction(GlobalDecl(), C.VoidTy, TaskDup, TaskDupFnInfo, Args, Loc,
                    Loc);
  LValue TDBase = CGF.EmitLoadOfPointerLValue(
      CGF.GetAddrOfLocalVar(&DstArg),
      KmpTaskTWithPrivatesPtrQTy->castAs<PointerType>());
  // task_dst->liter = lastpriv;
  if (WithLastIter) {
    auto LIFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTLastIter);
    LValue Base = CGF.EmitLValueForField(
        TDBase, *KmpTaskTWithPrivatesQTyRD->field_begin());
    LValue LILVal = CGF.EmitLValueForField(Base, *LIFI);
    llvm::Value *Lastpriv = CGF.EmitLoadOfScalar(
        CGF.GetAddrOfLocalVar(&LastprivArg), /*Volatile=*/false, C.IntTy, Loc);
    CGF.EmitStoreOfScalar(Lastpriv, LILVal);
  }
  // Emit initial values for private copies (if any).
  assert(!Privates.empty());
  Address KmpTaskSharedsPtr = Address::invalid();
  if (!Data.FirstprivateVars.empty()) {
    LValue TDBase = CGF.EmitLoadOfPointerLValue(
        CGF.GetAddrOfLocalVar(&SrcArg),
        KmpTaskTWithPrivatesPtrQTy->castAs<PointerType>());
    LValue Base = CGF.EmitLValueForField(
        TDBase, *KmpTaskTWithPrivatesQTyRD->field_begin());
    KmpTaskSharedsPtr = Address(
        CGF.EmitLoadOfScalar(CGF.EmitLValueForField(
                                 Base, *std::next(KmpTaskTQTyRD->field_begin(),
                                                  KmpTaskTShareds)),
                             Loc),
        CGF.getNaturalTypeAlignment(SharedsTy));
  }
  emitPrivatesInit(CGF, D, KmpTaskSharedsPtr, TDBase, KmpTaskTWithPrivatesQTyRD,
                   SharedsTy, SharedsPtrTy, Data, Privates, /*ForDup=*/true);
  CGF.FinishFunction();
  return TaskDup;
}
/// Checks if destructor function is required to be generated.
/// \return true if cleanups are required, false otherwise.
static bool
checkDestructorsRequired(const RecordDecl *KmpTaskTWithPrivatesQTyRD) {
  bool NeedsCleanup = false;
  auto FI = std::next(KmpTaskTWithPrivatesQTyRD->field_begin(), 1);
  const auto *PrivateRD = cast<RecordDecl>(FI->getType()->getAsTagDecl());
  for (const FieldDecl *FD : PrivateRD->fields()) {
    NeedsCleanup = NeedsCleanup || FD->getType().isDestructedType();
    if (NeedsCleanup)
      break;
  }
  return NeedsCleanup;
}
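/// Sets up a new task: allocates the kmp_task_t_with_privates record, copies
/// the captured shareds into it, initializes the private copies, and wires up
/// the helper functions the runtime needs. A rough sketch of what gets emitted
/// (pseudo-C; names are illustrative):
/// \code
/// kmp_task_t *t = __kmpc_omp_task_alloc(loc, gtid, flags,
///     sizeof(kmp_task_t_with_privates), sizeof(shareds), .omp_task_entry.);
/// memcpy(t->shareds, &captured_shareds, sizeof(shareds));
/// // initialize t->privates; record destructor/priority in data1/data2
/// \endcode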
CGOpenMPRuntime::TaskResultTy
CGOpenMPRuntime::emitTaskInit(CodeGenFunction &CGF, SourceLocation Loc,
                              const OMPExecutableDirective &D,
                              llvm::Value *TaskFunction, QualType SharedsTy,
                              Address Shareds, const OMPTaskDataTy &Data) {
  ASTContext &C = CGM.getContext();
  llvm::SmallVector<PrivateDataTy, 4> Privates;
  // Aggregate privates and sort them by the alignment.
  auto I = Data.PrivateCopies.begin();
  for (const Expr *E : Data.PrivateVars) {
    const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
    Privates.emplace_back(
        C.getDeclAlign(VD),
        PrivateHelpersTy(VD, cast<VarDecl>(cast<DeclRefExpr>(*I)->getDecl()),
                         /*PrivateElemInit=*/nullptr));
    ++I;
  }
  I = Data.FirstprivateCopies.begin();
  auto IElemInitRef = Data.FirstprivateInits.begin();
  for (const Expr *E : Data.FirstprivateVars) {
    const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
    Privates.emplace_back(
        C.getDeclAlign(VD),
        PrivateHelpersTy(
            VD, cast<VarDecl>(cast<DeclRefExpr>(*I)->getDecl()),
            cast<VarDecl>(cast<DeclRefExpr>(*IElemInitRef)->getDecl())));
    ++I;
    ++IElemInitRef;
  }
  I = Data.LastprivateCopies.begin();
  for (const Expr *E : Data.LastprivateVars) {
    const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
    Privates.emplace_back(
        C.getDeclAlign(VD),
        PrivateHelpersTy(VD, cast<VarDecl>(cast<DeclRefExpr>(*I)->getDecl()),
                         /*PrivateElemInit=*/nullptr));
    ++I;
  }
  std::stable_sort(Privates.begin(), Privates.end(), stable_sort_comparator);
  QualType KmpInt32Ty = C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1);
  // Build type kmp_routine_entry_t (if not built yet).
  emitKmpRoutineEntryT(KmpInt32Ty);
  // Build type kmp_task_t (if not built yet).
  if (isOpenMPTaskLoopDirective(D.getDirectiveKind())) {
    if (SavedKmpTaskloopTQTy.isNull()) {
      SavedKmpTaskloopTQTy = C.getRecordType(createKmpTaskTRecordDecl(
          CGM, D.getDirectiveKind(), KmpInt32Ty, KmpRoutineEntryPtrQTy));
    }
    KmpTaskTQTy = SavedKmpTaskloopTQTy;
  } else {
    assert((D.getDirectiveKind() == OMPD_task ||
            isOpenMPTargetExecutionDirective(D.getDirectiveKind()) ||
            isOpenMPTargetDataManagementDirective(D.getDirectiveKind())) &&
           "Expected taskloop, task or target directive");
    if (SavedKmpTaskTQTy.isNull()) {
      SavedKmpTaskTQTy = C.getRecordType(createKmpTaskTRecordDecl(
          CGM, D.getDirectiveKind(), KmpInt32Ty, KmpRoutineEntryPtrQTy));
    }
    KmpTaskTQTy = SavedKmpTaskTQTy;
  }
  const auto *KmpTaskTQTyRD = cast<RecordDecl>(KmpTaskTQTy->getAsTagDecl());
  // Build particular struct kmp_task_t for the given task.
  const RecordDecl *KmpTaskTWithPrivatesQTyRD =
      createKmpTaskTWithPrivatesRecordDecl(CGM, KmpTaskTQTy, Privates);
  QualType KmpTaskTWithPrivatesQTy = C.getRecordType(KmpTaskTWithPrivatesQTyRD);
  QualType KmpTaskTWithPrivatesPtrQTy =
      C.getPointerType(KmpTaskTWithPrivatesQTy);
  llvm::Type *KmpTaskTWithPrivatesTy = CGF.ConvertType(KmpTaskTWithPrivatesQTy);
  llvm::Type *KmpTaskTWithPrivatesPtrTy =
      KmpTaskTWithPrivatesTy->getPointerTo();
  llvm::Value *KmpTaskTWithPrivatesTySize =
      CGF.getTypeSize(KmpTaskTWithPrivatesQTy);
  QualType SharedsPtrTy = C.getPointerType(SharedsTy);
  // Emit initial values for private copies (if any).
  llvm::Value *TaskPrivatesMap = nullptr;
  llvm::Type *TaskPrivatesMapTy =
      std::next(cast<llvm::Function>(TaskFunction)->arg_begin(), 3)->getType();
  if (!Privates.empty()) {
    auto FI = std::next(KmpTaskTWithPrivatesQTyRD->field_begin());
    TaskPrivatesMap = emitTaskPrivateMappingFunction(
        CGM, Loc, Data.PrivateVars, Data.FirstprivateVars, Data.LastprivateVars,
        FI->getType(), Privates);
    TaskPrivatesMap = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
        TaskPrivatesMap, TaskPrivatesMapTy);
  } else {
    TaskPrivatesMap = llvm::ConstantPointerNull::get(
        cast<llvm::PointerType>(TaskPrivatesMapTy));
  }
  // Build a proxy function kmp_int32 .omp_task_entry.(kmp_int32 gtid,
  // kmp_task_t *tt);
  llvm::Value *TaskEntry = emitProxyTaskFunction(
      CGM, Loc, D.getDirectiveKind(), KmpInt32Ty, KmpTaskTWithPrivatesPtrQTy,
      KmpTaskTWithPrivatesQTy, KmpTaskTQTy, SharedsPtrTy, TaskFunction,
      TaskPrivatesMap);
  // Build call kmp_task_t * __kmpc_omp_task_alloc(ident_t *, kmp_int32 gtid,
  // kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds,
  // kmp_routine_entry_t *task_entry);
  // Task flags. Format is taken from
  // http://llvm.org/svn/llvm-project/openmp/trunk/runtime/src/kmp.h,
  // description of kmp_tasking_flags struct.
  enum {
    TiedFlag = 0x1,
    FinalFlag = 0x2,
    DestructorsFlag = 0x8,
    PriorityFlag = 0x20
  };
  unsigned Flags = Data.Tied ? TiedFlag : 0;
  bool NeedsCleanup = false;
  if (!Privates.empty()) {
    NeedsCleanup = checkDestructorsRequired(KmpTaskTWithPrivatesQTyRD);
    if (NeedsCleanup)
      Flags = Flags | DestructorsFlag;
  }
  if (Data.Priority.getInt())
    Flags = Flags | PriorityFlag;
  llvm::Value *TaskFlags =
      Data.Final.getPointer()
          ? CGF.Builder.CreateSelect(Data.Final.getPointer(),
                                     CGF.Builder.getInt32(FinalFlag),
                                     CGF.Builder.getInt32(/*C=*/0))
          : CGF.Builder.getInt32(Data.Final.getInt() ? FinalFlag : 0);
  TaskFlags = CGF.Builder.CreateOr(TaskFlags, CGF.Builder.getInt32(Flags));
  llvm::Value *SharedsSize = CGM.getSize(C.getTypeSizeInChars(SharedsTy));
  llvm::Value *AllocArgs[] = {emitUpdateLocation(CGF, Loc),
                              getThreadID(CGF, Loc), TaskFlags,
                              KmpTaskTWithPrivatesTySize, SharedsSize,
                              CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
                                  TaskEntry, KmpRoutineEntryPtrTy)};
  llvm::Value *NewTask = CGF.EmitRuntimeCall(
      createRuntimeFunction(OMPRTL__kmpc_omp_task_alloc), AllocArgs);
  llvm::Value *NewTaskNewTaskTTy =
      CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
          NewTask, KmpTaskTWithPrivatesPtrTy);
  LValue Base = CGF.MakeNaturalAlignAddrLValue(NewTaskNewTaskTTy,
                                               KmpTaskTWithPrivatesQTy);
  LValue TDBase =
      CGF.EmitLValueForField(Base, *KmpTaskTWithPrivatesQTyRD->field_begin());
  // Fill the data in the resulting kmp_task_t record.
  // Copy shareds if there are any.
  Address KmpTaskSharedsPtr = Address::invalid();
  if (!SharedsTy->getAsStructureType()->getDecl()->field_empty()) {
    KmpTaskSharedsPtr =
        Address(CGF.EmitLoadOfScalar(
                    CGF.EmitLValueForField(
                        TDBase, *std::next(KmpTaskTQTyRD->field_begin(),
                                           KmpTaskTShareds)),
                    Loc),
                CGF.getNaturalTypeAlignment(SharedsTy));
    LValue Dest = CGF.MakeAddrLValue(KmpTaskSharedsPtr, SharedsTy);
    LValue Src = CGF.MakeAddrLValue(Shareds, SharedsTy);
    CGF.EmitAggregateCopy(Dest, Src, SharedsTy, AggValueSlot::DoesNotOverlap);
  }
  // Emit initial values for private copies (if any).
  TaskResultTy Result;
  if (!Privates.empty()) {
    emitPrivatesInit(CGF, D, KmpTaskSharedsPtr, Base, KmpTaskTWithPrivatesQTyRD,
                     SharedsTy, SharedsPtrTy, Data, Privates,
                     /*ForDup=*/false);
    if (isOpenMPTaskLoopDirective(D.getDirectiveKind()) &&
        (!Data.LastprivateVars.empty() || checkInitIsRequired(CGF, Privates))) {
      Result.TaskDupFn = emitTaskDupFunction(
          CGM, Loc, D, KmpTaskTWithPrivatesPtrQTy, KmpTaskTWithPrivatesQTyRD,
          KmpTaskTQTyRD, SharedsTy, SharedsPtrTy, Data, Privates,
          /*WithLastIter=*/!Data.LastprivateVars.empty());
    }
  }
  // Fields of union "kmp_cmplrdata_t" for destructors and priority.
  enum { Priority = 0, Destructors = 1 };
  // Provide pointer to function with destructors for privates.
  auto FI = std::next(KmpTaskTQTyRD->field_begin(), Data1);
  const RecordDecl *KmpCmplrdataUD =
      (*FI)->getType()->getAsUnionType()->getDecl();
  if (NeedsCleanup) {
    llvm::Value *DestructorFn = emitDestructorsFunction(
        CGM, Loc, KmpInt32Ty, KmpTaskTWithPrivatesPtrQTy,
        KmpTaskTWithPrivatesQTy);
    LValue Data1LV = CGF.EmitLValueForField(TDBase, *FI);
    LValue DestructorsLV = CGF.EmitLValueForField(
        Data1LV, *std::next(KmpCmplrdataUD->field_begin(), Destructors));
    CGF.EmitStoreOfScalar(CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
                              DestructorFn, KmpRoutineEntryPtrTy),
                          DestructorsLV);
  }
  // Set priority.
  if (Data.Priority.getInt()) {
    LValue Data2LV = CGF.EmitLValueForField(
        TDBase, *std::next(KmpTaskTQTyRD->field_begin(), Data2));
    LValue PriorityLV = CGF.EmitLValueForField(
        Data2LV, *std::next(KmpCmplrdataUD->field_begin(), Priority));
    CGF.EmitStoreOfScalar(Data.Priority.getPointer(), PriorityLV);
  }
  Result.NewTask = NewTask;
  Result.TaskEntry = TaskEntry;
  Result.NewTaskNewTaskTTy = NewTaskNewTaskTTy;
  Result.TDBase = TDBase;
  Result.KmpTaskTQTyRD = KmpTaskTQTyRD;
  return Result;
}
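/// Emits the actual `task` construct. With an if clause the generated code
/// roughly takes the form below (a sketch derived from ThenCodeGen and
/// ElseCodeGen in the function body; `t` stands for the allocated task):
/// \code
/// if (<IfCond>) {
///   __kmpc_omp_task(loc, gtid, t);          // or __kmpc_omp_task_with_deps
/// } else {
///   __kmpc_omp_wait_deps(...);              // only with depend clauses
///   __kmpc_omp_task_begin_if0(loc, gtid, t);
///   .omp_task_entry.(gtid, t);              // serialized execution
///   __kmpc_omp_task_complete_if0(loc, gtid, t);
/// }
/// \endcode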
void CGOpenMPRuntime::emitTaskCall(CodeGenFunction &CGF, SourceLocation Loc,
                                   const OMPExecutableDirective &D,
                                   llvm::Value *TaskFunction,
                                   QualType SharedsTy, Address Shareds,
                                   const Expr *IfCond,
                                   const OMPTaskDataTy &Data) {
  if (!CGF.HaveInsertPoint())
    return;
  TaskResultTy Result =
      emitTaskInit(CGF, Loc, D, TaskFunction, SharedsTy, Shareds, Data);
  llvm::Value *NewTask = Result.NewTask;
  llvm::Value *TaskEntry = Result.TaskEntry;
  llvm::Value *NewTaskNewTaskTTy = Result.NewTaskNewTaskTTy;
  LValue TDBase = Result.TDBase;
  const RecordDecl *KmpTaskTQTyRD = Result.KmpTaskTQTyRD;
  ASTContext &C = CGM.getContext();
  // Process list of dependences.
  Address DependenciesArray = Address::invalid();
  unsigned NumDependencies = Data.Dependences.size();
  if (NumDependencies) {
    // Dependence kind for RTL.
    enum RTLDependenceKindTy { DepIn = 0x01, DepInOut = 0x3 };
    enum RTLDependInfoFieldsTy { BaseAddr, Len, Flags };
    RecordDecl *KmpDependInfoRD;
    QualType FlagsTy =
        C.getIntTypeForBitwidth(C.getTypeSize(C.BoolTy), /*Signed=*/false);
    llvm::Type *LLVMFlagsTy = CGF.ConvertTypeForMem(FlagsTy);
    if (KmpDependInfoTy.isNull()) {
      KmpDependInfoRD = C.buildImplicitRecord("kmp_depend_info");
      KmpDependInfoRD->startDefinition();
      addFieldToRecordDecl(C, KmpDependInfoRD, C.getIntPtrType());
      addFieldToRecordDecl(C, KmpDependInfoRD, C.getSizeType());
      addFieldToRecordDecl(C, KmpDependInfoRD, FlagsTy);
      KmpDependInfoRD->completeDefinition();
      KmpDependInfoTy = C.getRecordType(KmpDependInfoRD);
    } else {
      KmpDependInfoRD = cast<RecordDecl>(KmpDependInfoTy->getAsTagDecl());
    }
    CharUnits DependencySize = C.getTypeSizeInChars(KmpDependInfoTy);
    // Define type kmp_depend_info[<Dependences.size()>];
    QualType KmpDependInfoArrayTy = C.getConstantArrayType(
        KmpDependInfoTy, llvm::APInt(/*numBits=*/64, NumDependencies),
        ArrayType::Normal, /*IndexTypeQuals=*/0);
    // kmp_depend_info[<Dependences.size()>] deps;
    DependenciesArray =
        CGF.CreateMemTemp(KmpDependInfoArrayTy, ".dep.arr.addr");
    for (unsigned I = 0; I < NumDependencies; ++I) {
      const Expr *E = Data.Dependences[I].second;
      LValue Addr = CGF.EmitLValue(E);
      llvm::Value *Size;
      QualType Ty = E->getType();
      if (const auto *ASE =
              dyn_cast<OMPArraySectionExpr>(E->IgnoreParenImpCasts())) {
        LValue UpAddrLVal =
            CGF.EmitOMPArraySectionExpr(ASE, /*LowerBound=*/false);
        llvm::Value *UpAddr =
            CGF.Builder.CreateConstGEP1_32(UpAddrLVal.getPointer(), /*Idx0=*/1);
        llvm::Value *LowIntPtr =
            CGF.Builder.CreatePtrToInt(Addr.getPointer(), CGM.SizeTy);
        llvm::Value *UpIntPtr = CGF.Builder.CreatePtrToInt(UpAddr, CGM.SizeTy);
        Size = CGF.Builder.CreateNUWSub(UpIntPtr, LowIntPtr);
      } else {
        Size = CGF.getTypeSize(Ty);
      }
      LValue Base = CGF.MakeAddrLValue(
          CGF.Builder.CreateConstArrayGEP(DependenciesArray, I, DependencySize),
          KmpDependInfoTy);
      // deps[i].base_addr = &<Dependences[i].second>;
      LValue BaseAddrLVal = CGF.EmitLValueForField(
          Base, *std::next(KmpDependInfoRD->field_begin(), BaseAddr));
      CGF.EmitStoreOfScalar(
          CGF.Builder.CreatePtrToInt(Addr.getPointer(), CGF.IntPtrTy),
          BaseAddrLVal);
      // deps[i].len = sizeof(<Dependences[i].second>);
      LValue LenLVal = CGF.EmitLValueForField(
          Base, *std::next(KmpDependInfoRD->field_begin(), Len));
      CGF.EmitStoreOfScalar(Size, LenLVal);
      // deps[i].flags = <Dependences[i].first>;
      RTLDependenceKindTy DepKind;
      switch (Data.Dependences[I].first) {
      case OMPC_DEPEND_in:
        DepKind = DepIn;
        break;
      // Out and InOut dependencies must use the same code.
      case OMPC_DEPEND_out:
      case OMPC_DEPEND_inout:
        DepKind = DepInOut;
        break;
      case OMPC_DEPEND_source:
      case OMPC_DEPEND_sink:
      case OMPC_DEPEND_unknown:
        llvm_unreachable("Unknown task dependence type");
      }
      LValue FlagsLVal = CGF.EmitLValueForField(
          Base, *std::next(KmpDependInfoRD->field_begin(), Flags));
      CGF.EmitStoreOfScalar(llvm::ConstantInt::get(LLVMFlagsTy, DepKind),
                            FlagsLVal);
    }
    DependenciesArray = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
        CGF.Builder.CreateStructGEP(DependenciesArray, 0, CharUnits::Zero()),
        CGF.VoidPtrTy);
  }
  // NOTE: routine and part_id fields are initialized by __kmpc_omp_task_alloc()
  // libcall.
  // Build kmp_int32 __kmpc_omp_task_with_deps(ident_t *, kmp_int32 gtid,
  // kmp_task_t *new_task, kmp_int32 ndeps, kmp_depend_info_t *dep_list,
  // kmp_int32 ndeps_noalias, kmp_depend_info_t *noalias_dep_list) if dependence
  // list is not empty
  llvm::Value *ThreadID = getThreadID(CGF, Loc);
  llvm::Value *UpLoc = emitUpdateLocation(CGF, Loc);
  llvm::Value *TaskArgs[] = { UpLoc, ThreadID, NewTask };
  llvm::Value *DepTaskArgs[7];
  if (NumDependencies) {
    DepTaskArgs[0] = UpLoc;
    DepTaskArgs[1] = ThreadID;
    DepTaskArgs[2] = NewTask;
    DepTaskArgs[3] = CGF.Builder.getInt32(NumDependencies);
    DepTaskArgs[4] = DependenciesArray.getPointer();
    DepTaskArgs[5] = CGF.Builder.getInt32(0);
    DepTaskArgs[6] = llvm::ConstantPointerNull::get(CGF.VoidPtrTy);
  }
  auto &&ThenCodeGen = [this, &Data, TDBase, KmpTaskTQTyRD, NumDependencies,
                        &TaskArgs,
                        &DepTaskArgs](CodeGenFunction &CGF, PrePostActionTy &) {
    if (!Data.Tied) {
      auto PartIdFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTPartId);
      LValue PartIdLVal = CGF.EmitLValueForField(TDBase, *PartIdFI);
      CGF.EmitStoreOfScalar(CGF.Builder.getInt32(0), PartIdLVal);
    }
    if (NumDependencies) {
      CGF.EmitRuntimeCall(
          createRuntimeFunction(OMPRTL__kmpc_omp_task_with_deps), DepTaskArgs);
    } else {
      CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_omp_task),
                          TaskArgs);
    }
    // Check if the parent region is untied and build the return for an untied
    // task.
    if (auto *Region =
            dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo))
      Region->emitUntiedSwitch(CGF);
  };
  llvm::Value *DepWaitTaskArgs[6];
  if (NumDependencies) {
    DepWaitTaskArgs[0] = UpLoc;
    DepWaitTaskArgs[1] = ThreadID;
    DepWaitTaskArgs[2] = CGF.Builder.getInt32(NumDependencies);
    DepWaitTaskArgs[3] = DependenciesArray.getPointer();
    DepWaitTaskArgs[4] = CGF.Builder.getInt32(0);
    DepWaitTaskArgs[5] = llvm::ConstantPointerNull::get(CGF.VoidPtrTy);
  }
  auto &&ElseCodeGen = [&TaskArgs, ThreadID, NewTaskNewTaskTTy, TaskEntry,
                        NumDependencies, &DepWaitTaskArgs,
                        Loc](CodeGenFunction &CGF, PrePostActionTy &) {
    CGOpenMPRuntime &RT = CGF.CGM.getOpenMPRuntime();
    CodeGenFunction::RunCleanupsScope LocalScope(CGF);
    // Build void __kmpc_omp_wait_deps(ident_t *, kmp_int32 gtid,
    // kmp_int32 ndeps, kmp_depend_info_t *dep_list, kmp_int32
    // ndeps_noalias, kmp_depend_info_t *noalias_dep_list); if dependence info
    // is specified.
    if (NumDependencies)
      CGF.EmitRuntimeCall(RT.createRuntimeFunction(OMPRTL__kmpc_omp_wait_deps),
                          DepWaitTaskArgs);
    // Call proxy_task_entry(gtid, new_task);
    auto &&CodeGen = [TaskEntry, ThreadID, NewTaskNewTaskTTy,
                      Loc](CodeGenFunction &CGF, PrePostActionTy &Action) {
      Action.Enter(CGF);
      llvm::Value *OutlinedFnArgs[] = {ThreadID, NewTaskNewTaskTTy};
      CGF.CGM.getOpenMPRuntime().emitOutlinedFunctionCall(CGF, Loc, TaskEntry,
                                                          OutlinedFnArgs);
    };
    // Build void __kmpc_omp_task_begin_if0(ident_t *, kmp_int32 gtid,
    // kmp_task_t *new_task);
    // Build void __kmpc_omp_task_complete_if0(ident_t *, kmp_int32 gtid,
    // kmp_task_t *new_task);
    RegionCodeGenTy RCG(CodeGen);
    CommonActionTy Action(
        RT.createRuntimeFunction(OMPRTL__kmpc_omp_task_begin_if0), TaskArgs,
        RT.createRuntimeFunction(OMPRTL__kmpc_omp_task_complete_if0), TaskArgs);
    RCG.setAction(Action);
    RCG(CGF);
  };
  if (IfCond) {
    emitOMPIfClause(CGF, IfCond, ThenCodeGen, ElseCodeGen);
  } else {
    RegionCodeGenTy ThenRCG(ThenCodeGen);
    ThenRCG(CGF);
  }
}
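/// Emits a `taskloop` construct: the task is set up as for `task`, then the
/// lb/ub/st/liter/reductions fields of the kmp_task_t are seeded from the loop
/// bounds, and the whole iteration space is handed to __kmpc_taskloop (its
/// signature is sketched in the comment inside the function body).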
void CGOpenMPRuntime::emitTaskLoopCall(CodeGenFunction &CGF, SourceLocation Loc,
                                       const OMPLoopDirective &D,
                                       llvm::Value *TaskFunction,
                                       QualType SharedsTy, Address Shareds,
                                       const Expr *IfCond,
                                       const OMPTaskDataTy &Data) {
  if (!CGF.HaveInsertPoint())
    return;
  TaskResultTy Result =
      emitTaskInit(CGF, Loc, D, TaskFunction, SharedsTy, Shareds, Data);
  // NOTE: routine and part_id fields are initialized by __kmpc_omp_task_alloc()
  // libcall.
  // Call to void __kmpc_taskloop(ident_t *loc, int gtid, kmp_task_t *task, int
  // if_val, kmp_uint64 *lb, kmp_uint64 *ub, kmp_int64 st, int nogroup, int
  // sched, kmp_uint64 grainsize, void *task_dup);
  llvm::Value *ThreadID = getThreadID(CGF, Loc);
  llvm::Value *UpLoc = emitUpdateLocation(CGF, Loc);
  llvm::Value *IfVal;
  if (IfCond) {
    IfVal = CGF.Builder.CreateIntCast(CGF.EvaluateExprAsBool(IfCond), CGF.IntTy,
                                      /*isSigned=*/true);
  } else {
    IfVal = llvm::ConstantInt::getSigned(CGF.IntTy, /*V=*/1);
  }
  LValue LBLVal = CGF.EmitLValueForField(
      Result.TDBase,
      *std::next(Result.KmpTaskTQTyRD->field_begin(), KmpTaskTLowerBound));
  const auto *LBVar =
      cast<VarDecl>(cast<DeclRefExpr>(D.getLowerBoundVariable())->getDecl());
  CGF.EmitAnyExprToMem(LBVar->getInit(), LBLVal.getAddress(), LBLVal.getQuals(),
                       /*IsInitializer=*/true);
  LValue UBLVal = CGF.EmitLValueForField(
      Result.TDBase,
      *std::next(Result.KmpTaskTQTyRD->field_begin(), KmpTaskTUpperBound));
  const auto *UBVar =
      cast<VarDecl>(cast<DeclRefExpr>(D.getUpperBoundVariable())->getDecl());
  CGF.EmitAnyExprToMem(UBVar->getInit(), UBLVal.getAddress(), UBLVal.getQuals(),
                       /*IsInitializer=*/true);
  LValue StLVal = CGF.EmitLValueForField(
      Result.TDBase,
      *std::next(Result.KmpTaskTQTyRD->field_begin(), KmpTaskTStride));
  const auto *StVar =
      cast<VarDecl>(cast<DeclRefExpr>(D.getStrideVariable())->getDecl());
  CGF.EmitAnyExprToMem(StVar->getInit(), StLVal.getAddress(), StLVal.getQuals(),
                       /*IsInitializer=*/true);
  // Store reductions address.
  LValue RedLVal = CGF.EmitLValueForField(
      Result.TDBase,
      *std::next(Result.KmpTaskTQTyRD->field_begin(), KmpTaskTReductions));
  if (Data.Reductions) {
    CGF.EmitStoreOfScalar(Data.Reductions, RedLVal);
  } else {
    CGF.EmitNullInitialization(RedLVal.getAddress(),
                               CGF.getContext().VoidPtrTy);
  }
  enum { NoSchedule = 0, Grainsize = 1, NumTasks = 2 };
  llvm::Value *TaskArgs[] = {
      UpLoc,
      ThreadID,
      Result.NewTask,
      IfVal,
      LBLVal.getPointer(),
      UBLVal.getPointer(),
      CGF.EmitLoadOfScalar(StLVal, Loc),
      llvm::ConstantInt::getSigned(
          CGF.IntTy, 1), // nogroup is always 1 because the taskgroup is
                         // emitted by the compiler.
      llvm::ConstantInt::getSigned(
          CGF.IntTy, Data.Schedule.getPointer()
                         ? Data.Schedule.getInt() ? NumTasks : Grainsize
                         : NoSchedule),
      Data.Schedule.getPointer()
          ? CGF.Builder.CreateIntCast(Data.Schedule.getPointer(), CGF.Int64Ty,
                                      /*isSigned=*/false)
          : llvm::ConstantInt::get(CGF.Int64Ty, /*V=*/0),
      Result.TaskDupFn ? CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
                             Result.TaskDupFn, CGF.VoidPtrTy)
                       : llvm::ConstantPointerNull::get(CGF.VoidPtrTy)};
  CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_taskloop), TaskArgs);
}
/// Emit reduction operation for each element of array (required for
/// array sections) LHS op = RHS.
/// \param Type Type of array.
/// \param LHSVar Variable on the left side of the reduction operation
/// (references element of array in original variable).
/// \param RHSVar Variable on the right side of the reduction operation
/// (references element of array in original variable).
/// \param RedOpGen Generator of reduction operation with use of LHSVar and
/// RHSVar.
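/// The emitted IR forms a pointer-bumping loop over the elements, roughly
/// (a sketch; the actual blocks are named omp.arraycpy.body/done):
/// \code
/// if (lhs == lhs_end) goto done;
/// body: RedOpGen(*lhs, *rhs); ++lhs; ++rhs;
///       if (lhs != lhs_end) goto body;
/// done: ;
/// \endcode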
static void EmitOMPAggregateReduction(
    CodeGenFunction &CGF, QualType Type, const VarDecl *LHSVar,
    const VarDecl *RHSVar,
    const llvm::function_ref<void(CodeGenFunction &CGF, const Expr *,
                                  const Expr *, const Expr *)> &RedOpGen,
    const Expr *XExpr = nullptr, const Expr *EExpr = nullptr,
    const Expr *UpExpr = nullptr) {
  // Perform element-by-element initialization.
  QualType ElementTy;
  Address LHSAddr = CGF.GetAddrOfLocalVar(LHSVar);
  Address RHSAddr = CGF.GetAddrOfLocalVar(RHSVar);
  // Drill down to the base element type on both arrays.
  const ArrayType *ArrayTy = Type->getAsArrayTypeUnsafe();
  llvm::Value *NumElements = CGF.emitArrayLength(ArrayTy, ElementTy, LHSAddr);
  llvm::Value *RHSBegin = RHSAddr.getPointer();
  llvm::Value *LHSBegin = LHSAddr.getPointer();
  // Cast from pointer to array type to pointer to single element.
  llvm::Value *LHSEnd = CGF.Builder.CreateGEP(LHSBegin, NumElements);
  // The basic structure here is a while-do loop.
  llvm::BasicBlock *BodyBB = CGF.createBasicBlock("omp.arraycpy.body");
  llvm::BasicBlock *DoneBB = CGF.createBasicBlock("omp.arraycpy.done");
  llvm::Value *IsEmpty =
      CGF.Builder.CreateICmpEQ(LHSBegin, LHSEnd, "omp.arraycpy.isempty");
  CGF.Builder.CreateCondBr(IsEmpty, DoneBB, BodyBB);
  // Enter the loop body, making that address the current address.
  llvm::BasicBlock *EntryBB = CGF.Builder.GetInsertBlock();
  CGF.EmitBlock(BodyBB);
  CharUnits ElementSize = CGF.getContext().getTypeSizeInChars(ElementTy);
  llvm::PHINode *RHSElementPHI = CGF.Builder.CreatePHI(
      RHSBegin->getType(), 2, "omp.arraycpy.srcElementPast");
  RHSElementPHI->addIncoming(RHSBegin, EntryBB);
  Address RHSElementCurrent =
      Address(RHSElementPHI,
              RHSAddr.getAlignment().alignmentOfArrayElement(ElementSize));
  llvm::PHINode *LHSElementPHI = CGF.Builder.CreatePHI(
      LHSBegin->getType(), 2, "omp.arraycpy.destElementPast");
  LHSElementPHI->addIncoming(LHSBegin, EntryBB);
  Address LHSElementCurrent =
      Address(LHSElementPHI,
              LHSAddr.getAlignment().alignmentOfArrayElement(ElementSize));
  // Emit copy.
  CodeGenFunction::OMPPrivateScope Scope(CGF);
  Scope.addPrivate(LHSVar, [=]() { return LHSElementCurrent; });
  Scope.addPrivate(RHSVar, [=]() { return RHSElementCurrent; });
  Scope.Privatize();
  RedOpGen(CGF, XExpr, EExpr, UpExpr);
  Scope.ForceCleanup();
  // Shift the address forward by one element.
  llvm::Value *LHSElementNext = CGF.Builder.CreateConstGEP1_32(
      LHSElementPHI, /*Idx0=*/1, "omp.arraycpy.dest.element");
  llvm::Value *RHSElementNext = CGF.Builder.CreateConstGEP1_32(
      RHSElementPHI, /*Idx0=*/1, "omp.arraycpy.src.element");
  // Check whether we've reached the end.
  llvm::Value *Done =
      CGF.Builder.CreateICmpEQ(LHSElementNext, LHSEnd, "omp.arraycpy.done");
  CGF.Builder.CreateCondBr(Done, DoneBB, BodyBB);
  LHSElementPHI->addIncoming(LHSElementNext, CGF.Builder.GetInsertBlock());
  RHSElementPHI->addIncoming(RHSElementNext, CGF.Builder.GetInsertBlock());
  // Done.
  CGF.EmitBlock(DoneBB, /*IsFinished=*/true);
}
/// Emit the reduction combiner. If the combiner is a simple expression, emit
/// it as is; otherwise treat it as the combiner of a user-defined reduction
/// (UDR) declaration and emit it as a call to the UDR combiner function.
static void emitReductionCombiner(CodeGenFunction &CGF,
                                  const Expr *ReductionOp) {
  if (const auto *CE = dyn_cast<CallExpr>(ReductionOp))
    if (const auto *OVE = dyn_cast<OpaqueValueExpr>(CE->getCallee()))
      if (const auto *DRE =
              dyn_cast<DeclRefExpr>(OVE->getSourceExpr()->IgnoreImpCasts()))
        if (const auto *DRD =
                dyn_cast<OMPDeclareReductionDecl>(DRE->getDecl())) {
          std::pair<llvm::Function *, llvm::Function *> Reduction =
              CGF.CGM.getOpenMPRuntime().getUserDefinedReduction(DRD);
          RValue Func = RValue::get(Reduction.first);
          CodeGenFunction::OpaqueValueMapping Map(CGF, OVE, Func);
          CGF.EmitIgnoredExpr(ReductionOp);
          return;
        }
  CGF.EmitIgnoredExpr(ReductionOp);
}
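/// Emit a function that combines, element by element, the thread-local copies
/// in the RHS list into the LHS list. Roughly (a sketch based on the inline
/// comments below; the helper name is derived from getName):
/// \code
/// void .omp.reduction.reduction_func.(void *lhs[<n>], void *rhs[<n>]) {
///   *(Type0 *)lhs[0] = RedOp0(*(Type0 *)lhs[0], *(Type0 *)rhs[0]);
///   ...
/// }
/// \endcode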
llvm::Value *CGOpenMPRuntime::emitReductionFunction(
    CodeGenModule &CGM, SourceLocation Loc, llvm::Type *ArgsType,
    ArrayRef<const Expr *> Privates, ArrayRef<const Expr *> LHSExprs,
    ArrayRef<const Expr *> RHSExprs, ArrayRef<const Expr *> ReductionOps) {
  ASTContext &C = CGM.getContext();
  // void reduction_func(void *LHSArg, void *RHSArg);
  FunctionArgList Args;
  ImplicitParamDecl LHSArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.VoidPtrTy,
                           ImplicitParamDecl::Other);
  ImplicitParamDecl RHSArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.VoidPtrTy,
                           ImplicitParamDecl::Other);
  Args.push_back(&LHSArg);
  Args.push_back(&RHSArg);
  const auto &CGFI =
      CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
  std::string Name = getName({"omp", "reduction", "reduction_func"});
  auto *Fn = llvm::Function::Create(CGM.getTypes().GetFunctionType(CGFI),
                                    llvm::GlobalValue::InternalLinkage, Name,
                                    &CGM.getModule());
  CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
  Fn->setDoesNotRecurse();
  CodeGenFunction CGF(CGM);
  CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc);
  // Dst = (void*[n])(LHSArg);
  // Src = (void*[n])(RHSArg);
  Address LHS(CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
                  CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(&LHSArg)),
                  ArgsType),
              CGF.getPointerAlign());
  Address RHS(CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
                  CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(&RHSArg)),
                  ArgsType),
              CGF.getPointerAlign());
  // ...
  // *(Type<i>*)lhs[i] = RedOp<i>(*(Type<i>*)lhs[i], *(Type<i>*)rhs[i]);
  // ...
  CodeGenFunction::OMPPrivateScope Scope(CGF);
  auto IPriv = Privates.begin();
  unsigned Idx = 0;
  for (unsigned I = 0, E = ReductionOps.size(); I < E; ++I, ++IPriv, ++Idx) {
    const auto *RHSVar =
        cast<VarDecl>(cast<DeclRefExpr>(RHSExprs[I])->getDecl());
    Scope.addPrivate(RHSVar, [&CGF, RHS, Idx, RHSVar]() {
      return emitAddrOfVarFromArray(CGF, RHS, Idx, RHSVar);
    });
    const auto *LHSVar =
        cast<VarDecl>(cast<DeclRefExpr>(LHSExprs[I])->getDecl());
    Scope.addPrivate(LHSVar, [&CGF, LHS, Idx, LHSVar]() {
      return emitAddrOfVarFromArray(CGF, LHS, Idx, LHSVar);
    });
    QualType PrivTy = (*IPriv)->getType();
    if (PrivTy->isVariablyModifiedType()) {
      // Get array size and emit VLA type.
      ++Idx;
      Address Elem =
          CGF.Builder.CreateConstArrayGEP(LHS, Idx, CGF.getPointerSize());
      llvm::Value *Ptr = CGF.Builder.CreateLoad(Elem);
      const VariableArrayType *VLA =
          CGF.getContext().getAsVariableArrayType(PrivTy);
      const auto *OVE = cast<OpaqueValueExpr>(VLA->getSizeExpr());
      CodeGenFunction::OpaqueValueMapping OpaqueMap(
          CGF, OVE, RValue::get(CGF.Builder.CreatePtrToInt(Ptr, CGF.SizeTy)));
      CGF.EmitVariablyModifiedType(PrivTy);
    }
  }
  Scope.Privatize();
  IPriv = Privates.begin();
  auto ILHS = LHSExprs.begin();
  auto IRHS = RHSExprs.begin();
  for (const Expr *E : ReductionOps) {
    if ((*IPriv)->getType()->isArrayType()) {
      // Emit reduction for array section.
      const auto *LHSVar = cast<VarDecl>(cast<DeclRefExpr>(*ILHS)->getDecl());
      const auto *RHSVar = cast<VarDecl>(cast<DeclRefExpr>(*IRHS)->getDecl());
      EmitOMPAggregateReduction(
          CGF, (*IPriv)->getType(), LHSVar, RHSVar,
          [=](CodeGenFunction &CGF, const Expr *, const Expr *, const Expr *) {
            emitReductionCombiner(CGF, E);
          });
    } else {
      // Emit reduction for array subscript or single variable.
      emitReductionCombiner(CGF, E);
    }
    ++IPriv;
    ++ILHS;
    ++IRHS;
  }
  Scope.ForceCleanup();
  CGF.FinishFunction();
  return Fn;
}
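/// Emits the combiner for one reduction item, using the element-wise aggregate
/// helper above for array types and a plain combiner otherwise.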
void CGOpenMPRuntime::emitSingleReductionCombiner(CodeGenFunction &CGF,
                                                  const Expr *ReductionOp,
                                                  const Expr *PrivateRef,
                                                  const DeclRefExpr *LHS,
                                                  const DeclRefExpr *RHS) {
  if (PrivateRef->getType()->isArrayType()) {
    // Emit reduction for array section.
    const auto *LHSVar = cast<VarDecl>(LHS->getDecl());
    const auto *RHSVar = cast<VarDecl>(RHS->getDecl());
    EmitOMPAggregateReduction(
        CGF, PrivateRef->getType(), LHSVar, RHSVar,
        [=](CodeGenFunction &CGF, const Expr *, const Expr *, const Expr *) {
          emitReductionCombiner(CGF, ReductionOp);
        });
  } else {
    // Emit reduction for array subscript or single variable.
    emitReductionCombiner(CGF, ReductionOp);
  }
}
void CGOpenMPRuntime::emitReduction(CodeGenFunction &CGF, SourceLocation Loc,
                                    ArrayRef<const Expr *> Privates,
                                    ArrayRef<const Expr *> LHSExprs,
                                    ArrayRef<const Expr *> RHSExprs,
                                    ArrayRef<const Expr *> ReductionOps,
                                    ReductionOptionsTy Options) {
  if (!CGF.HaveInsertPoint())
    return;
  bool WithNowait = Options.WithNowait;
  bool SimpleReduction = Options.SimpleReduction;
  // The following code is emitted for the reduction:
  //
  // static kmp_critical_name lock = { 0 };
  //
  // void reduce_func(void *lhs[<n>], void *rhs[<n>]) {
  //   *(Type0*)lhs[0] = ReductionOperation0(*(Type0*)lhs[0], *(Type0*)rhs[0]);
  //   ...
  //   *(Type<n>-1*)lhs[<n>-1] = ReductionOperation<n>-1(*(Type<n>-1*)lhs[<n>-1],
  //   *(Type<n>-1*)rhs[<n>-1]);
  // }
  //
  // ...
  // void *RedList[<n>] = {&<RHSExprs>[0], ..., &<RHSExprs>[<n>-1]};
  // switch (__kmpc_reduce{_nowait}(<loc>, <gtid>, <n>, sizeof(RedList),
  // RedList, reduce_func, &<lock>)) {
  // case 1:
  //   ...
  //   <LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]);
  //   ...
  //   __kmpc_end_reduce{_nowait}(<loc>, <gtid>, &<lock>);
  //   break;
  // case 2:
  //   ...
  //   Atomic(<LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]));
  //   ...
  //   [__kmpc_end_reduce(<loc>, <gtid>, &<lock>);]
  //   break;
  // default:;
  // }
  //
  // If SimpleReduction is true, only the following code is generated:
  // ...
  // <LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]);
  // ...
  ASTContext &C = CGM.getContext();
  if (SimpleReduction) {
    CodeGenFunction::RunCleanupsScope Scope(CGF);
    auto IPriv = Privates.begin();
    auto ILHS = LHSExprs.begin();
    auto IRHS = RHSExprs.begin();
    for (const Expr *E : ReductionOps) {
      emitSingleReductionCombiner(CGF, E, *IPriv, cast<DeclRefExpr>(*ILHS),
                                  cast<DeclRefExpr>(*IRHS));
      ++IPriv;
      ++ILHS;
      ++IRHS;
    }
    return;
  }

  // 1. Build a list of reduction variables.
  // void *RedList[<n>] = {<ReductionVars>[0], ..., <ReductionVars>[<n>-1]};
  auto Size = RHSExprs.size();
  for (const Expr *E : Privates) {
    if (E->getType()->isVariablyModifiedType())
      // Reserve place for array size.
      ++Size;
  }
  llvm::APInt ArraySize(/*unsigned int numBits=*/32, Size);
  QualType ReductionArrayTy =
      C.getConstantArrayType(C.VoidPtrTy, ArraySize, ArrayType::Normal,
                             /*IndexTypeQuals=*/0);
  Address ReductionList =
      CGF.CreateMemTemp(ReductionArrayTy, ".omp.reduction.red_list");
  auto IPriv = Privates.begin();
  unsigned Idx = 0;
  for (unsigned I = 0, E = RHSExprs.size(); I < E; ++I, ++IPriv, ++Idx) {
    Address Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx,
                                                   CGF.getPointerSize());
    CGF.Builder.CreateStore(
        CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
            CGF.EmitLValue(RHSExprs[I]).getPointer(), CGF.VoidPtrTy),
        Elem);
    if ((*IPriv)->getType()->isVariablyModifiedType()) {
      // Store array size.
      ++Idx;
      Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx,
                                             CGF.getPointerSize());
      llvm::Value *Size = CGF.Builder.CreateIntCast(
          CGF.getVLASize(
                 CGF.getContext().getAsVariableArrayType((*IPriv)->getType()))
              .NumElts,
          CGF.SizeTy, /*isSigned=*/false);
      CGF.Builder.CreateStore(CGF.Builder.CreateIntToPtr(Size, CGF.VoidPtrTy),
                              Elem);
    }
  }
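
  // For instance (shapes hypothetical): with privates {int x; double a[n];}
  // the list built above is effectively
  //   RedList = { &x-rhs, &a-rhs, (void *)n }
  // i.e. each variably modified item is followed by its element count so that
  // reduce_func can recover the VLA bounds on the other side.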

  // 2. Emit reduce_func().
  llvm::Value *ReductionFn = emitReductionFunction(
      CGM, Loc, CGF.ConvertTypeForMem(ReductionArrayTy)->getPointerTo(),
      Privates, LHSExprs, RHSExprs, ReductionOps);

  // 3. Create static kmp_critical_name lock = { 0 };
  std::string Name = getName({"reduction"});
  llvm::Value *Lock = getCriticalRegionLock(Name);

  // 4. Build res = __kmpc_reduce{_nowait}(<loc>, <gtid>, <n>, sizeof(RedList),
  // RedList, reduce_func, &<lock>);
  llvm::Value *IdentTLoc = emitUpdateLocation(CGF, Loc, OMP_ATOMIC_REDUCE);
  llvm::Value *ThreadId = getThreadID(CGF, Loc);
  llvm::Value *ReductionArrayTySize = CGF.getTypeSize(ReductionArrayTy);
  llvm::Value *RL = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
      ReductionList.getPointer(), CGF.VoidPtrTy);
  llvm::Value *Args[] = {
      IdentTLoc,                             // ident_t *<loc>
      ThreadId,                              // i32 <gtid>
      CGF.Builder.getInt32(RHSExprs.size()), // i32 <n>
      ReductionArrayTySize,                  // size_type sizeof(RedList)
      RL,                                    // void *RedList
      ReductionFn, // void (*) (void *, void *) <reduce_func>
      Lock         // kmp_critical_name *&<lock>
  };
  llvm::Value *Res = CGF.EmitRuntimeCall(
      createRuntimeFunction(WithNowait ? OMPRTL__kmpc_reduce_nowait
                                       : OMPRTL__kmpc_reduce),
      Args);

  // 5. Build switch(res)
  llvm::BasicBlock *DefaultBB = CGF.createBasicBlock(".omp.reduction.default");
  llvm::SwitchInst *SwInst =
      CGF.Builder.CreateSwitch(Res, DefaultBB, /*NumCases=*/2);

  // 6. Build case 1:
  //   ...
  //   <LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]);
  //   ...
  //   __kmpc_end_reduce{_nowait}(<loc>, <gtid>, &<lock>);
  //   break;
  llvm::BasicBlock *Case1BB = CGF.createBasicBlock(".omp.reduction.case1");
  SwInst->addCase(CGF.Builder.getInt32(1), Case1BB);
  CGF.EmitBlock(Case1BB);

  // Add emission of __kmpc_end_reduce{_nowait}(<loc>, <gtid>, &<lock>);
  llvm::Value *EndArgs[] = {
      IdentTLoc, // ident_t *<loc>
      ThreadId,  // i32 <gtid>
      Lock       // kmp_critical_name *&<lock>
  };
  auto &&CodeGen = [Privates, LHSExprs, RHSExprs, ReductionOps](
                       CodeGenFunction &CGF, PrePostActionTy &Action) {
    CGOpenMPRuntime &RT = CGF.CGM.getOpenMPRuntime();
    auto IPriv = Privates.begin();
    auto ILHS = LHSExprs.begin();
    auto IRHS = RHSExprs.begin();
    for (const Expr *E : ReductionOps) {
      RT.emitSingleReductionCombiner(CGF, E, *IPriv, cast<DeclRefExpr>(*ILHS),
                                     cast<DeclRefExpr>(*IRHS));
      ++IPriv;
      ++ILHS;
      ++IRHS;
    }
  };
  RegionCodeGenTy RCG(CodeGen);
  CommonActionTy Action(
      nullptr, llvm::None,
      createRuntimeFunction(WithNowait ? OMPRTL__kmpc_end_reduce_nowait
                                       : OMPRTL__kmpc_end_reduce),
      EndArgs);
  RCG.setAction(Action);
  RCG(CGF);
  CGF.EmitBranch(DefaultBB);

  // 7. Build case 2:
  //   ...
  //   Atomic(<LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]));
  //   ...
  //   break;
  llvm::BasicBlock *Case2BB = CGF.createBasicBlock(".omp.reduction.case2");
  SwInst->addCase(CGF.Builder.getInt32(2), Case2BB);
  CGF.EmitBlock(Case2BB);

  auto &&AtomicCodeGen = [Loc, Privates, LHSExprs, RHSExprs, ReductionOps](
                             CodeGenFunction &CGF, PrePostActionTy &Action) {
    auto ILHS = LHSExprs.begin();
    auto IRHS = RHSExprs.begin();
    auto IPriv = Privates.begin();
    for (const Expr *E : ReductionOps) {
      const Expr *XExpr = nullptr;
      const Expr *EExpr = nullptr;
      const Expr *UpExpr = nullptr;
      BinaryOperatorKind BO = BO_Comma;
      if (const auto *BO = dyn_cast<BinaryOperator>(E)) {
        if (BO->getOpcode() == BO_Assign) {
          XExpr = BO->getLHS();
          UpExpr = BO->getRHS();
        }
      }
      // Try to emit update expression as a simple atomic.
      const Expr *RHSExpr = UpExpr;
      if (RHSExpr) {
        // Analyze RHS part of the whole expression.
        if (const auto *ACO = dyn_cast<AbstractConditionalOperator>(
                RHSExpr->IgnoreParenImpCasts())) {
          // If this is a conditional operator, analyze its condition for
          // min/max reduction operator.
          RHSExpr = ACO->getCond();
        }
        if (const auto *BORHS =
                dyn_cast<BinaryOperator>(RHSExpr->IgnoreParenImpCasts())) {
          EExpr = BORHS->getRHS();
          BO = BORHS->getOpcode();
        }
      }
      if (XExpr) {
        const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(*ILHS)->getDecl());
        auto &&AtomicRedGen = [BO, VD,
                               Loc](CodeGenFunction &CGF, const Expr *XExpr,
                                    const Expr *EExpr, const Expr *UpExpr) {
          LValue X = CGF.EmitLValue(XExpr);
          RValue E;
          if (EExpr)
            E = CGF.EmitAnyExpr(EExpr);
          CGF.EmitOMPAtomicSimpleUpdateExpr(
              X, E, BO, /*IsXLHSInRHSPart=*/true,
              llvm::AtomicOrdering::Monotonic, Loc,
              [&CGF, UpExpr, VD, Loc](RValue XRValue) {
                CodeGenFunction::OMPPrivateScope PrivateScope(CGF);
                PrivateScope.addPrivate(
                    VD, [&CGF, VD, XRValue, Loc]() {
                      Address LHSTemp = CGF.CreateMemTemp(VD->getType());
                      CGF.emitOMPSimpleStore(
                          CGF.MakeAddrLValue(LHSTemp, VD->getType()), XRValue,
                          VD->getType().getNonReferenceType(), Loc);
                      return LHSTemp;
                    });
                (void)PrivateScope.Privatize();
                return CGF.EmitAnyExpr(UpExpr);
              });
        };
        if ((*IPriv)->getType()->isArrayType()) {
          // Emit atomic reduction for array section.
          const auto *RHSVar =
              cast<VarDecl>(cast<DeclRefExpr>(*IRHS)->getDecl());
          EmitOMPAggregateReduction(CGF, (*IPriv)->getType(), VD, RHSVar,
                                    AtomicRedGen, XExpr, EExpr, UpExpr);
        } else {
          // Emit atomic reduction for array subscript or single variable.
          AtomicRedGen(CGF, XExpr, EExpr, UpExpr);
        }
      } else {
        // Emit as a critical region.
        auto &&CritRedGen = [E, Loc](CodeGenFunction &CGF, const Expr *,
                                     const Expr *, const Expr *) {
          CGOpenMPRuntime &RT = CGF.CGM.getOpenMPRuntime();
          std::string Name = RT.getName({"atomic_reduction"});
          RT.emitCriticalRegion(
              CGF, Name,
              [=](CodeGenFunction &CGF, PrePostActionTy &Action) {
                Action.Enter(CGF);
                emitReductionCombiner(CGF, E);
              },
              Loc);
        };
        if ((*IPriv)->getType()->isArrayType()) {
          const auto *LHSVar =
              cast<VarDecl>(cast<DeclRefExpr>(*ILHS)->getDecl());
          const auto *RHSVar =
              cast<VarDecl>(cast<DeclRefExpr>(*IRHS)->getDecl());
          EmitOMPAggregateReduction(CGF, (*IPriv)->getType(), LHSVar, RHSVar,
                                    CritRedGen);
        } else {
          CritRedGen(CGF, nullptr, nullptr, nullptr);
        }
      }
      ++ILHS;
      ++IRHS;
      ++IPriv;
    }
  };
  RegionCodeGenTy AtomicRCG(AtomicCodeGen);
  if (!WithNowait) {
    // Add emission of __kmpc_end_reduce(<loc>, <gtid>, &<lock>);
    llvm::Value *EndArgs[] = {
        IdentTLoc, // ident_t *<loc>
        ThreadId,  // i32 <gtid>
        Lock       // kmp_critical_name *&<lock>
    };
    CommonActionTy Action(nullptr, llvm::None,
                          createRuntimeFunction(OMPRTL__kmpc_end_reduce),
                          EndArgs);
    AtomicRCG.setAction(Action);
    AtomicRCG(CGF);
  } else {
    AtomicRCG(CGF);
  }
  CGF.EmitBranch(DefaultBB);
  CGF.EmitBlock(DefaultBB, /*IsFinished=*/true);
}

/// Generates unique name for artificial threadprivate variables.
/// Format is: <Prefix> "." <Decl_mangled_name> "_" "<Decl_start_loc_raw_enc>"
static std::string generateUniqueName(CodeGenModule &CGM, StringRef Prefix,
                                      const Expr *Ref) {
  SmallString<256> Buffer;
  llvm::raw_svector_ostream Out(Buffer);
  const clang::DeclRefExpr *DE;
  const VarDecl *D = ::getBaseDecl(Ref, DE);
  if (!D)
    D = cast<VarDecl>(cast<DeclRefExpr>(Ref)->getDecl());
  D = D->getCanonicalDecl();
  std::string Name = CGM.getOpenMPRuntime().getName(
      {D->isLocalVarDeclOrParm() ? D->getName() : CGM.getMangledName(D)});
  Out << Prefix << Name << "_"
      << D->getCanonicalDecl()->getBeginLoc().getRawEncoding();
  return Out.str();
}
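
// As an illustration (values hypothetical): for a local variable 'a' and
// Prefix "reduction_size", the generated name looks like
// "reduction_size.a_4213568"; the numeric suffix is the raw encoding of the
// declaration's start location, which keeps names unique per declaration.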

/// Emits reduction initializer function:
/// \code
/// void @.red_init(void* %arg) {
///   %0 = bitcast void* %arg to <type>*
///   store <type> <init>, <type>* %0
///   ret void
/// }
/// \endcode
static llvm::Value *emitReduceInitFunction(CodeGenModule &CGM,
                                           SourceLocation Loc,
                                           ReductionCodeGen &RCG, unsigned N) {
  ASTContext &C = CGM.getContext();
  FunctionArgList Args;
  ImplicitParamDecl Param(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.VoidPtrTy,
                          ImplicitParamDecl::Other);
  Args.emplace_back(&Param);
  const auto &FnInfo =
      CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
  llvm::FunctionType *FnTy = CGM.getTypes().GetFunctionType(FnInfo);
  std::string Name = CGM.getOpenMPRuntime().getName({"red_init", ""});
  auto *Fn = llvm::Function::Create(FnTy, llvm::GlobalValue::InternalLinkage,
                                    Name, &CGM.getModule());
  CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, FnInfo);
  Fn->setDoesNotRecurse();
  CodeGenFunction CGF(CGM);
  CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, FnInfo, Args, Loc, Loc);
  Address PrivateAddr = CGF.EmitLoadOfPointer(
      CGF.GetAddrOfLocalVar(&Param),
      C.getPointerType(C.VoidPtrTy).castAs<PointerType>());
  llvm::Value *Size = nullptr;
  // If the size of the reduction item is non-constant, load it from global
  // threadprivate variable.
  if (RCG.getSizes(N).second) {
    Address SizeAddr = CGM.getOpenMPRuntime().getAddrOfArtificialThreadPrivate(
        CGF, CGM.getContext().getSizeType(),
        generateUniqueName(CGM, "reduction_size", RCG.getRefExpr(N)));
    Size = CGF.EmitLoadOfScalar(SizeAddr, /*Volatile=*/false,
                                CGM.getContext().getSizeType(), Loc);
  }
  RCG.emitAggregateType(CGF, N, Size);
  LValue SharedLVal;
  // If the initializer uses the initializer from a declare reduction
  // construct, emit a pointer to the address of the original reduction item
  // (required by the reduction initializer).
  if (RCG.usesReductionInitializer(N)) {
    Address SharedAddr =
        CGM.getOpenMPRuntime().getAddrOfArtificialThreadPrivate(
            CGF, CGM.getContext().VoidPtrTy,
            generateUniqueName(CGM, "reduction", RCG.getRefExpr(N)));
    SharedAddr = CGF.EmitLoadOfPointer(
        SharedAddr,
        CGM.getContext().VoidPtrTy.castAs<PointerType>()->getTypePtr());
    SharedLVal = CGF.MakeAddrLValue(SharedAddr, CGM.getContext().VoidPtrTy);
  } else {
    SharedLVal = CGF.MakeNaturalAlignAddrLValue(
        llvm::ConstantPointerNull::get(CGM.VoidPtrTy),
        CGM.getContext().VoidPtrTy);
  }
  // Emit the initializer:
  // %0 = bitcast void* %arg to <type>*
  // store <type> <init>, <type>* %0
  RCG.emitInitialization(CGF, N, PrivateAddr, SharedLVal,
                         [](CodeGenFunction &) { return false; });
  CGF.FinishFunction();
  return Fn;
}

/// Emits reduction combiner function:
/// \code
/// void @.red_comb(void* %arg0, void* %arg1) {
///   %lhs = bitcast void* %arg0 to <type>*
///   %rhs = bitcast void* %arg1 to <type>*
///   %2 = <ReductionOp>(<type>* %lhs, <type>* %rhs)
///   store <type> %2, <type>* %lhs
///   ret void
/// }
/// \endcode
static llvm::Value *emitReduceCombFunction(CodeGenModule &CGM,
                                           SourceLocation Loc,
                                           ReductionCodeGen &RCG, unsigned N,
                                           const Expr *ReductionOp,
                                           const Expr *LHS, const Expr *RHS,
                                           const Expr *PrivateRef) {
  ASTContext &C = CGM.getContext();
  const auto *LHSVD = cast<VarDecl>(cast<DeclRefExpr>(LHS)->getDecl());
  const auto *RHSVD = cast<VarDecl>(cast<DeclRefExpr>(RHS)->getDecl());
  FunctionArgList Args;
  ImplicitParamDecl ParamInOut(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
                               C.VoidPtrTy, ImplicitParamDecl::Other);
  ImplicitParamDecl ParamIn(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
                            C.VoidPtrTy, ImplicitParamDecl::Other);
  Args.emplace_back(&ParamInOut);
  Args.emplace_back(&ParamIn);
  const auto &FnInfo =
      CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
  llvm::FunctionType *FnTy = CGM.getTypes().GetFunctionType(FnInfo);
  std::string Name = CGM.getOpenMPRuntime().getName({"red_comb", ""});
  auto *Fn = llvm::Function::Create(FnTy, llvm::GlobalValue::InternalLinkage,
                                    Name, &CGM.getModule());
  CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, FnInfo);
  Fn->setDoesNotRecurse();
  CodeGenFunction CGF(CGM);
  CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, FnInfo, Args, Loc, Loc);
  llvm::Value *Size = nullptr;
  // If the size of the reduction item is non-constant, load it from global
  // threadprivate variable.
  if (RCG.getSizes(N).second) {
    Address SizeAddr = CGM.getOpenMPRuntime().getAddrOfArtificialThreadPrivate(
        CGF, CGM.getContext().getSizeType(),
        generateUniqueName(CGM, "reduction_size", RCG.getRefExpr(N)));
    Size = CGF.EmitLoadOfScalar(SizeAddr, /*Volatile=*/false,
                                CGM.getContext().getSizeType(), Loc);
  }
  RCG.emitAggregateType(CGF, N, Size);
  // Remap lhs and rhs variables to the addresses of the function arguments.
  // %lhs = bitcast void* %arg0 to <type>*
  // %rhs = bitcast void* %arg1 to <type>*
  CodeGenFunction::OMPPrivateScope PrivateScope(CGF);
  PrivateScope.addPrivate(LHSVD, [&C, &CGF, &ParamInOut, LHSVD]() {
    // Pull out the pointer to the variable.
    Address PtrAddr = CGF.EmitLoadOfPointer(
        CGF.GetAddrOfLocalVar(&ParamInOut),
        C.getPointerType(C.VoidPtrTy).castAs<PointerType>());
    return CGF.Builder.CreateElementBitCast(
        PtrAddr, CGF.ConvertTypeForMem(LHSVD->getType()));
  });
  PrivateScope.addPrivate(RHSVD, [&C, &CGF, &ParamIn, RHSVD]() {
    // Pull out the pointer to the variable.
    Address PtrAddr = CGF.EmitLoadOfPointer(
        CGF.GetAddrOfLocalVar(&ParamIn),
        C.getPointerType(C.VoidPtrTy).castAs<PointerType>());
    return CGF.Builder.CreateElementBitCast(
        PtrAddr, CGF.ConvertTypeForMem(RHSVD->getType()));
  });
  PrivateScope.Privatize();
  // Emit the combiner body:
  // %2 = <ReductionOp>(<type> *%lhs, <type> *%rhs)
  // store <type> %2, <type>* %lhs
  CGM.getOpenMPRuntime().emitSingleReductionCombiner(
      CGF, ReductionOp, PrivateRef, cast<DeclRefExpr>(LHS),
      cast<DeclRefExpr>(RHS));
  CGF.FinishFunction();
  return Fn;
}

/// Emits reduction finalizer function:
/// \code
/// void @.red_fini(void* %arg) {
///   %0 = bitcast void* %arg to <type>*
///   <destroy>(<type>* %0)
///   ret void
/// }
/// \endcode
static llvm::Value *emitReduceFiniFunction(CodeGenModule &CGM,
                                           SourceLocation Loc,
                                           ReductionCodeGen &RCG, unsigned N) {
  if (!RCG.needCleanups(N))
    return nullptr;
  ASTContext &C = CGM.getContext();
  FunctionArgList Args;
  ImplicitParamDecl Param(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.VoidPtrTy,
                          ImplicitParamDecl::Other);
  Args.emplace_back(&Param);
  const auto &FnInfo =
      CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
  llvm::FunctionType *FnTy = CGM.getTypes().GetFunctionType(FnInfo);
  std::string Name = CGM.getOpenMPRuntime().getName({"red_fini", ""});
  auto *Fn = llvm::Function::Create(FnTy, llvm::GlobalValue::InternalLinkage,
                                    Name, &CGM.getModule());
  CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, FnInfo);
  Fn->setDoesNotRecurse();
  CodeGenFunction CGF(CGM);
  CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, FnInfo, Args, Loc, Loc);
  Address PrivateAddr = CGF.EmitLoadOfPointer(
      CGF.GetAddrOfLocalVar(&Param),
      C.getPointerType(C.VoidPtrTy).castAs<PointerType>());
  llvm::Value *Size = nullptr;
  // If the size of the reduction item is non-constant, load it from global
  // threadprivate variable.
  if (RCG.getSizes(N).second) {
    Address SizeAddr = CGM.getOpenMPRuntime().getAddrOfArtificialThreadPrivate(
        CGF, CGM.getContext().getSizeType(),
        generateUniqueName(CGM, "reduction_size", RCG.getRefExpr(N)));
    Size = CGF.EmitLoadOfScalar(SizeAddr, /*Volatile=*/false,
                                CGM.getContext().getSizeType(), Loc);
  }
  RCG.emitAggregateType(CGF, N, Size);
  // Emit the finalizer body:
  // <destroy>(<type>* %0)
  RCG.emitCleanups(CGF, N, PrivateAddr);
  CGF.FinishFunction();
  return Fn;
}

llvm::Value *CGOpenMPRuntime::emitTaskReductionInit(
    CodeGenFunction &CGF, SourceLocation Loc, ArrayRef<const Expr *> LHSExprs,
    ArrayRef<const Expr *> RHSExprs, const OMPTaskDataTy &Data) {
  if (!CGF.HaveInsertPoint() || Data.ReductionVars.empty())
    return nullptr;

  // Build typedef struct:
  // kmp_task_red_input {
  //   void *reduce_shar; // shared reduction item
  //   size_t reduce_size; // size of data item
  //   void *reduce_init; // data initialization routine
  //   void *reduce_fini; // data finalization routine
  //   void *reduce_comb; // data combiner routine
  //   kmp_task_red_flags_t flags; // flags for additional info from compiler
  // } kmp_task_red_input_t;
  ASTContext &C = CGM.getContext();
  RecordDecl *RD = C.buildImplicitRecord("kmp_task_red_input_t");
  RD->startDefinition();
  const FieldDecl *SharedFD = addFieldToRecordDecl(C, RD, C.VoidPtrTy);
  const FieldDecl *SizeFD = addFieldToRecordDecl(C, RD, C.getSizeType());
  const FieldDecl *InitFD = addFieldToRecordDecl(C, RD, C.VoidPtrTy);
  const FieldDecl *FiniFD = addFieldToRecordDecl(C, RD, C.VoidPtrTy);
  const FieldDecl *CombFD = addFieldToRecordDecl(C, RD, C.VoidPtrTy);
  const FieldDecl *FlagsFD = addFieldToRecordDecl(
      C, RD, C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/false));
  RD->completeDefinition();
  QualType RDType = C.getRecordType(RD);
  unsigned Size = Data.ReductionVars.size();
  llvm::APInt ArraySize(/*numBits=*/64, Size);
  QualType ArrayRDType = C.getConstantArrayType(
      RDType, ArraySize, ArrayType::Normal, /*IndexTypeQuals=*/0);
  // kmp_task_red_input_t .rd_input.[Size];
  Address TaskRedInput = CGF.CreateMemTemp(ArrayRDType, ".rd_input.");
  ReductionCodeGen RCG(Data.ReductionVars, Data.ReductionCopies,
                       Data.ReductionOps);
  for (unsigned Cnt = 0; Cnt < Size; ++Cnt) {
    // kmp_task_red_input_t &ElemLVal = .rd_input.[Cnt];
    llvm::Value *Idxs[] = {llvm::ConstantInt::get(CGM.SizeTy, /*V=*/0),
                           llvm::ConstantInt::get(CGM.SizeTy, Cnt)};
    llvm::Value *GEP = CGF.EmitCheckedInBoundsGEP(
        TaskRedInput.getPointer(), Idxs,
        /*SignedIndices=*/false, /*IsSubtraction=*/false, Loc,
        ".rd_input.gep.");
    LValue ElemLVal = CGF.MakeNaturalAlignAddrLValue(GEP, RDType);
    // ElemLVal.reduce_shar = &Shareds[Cnt];
    LValue SharedLVal = CGF.EmitLValueForField(ElemLVal, SharedFD);
    RCG.emitSharedLValue(CGF, Cnt);
    llvm::Value *CastedShared =
        CGF.EmitCastToVoidPtr(RCG.getSharedLValue(Cnt).getPointer());
    CGF.EmitStoreOfScalar(CastedShared, SharedLVal);
    RCG.emitAggregateType(CGF, Cnt);
    llvm::Value *SizeValInChars;
    llvm::Value *SizeVal;
    std::tie(SizeValInChars, SizeVal) = RCG.getSizes(Cnt);
    // We use delayed creation/initialization for VLAs, array sections and
    // custom reduction initializations. This is required because the runtime
    // does not provide a way to pass the sizes of VLAs/array sections to the
    // initializer/combiner/finalizer functions, and does not pass the pointer
    // to the original reduction item to the initializer. Instead,
    // threadprivate global variables are used to store these values and make
    // them available to those functions.
    bool DelayedCreation = !!SizeVal;
    SizeValInChars = CGF.Builder.CreateIntCast(SizeValInChars, CGM.SizeTy,
                                               /*isSigned=*/false);
    LValue SizeLVal = CGF.EmitLValueForField(ElemLVal, SizeFD);
    CGF.EmitStoreOfScalar(SizeValInChars, SizeLVal);
    // ElemLVal.reduce_init = init;
    LValue InitLVal = CGF.EmitLValueForField(ElemLVal, InitFD);
    llvm::Value *InitAddr =
        CGF.EmitCastToVoidPtr(emitReduceInitFunction(CGM, Loc, RCG, Cnt));
    CGF.EmitStoreOfScalar(InitAddr, InitLVal);
    DelayedCreation = DelayedCreation || RCG.usesReductionInitializer(Cnt);
    // ElemLVal.reduce_fini = fini;
    LValue FiniLVal = CGF.EmitLValueForField(ElemLVal, FiniFD);
    llvm::Value *Fini = emitReduceFiniFunction(CGM, Loc, RCG, Cnt);
    llvm::Value *FiniAddr =
        Fini ? CGF.EmitCastToVoidPtr(Fini)
             : llvm::ConstantPointerNull::get(CGM.VoidPtrTy);
    CGF.EmitStoreOfScalar(FiniAddr, FiniLVal);
    // ElemLVal.reduce_comb = comb;
    LValue CombLVal = CGF.EmitLValueForField(ElemLVal, CombFD);
    llvm::Value *CombAddr = CGF.EmitCastToVoidPtr(emitReduceCombFunction(
        CGM, Loc, RCG, Cnt, Data.ReductionOps[Cnt], LHSExprs[Cnt],
        RHSExprs[Cnt], Data.ReductionCopies[Cnt]));
    CGF.EmitStoreOfScalar(CombAddr, CombLVal);
    // ElemLVal.flags = 0;
    LValue FlagsLVal = CGF.EmitLValueForField(ElemLVal, FlagsFD);
    if (DelayedCreation) {
      CGF.EmitStoreOfScalar(
          llvm::ConstantInt::get(CGM.Int32Ty, /*V=*/1, /*IsSigned=*/true),
          FlagsLVal);
    } else
      CGF.EmitNullInitialization(FlagsLVal.getAddress(), FlagsLVal.getType());
  }
  // Build call void *__kmpc_task_reduction_init(int gtid, int num_data, void
  // *data);
  llvm::Value *Args[] = {
      CGF.Builder.CreateIntCast(getThreadID(CGF, Loc), CGM.IntTy,
                                /*isSigned=*/true),
      llvm::ConstantInt::get(CGM.IntTy, Size, /*isSigned=*/true),
      CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(TaskRedInput.getPointer(),
                                                      CGM.VoidPtrTy)};
  return CGF.EmitRuntimeCall(
      createRuntimeFunction(OMPRTL__kmpc_task_reduction_init), Args);
}
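
// At its call site this boils down to, roughly (illustrative pseudocode, not
// emitted verbatim):
//   kmp_task_red_input_t .rd_input.[n] = { {&shared, size, init, fini, comb,
//                                           flags}, ... };
//   void *tg = __kmpc_task_reduction_init(gtid, n, &.rd_input.);
// The returned taskgroup handle 'tg' is what later feeds
// __kmpc_task_reduction_get_th_data (see getTaskReductionItem below).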

void CGOpenMPRuntime::emitTaskReductionFixups(CodeGenFunction &CGF,
                                              SourceLocation Loc,
                                              ReductionCodeGen &RCG,
                                              unsigned N) {
  auto Sizes = RCG.getSizes(N);
  // Emit threadprivate global variable if the type is non-constant
  // (Sizes.second != nullptr).
  if (Sizes.second) {
    llvm::Value *SizeVal = CGF.Builder.CreateIntCast(Sizes.second, CGM.SizeTy,
                                                     /*isSigned=*/false);
    Address SizeAddr = getAddrOfArtificialThreadPrivate(
        CGF, CGM.getContext().getSizeType(),
        generateUniqueName(CGM, "reduction_size", RCG.getRefExpr(N)));
    CGF.Builder.CreateStore(SizeVal, SizeAddr, /*IsVolatile=*/false);
  }
  // Store address of the original reduction item if custom initializer is
  // used.
  if (RCG.usesReductionInitializer(N)) {
    Address SharedAddr = getAddrOfArtificialThreadPrivate(
        CGF, CGM.getContext().VoidPtrTy,
        generateUniqueName(CGM, "reduction", RCG.getRefExpr(N)));
    CGF.Builder.CreateStore(
        CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
            RCG.getSharedLValue(N).getPointer(), CGM.VoidPtrTy),
        SharedAddr, /*IsVolatile=*/false);
  }
}

Address CGOpenMPRuntime::getTaskReductionItem(CodeGenFunction &CGF,
                                              SourceLocation Loc,
                                              llvm::Value *ReductionsPtr,
                                              LValue SharedLVal) {
  // Build call void *__kmpc_task_reduction_get_th_data(int gtid, void *tg,
  // void *d);
  llvm::Value *Args[] = {
      CGF.Builder.CreateIntCast(getThreadID(CGF, Loc), CGM.IntTy,
                                /*isSigned=*/true),
      ReductionsPtr,
      CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(SharedLVal.getPointer(),
                                                      CGM.VoidPtrTy)};
  return Address(
      CGF.EmitRuntimeCall(
          createRuntimeFunction(OMPRTL__kmpc_task_reduction_get_th_data),
          Args),
      SharedLVal.getAlignment());
}
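
// In a participating task this is what redirects accesses from the shared
// item to the thread-specific copy, roughly (illustrative pseudocode):
//   T *priv = (T *)__kmpc_task_reduction_get_th_data(gtid, tg, &shared);
// after which the task body reads and writes 'priv' instead of 'shared'.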

void CGOpenMPRuntime::emitTaskwaitCall(CodeGenFunction &CGF,
                                       SourceLocation Loc) {
  if (!CGF.HaveInsertPoint())
    return;
  // Build call kmp_int32 __kmpc_omp_taskwait(ident_t *loc, kmp_int32
  // global_tid);
  llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc)};
  // Ignore return result until untied tasks are supported.
  CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_omp_taskwait), Args);
  if (auto *Region = dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo))
    Region->emitUntiedSwitch(CGF);
}

void CGOpenMPRuntime::emitInlinedDirective(CodeGenFunction &CGF,
                                           OpenMPDirectiveKind InnerKind,
                                           const RegionCodeGenTy &CodeGen,
                                           bool HasCancel) {
  if (!CGF.HaveInsertPoint())
    return;
  InlinedOpenMPRegionRAII Region(CGF, CodeGen, InnerKind, HasCancel);
  CGF.CapturedStmtInfo->EmitBody(CGF, /*S=*/nullptr);
}

namespace {
enum RTCancelKind {
  CancelNoreq = 0,
  CancelParallel = 1,
  CancelLoop = 2,
  CancelSections = 3,
  CancelTaskgroup = 4
};
} // anonymous namespace

static RTCancelKind getCancellationKind(OpenMPDirectiveKind CancelRegion) {
  RTCancelKind CancelKind = CancelNoreq;
  if (CancelRegion == OMPD_parallel)
    CancelKind = CancelParallel;
  else if (CancelRegion == OMPD_for)
    CancelKind = CancelLoop;
  else if (CancelRegion == OMPD_sections)
    CancelKind = CancelSections;
  else {
    assert(CancelRegion == OMPD_taskgroup);
    CancelKind = CancelTaskgroup;
  }
  return CancelKind;
}
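
// For example, '#pragma omp cancel for' reaches the runtime as cncl_kind ==
// CancelLoop (2) and '#pragma omp cancel taskgroup' as CancelTaskgroup (4);
// the numeric values are expected to match the runtime's cancellation kinds.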

void CGOpenMPRuntime::emitCancellationPointCall(
    CodeGenFunction &CGF, SourceLocation Loc,
    OpenMPDirectiveKind CancelRegion) {
  if (!CGF.HaveInsertPoint())
    return;
  // Build call kmp_int32 __kmpc_cancellationpoint(ident_t *loc, kmp_int32
  // global_tid, kmp_int32 cncl_kind);
  if (auto *OMPRegionInfo =
          dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo)) {
    // For 'cancellation point taskgroup', the task region info may not have a
    // cancel. This may instead happen in another adjacent task.
    if (CancelRegion == OMPD_taskgroup || OMPRegionInfo->hasCancel()) {
      llvm::Value *Args[] = {
          emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
          CGF.Builder.getInt32(getCancellationKind(CancelRegion))};
      // Ignore return result until untied tasks are supported.
      llvm::Value *Result = CGF.EmitRuntimeCall(
          createRuntimeFunction(OMPRTL__kmpc_cancellationpoint), Args);
      // if (__kmpc_cancellationpoint()) {
      //   exit from construct;
      // }
      llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".cancel.exit");
      llvm::BasicBlock *ContBB = CGF.createBasicBlock(".cancel.continue");
      llvm::Value *Cmp = CGF.Builder.CreateIsNotNull(Result);
      CGF.Builder.CreateCondBr(Cmp, ExitBB, ContBB);
      CGF.EmitBlock(ExitBB);
      // exit from construct;
      CodeGenFunction::JumpDest CancelDest =
          CGF.getOMPCancelDestination(OMPRegionInfo->getDirectiveKind());
      CGF.EmitBranchThroughCleanup(CancelDest);
      CGF.EmitBlock(ContBB, /*IsFinished=*/true);
    }
  }
}

void CGOpenMPRuntime::emitCancelCall(CodeGenFunction &CGF, SourceLocation Loc,
                                     const Expr *IfCond,
                                     OpenMPDirectiveKind CancelRegion) {
  if (!CGF.HaveInsertPoint())
    return;
  // Build call kmp_int32 __kmpc_cancel(ident_t *loc, kmp_int32 global_tid,
  // kmp_int32 cncl_kind);
  if (auto *OMPRegionInfo =
          dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo)) {
    auto &&ThenGen = [Loc, CancelRegion, OMPRegionInfo](CodeGenFunction &CGF,
                                                        PrePostActionTy &) {
      CGOpenMPRuntime &RT = CGF.CGM.getOpenMPRuntime();
      llvm::Value *Args[] = {
          RT.emitUpdateLocation(CGF, Loc), RT.getThreadID(CGF, Loc),
          CGF.Builder.getInt32(getCancellationKind(CancelRegion))};
      // Ignore return result until untied tasks are supported.
      llvm::Value *Result = CGF.EmitRuntimeCall(
          RT.createRuntimeFunction(OMPRTL__kmpc_cancel), Args);
      // if (__kmpc_cancel()) {
      //   exit from construct;
      // }
      llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".cancel.exit");
      llvm::BasicBlock *ContBB = CGF.createBasicBlock(".cancel.continue");
      llvm::Value *Cmp = CGF.Builder.CreateIsNotNull(Result);
      CGF.Builder.CreateCondBr(Cmp, ExitBB, ContBB);
      CGF.EmitBlock(ExitBB);
      // exit from construct;
      CodeGenFunction::JumpDest CancelDest =
          CGF.getOMPCancelDestination(OMPRegionInfo->getDirectiveKind());
      CGF.EmitBranchThroughCleanup(CancelDest);
      CGF.EmitBlock(ContBB, /*IsFinished=*/true);
    };
    if (IfCond) {
      emitOMPIfClause(CGF, IfCond, ThenGen,
                      [](CodeGenFunction &, PrePostActionTy &) {});
    } else {
      RegionCodeGenTy ThenRCG(ThenGen);
      ThenRCG(CGF);
    }
  }
}

void CGOpenMPRuntime::emitTargetOutlinedFunction(
    const OMPExecutableDirective &D, StringRef ParentName,
    llvm::Function *&OutlinedFn, llvm::Constant *&OutlinedFnID,
    bool IsOffloadEntry, const RegionCodeGenTy &CodeGen) {
  assert(!ParentName.empty() && "Invalid target region parent name!");
  emitTargetOutlinedFunctionHelper(D, ParentName, OutlinedFn, OutlinedFnID,
                                   IsOffloadEntry, CodeGen);
}

void CGOpenMPRuntime::emitTargetOutlinedFunctionHelper(
    const OMPExecutableDirective &D, StringRef ParentName,
    llvm::Function *&OutlinedFn, llvm::Constant *&OutlinedFnID,
    bool IsOffloadEntry, const RegionCodeGenTy &CodeGen) {
  // Create a unique name for the entry function using the source location
  // information of the current target region. The name will be something like:
  //
  // __omp_offloading_DD_FFFF_PP_lBB
  //
  // where DD_FFFF is an ID unique to the file (device and file IDs), PP is the
  // mangled name of the function that encloses the target region and BB is the
  // line number of the target region.
  unsigned DeviceID;
  unsigned FileID;
  unsigned Line;
  getTargetEntryUniqueInfo(CGM.getContext(), D.getBeginLoc(), DeviceID, FileID,
                           Line);
  SmallString<64> EntryFnName;
  {
    llvm::raw_svector_ostream OS(EntryFnName);
    OS << "__omp_offloading" << llvm::format("_%x", DeviceID)
       << llvm::format("_%x_", FileID) << ParentName << "_l" << Line;
  }
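
  // For instance, a target region on line 25 of a function 'foo' might be
  // named __omp_offloading_805_1e3559d_foo_l25 (the hex device and file IDs
  // here are made-up values; the real ones come from
  // getTargetEntryUniqueInfo).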

  const CapturedStmt &CS = *D.getCapturedStmt(OMPD_target);

  CodeGenFunction CGF(CGM, true);
  CGOpenMPTargetRegionInfo CGInfo(CS, CodeGen, EntryFnName);
  CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CGInfo);

  OutlinedFn = CGF.GenerateOpenMPCapturedStmtFunction(CS);

  // If this target outline function is not an offload entry, we don't need to
  // register it.
  if (!IsOffloadEntry)
    return;

  // The target region ID is used by the runtime library to identify the
  // current target region, so it only has to be unique and not necessarily
  // point to anything. It could be the pointer to the outlined function that
  // implements the target region, but we aren't using that so that the
  // compiler doesn't need to keep that, and could therefore inline the host
  // function if proven worthwhile during optimization. On the other hand, if
  // emitting code for the device, the ID has to be the function address so
  // that it can be retrieved from the offloading entry and launched by the
  // runtime library. We also mark the outlined function to have external
  // linkage in case we are emitting code for the device, because these
  // functions will be entry points to the device.
  if (CGM.getLangOpts().OpenMPIsDevice) {
    OutlinedFnID = llvm::ConstantExpr::getBitCast(OutlinedFn, CGM.Int8PtrTy);
    OutlinedFn->setLinkage(llvm::GlobalValue::WeakAnyLinkage);
    OutlinedFn->setDSOLocal(false);
  } else {
    std::string Name = getName({EntryFnName, "region_id"});
    OutlinedFnID = new llvm::GlobalVariable(
        CGM.getModule(), CGM.Int8Ty, /*isConstant=*/true,
        llvm::GlobalValue::WeakAnyLinkage,
        llvm::Constant::getNullValue(CGM.Int8Ty), Name);
  }

  // Register the information for the entry associated with this target region.
  OffloadEntriesInfoManager.registerTargetRegionEntryInfo(
      DeviceID, FileID, ParentName, Line, OutlinedFn, OutlinedFnID,
      OffloadEntriesInfoManagerTy::OMPTargetRegionEntryTargetRegion);
}

/// Discard all CompoundStmts intervening between two constructs.
static const Stmt *ignoreCompoundStmts(const Stmt *Body) {
  while (const auto *CS = dyn_cast_or_null<CompoundStmt>(Body))
    Body = CS->body_front();
  return Body;
}
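
// E.g. for a captured body written as '{ { #pragma omp teams ... } }' this
// peels off the braces and returns the teams directive itself, so the
// num_teams/thread_limit helpers below can inspect a closely nested (not
// combined) teams construct directly.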

/// Emit the number of teams for a target directive. Inspect the num_teams
/// clause associated with a teams construct combined or closely nested
/// with the target directive.
///
/// Emit a team of size one for directives such as 'target parallel' that
/// have no associated teams construct.
///
/// Otherwise, return nullptr.
static llvm::Value *
emitNumTeamsForTargetDirective(CGOpenMPRuntime &OMPRuntime,
                               CodeGenFunction &CGF,
                               const OMPExecutableDirective &D) {
  assert(!CGF.getLangOpts().OpenMPIsDevice && "Clauses associated with the "
                                              "teams directive expected to be "
                                              "emitted only for the host!");
  CGBuilderTy &Bld = CGF.Builder;

  // If the target directive is combined with a teams directive:
  //   Return the value in the num_teams clause, if any.
  //   Otherwise, return 0 to denote the runtime default.
  if (isOpenMPTeamsDirective(D.getDirectiveKind())) {
    if (const auto *NumTeamsClause = D.getSingleClause<OMPNumTeamsClause>()) {
      CodeGenFunction::RunCleanupsScope NumTeamsScope(CGF);
      llvm::Value *NumTeams = CGF.EmitScalarExpr(NumTeamsClause->getNumTeams(),
                                                 /*IgnoreResultAssign*/ true);
      return Bld.CreateIntCast(NumTeams, CGF.Int32Ty,
                               /*IsSigned=*/true);
    }

    // The default value is 0.
    return Bld.getInt32(0);
  }

  // If the target directive is combined with a parallel directive but not a
  // teams directive, start one team.
  if (isOpenMPParallelDirective(D.getDirectiveKind()))
    return Bld.getInt32(1);

  // If the current target region has a teams region enclosed, we need to get
  // the number of teams to pass to the runtime function call. This is done
  // by generating the expression in an inlined region. This is required
  // because the expression is captured in the enclosing target environment
  // when the teams directive is not combined with target.
  const CapturedStmt &CS = *D.getCapturedStmt(OMPD_target);
  if (const auto *TeamsDir = dyn_cast_or_null<OMPExecutableDirective>(
          ignoreCompoundStmts(CS.getCapturedStmt()))) {
    if (isOpenMPTeamsDirective(TeamsDir->getDirectiveKind())) {
      if (const auto *NTE = TeamsDir->getSingleClause<OMPNumTeamsClause>()) {
        CGOpenMPInnerExprInfo CGInfo(CGF, CS);
        CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CGInfo);
        llvm::Value *NumTeams = CGF.EmitScalarExpr(NTE->getNumTeams());
        return Bld.CreateIntCast(NumTeams, CGF.Int32Ty,
                                 /*IsSigned=*/true);
      }

      // If we have an enclosed teams directive but no num_teams clause we
      // use the default value 0.
      return Bld.getInt32(0);
    }
  }

  // No teams associated with the directive.
  return nullptr;
}

/// Emit the number of threads for a target directive. Inspect the
/// thread_limit clause associated with a teams construct combined or closely
/// nested with the target directive.
///
/// Emit the num_threads clause for directives such as 'target parallel' that
/// have no associated teams construct.
///
/// Otherwise, return nullptr.
static llvm::Value *
emitNumThreadsForTargetDirective(CGOpenMPRuntime &OMPRuntime,
                                 CodeGenFunction &CGF,
                                 const OMPExecutableDirective &D) {
  assert(!CGF.getLangOpts().OpenMPIsDevice && "Clauses associated with the "
                                              "teams directive expected to be "
                                              "emitted only for the host!");
  CGBuilderTy &Bld = CGF.Builder;

  //
  // If the target directive is combined with a teams directive:
  //   Return the value in the thread_limit clause, if any.
  //
  // If the target directive is combined with a parallel directive:
  //   Return the value in the num_threads clause, if any.
  //
  // If both clauses are set, select the minimum of the two.
  //
  // If neither teams nor parallel combined directives set the number of
  // threads in a team, return 0 to denote the runtime default.
  //
  // If this is not a teams directive return nullptr.
  if (isOpenMPTeamsDirective(D.getDirectiveKind()) ||
      isOpenMPParallelDirective(D.getDirectiveKind())) {
    llvm::Value *DefaultThreadLimitVal = Bld.getInt32(0);
    llvm::Value *NumThreadsVal = nullptr;
    llvm::Value *ThreadLimitVal = nullptr;

    if (const auto *ThreadLimitClause =
            D.getSingleClause<OMPThreadLimitClause>()) {
      CodeGenFunction::RunCleanupsScope ThreadLimitScope(CGF);
      llvm::Value *ThreadLimit =
          CGF.EmitScalarExpr(ThreadLimitClause->getThreadLimit(),
                             /*IgnoreResultAssign*/ true);
      ThreadLimitVal = Bld.CreateIntCast(ThreadLimit, CGF.Int32Ty,
                                         /*IsSigned=*/true);
    }

    if (const auto *NumThreadsClause =
            D.getSingleClause<OMPNumThreadsClause>()) {
      CodeGenFunction::RunCleanupsScope NumThreadsScope(CGF);
      llvm::Value *NumThreads =
          CGF.EmitScalarExpr(NumThreadsClause->getNumThreads(),
                             /*IgnoreResultAssign*/ true);
      NumThreadsVal =
          Bld.CreateIntCast(NumThreads, CGF.Int32Ty, /*IsSigned=*/true);
    }

    // Select the lesser of thread_limit and num_threads.
    if (NumThreadsVal)
      ThreadLimitVal =
          ThreadLimitVal
              ? Bld.CreateSelect(Bld.CreateICmpSLT(NumThreadsVal,
                                                   ThreadLimitVal),
                                 NumThreadsVal, ThreadLimitVal)
              : NumThreadsVal;

    // Set default value passed to the runtime if either teams or a target
    // parallel type directive is found but no clause is specified.
    if (!ThreadLimitVal)
      ThreadLimitVal = DefaultThreadLimitVal;

    return ThreadLimitVal;
  }

  // If the current target region has a teams region enclosed, we need to get
  // the thread limit to pass to the runtime function call. This is done
  // by generating the expression in an inlined region. This is required
  // because the expression is captured in the enclosing target environment
  // when the teams directive is not combined with target.
  const CapturedStmt &CS = *D.getCapturedStmt(OMPD_target);
  if (const auto *TeamsDir = dyn_cast_or_null<OMPExecutableDirective>(
          ignoreCompoundStmts(CS.getCapturedStmt()))) {
    if (isOpenMPTeamsDirective(TeamsDir->getDirectiveKind())) {
      if (const auto *TLE = TeamsDir->getSingleClause<OMPThreadLimitClause>()) {
        CGOpenMPInnerExprInfo CGInfo(CGF, CS);
        CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CGInfo);
        llvm::Value *ThreadLimit = CGF.EmitScalarExpr(TLE->getThreadLimit());
        return CGF.Builder.CreateIntCast(ThreadLimit, CGF.Int32Ty,
                                         /*IsSigned=*/true);
      }

      // If we have an enclosed teams directive but no thread_limit clause we
      // use the default value 0.
      return CGF.Builder.getInt32(0);
    }
  }

  // No teams associated with the directive.
  return nullptr;
}
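
// For example (clause values hypothetical): on a combined directive such as
//   #pragma omp target teams distribute parallel for thread_limit(8) \
//       num_threads(4)
// both clauses are present, so the code above selects min(8, 4) = 4; with no
// clause at all it returns the default 0 and the runtime chooses.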

namespace {
LLVM_ENABLE_BITMASK_ENUMS_IN_NAMESPACE();

// Utility to handle information from clauses associated with a given
// construct that use mappable expressions (e.g. 'map' clause, 'to' clause).
// It provides a convenient interface to obtain the information and generate
// code for that information.
class MappableExprsHandler {
public:
  /// Values for bit flags used to specify the mapping type for
  /// offloading.
  enum OpenMPOffloadMappingFlags : uint64_t {
    /// No flags
    OMP_MAP_NONE = 0x0,
    /// Allocate memory on the device and move data from host to device.
    OMP_MAP_TO = 0x01,
    /// Allocate memory on the device and move data from device to host.
    OMP_MAP_FROM = 0x02,
    /// Always perform the requested mapping action on the element, even
    /// if it was already mapped before.
    OMP_MAP_ALWAYS = 0x04,
    /// Delete the element from the device environment, ignoring the
    /// current reference count associated with the element.
    OMP_MAP_DELETE = 0x08,
    /// The element being mapped is a pointer-pointee pair; both the
    /// pointer and the pointee should be mapped.
    OMP_MAP_PTR_AND_OBJ = 0x10,
    /// This flag signals that the base address of an entry should be
    /// passed to the target kernel as an argument.
    OMP_MAP_TARGET_PARAM = 0x20,
    /// Signal that the runtime library has to return the device pointer
    /// in the current position for the data being mapped. Used when we have
    /// the use_device_ptr clause.
    OMP_MAP_RETURN_PARAM = 0x40,
    /// This flag signals that the reference being passed is a pointer to
    /// private data.
    OMP_MAP_PRIVATE = 0x80,
    /// Pass the element to the device by value.
    OMP_MAP_LITERAL = 0x100,
    /// Implicit map
    OMP_MAP_IMPLICIT = 0x200,
    /// The 16 MSBs of the flags indicate whether the entry is member of some
    /// struct/class.
    OMP_MAP_MEMBER_OF = 0xffff000000000000,
    LLVM_MARK_AS_BITMASK_ENUM(/* LargestFlag = */ OMP_MAP_MEMBER_OF),
  };
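
  // By way of example (a combination assembled from the flags above, not a
  // new flag): an explicit 'map(tofrom: x)' capture that is also a kernel
  // argument is encoded as OMP_MAP_TO | OMP_MAP_FROM | OMP_MAP_TARGET_PARAM
  // (0x23), and a struct member entry additionally carries its parent's
  // position in the 16 MSBs covered by OMP_MAP_MEMBER_OF.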

  /// Class that associates information with a base pointer to be passed to the
  /// runtime library.
  class BasePointerInfo {
    /// The base pointer.
    llvm::Value *Ptr = nullptr;
    /// The base declaration that refers to this device pointer, or null if
    /// there is none.
    const ValueDecl *DevPtrDecl = nullptr;

  public:
    BasePointerInfo(llvm::Value *Ptr, const ValueDecl *DevPtrDecl = nullptr)
        : Ptr(Ptr), DevPtrDecl(DevPtrDecl) {}
    llvm::Value *operator*() const { return Ptr; }
    const ValueDecl *getDevicePtrDecl() const { return DevPtrDecl; }
    void setDevicePtrDecl(const ValueDecl *D) { DevPtrDecl = D; }
  };

  using MapBaseValuesArrayTy = SmallVector<BasePointerInfo, 4>;
  using MapValuesArrayTy = SmallVector<llvm::Value *, 4>;
  using MapFlagsArrayTy = SmallVector<OpenMPOffloadMappingFlags, 4>;

  /// Map between a struct and its lowest & highest elements which have been
  /// mapped.
  /// [ValueDecl *] --> {LE(FieldIndex, Pointer),
  ///                    HE(FieldIndex, Pointer)}
  struct StructRangeInfoTy {
    std::pair<unsigned /*FieldIndex*/, Address /*Pointer*/> LowestElem = {
        0, Address::invalid()};
    std::pair<unsigned /*FieldIndex*/, Address /*Pointer*/> HighestElem = {
        0, Address::invalid()};
    Address Base = Address::invalid();
  };

private:
  /// Kind that defines how a device pointer has to be returned.
  struct MapInfo {
    OMPClauseMappableExprCommon::MappableExprComponentListRef Components;
    OpenMPMapClauseKind MapType = OMPC_MAP_unknown;
    OpenMPMapClauseKind MapTypeModifier = OMPC_MAP_unknown;
    bool ReturnDevicePointer = false;
    bool IsImplicit = false;

    MapInfo() = default;
    MapInfo(
        OMPClauseMappableExprCommon::MappableExprComponentListRef Components,
        OpenMPMapClauseKind MapType, OpenMPMapClauseKind MapTypeModifier,
        bool ReturnDevicePointer, bool IsImplicit)
        : Components(Components), MapType(MapType),
          MapTypeModifier(MapTypeModifier),
          ReturnDevicePointer(ReturnDevicePointer), IsImplicit(IsImplicit) {}
  };

  /// If use_device_ptr is used on a pointer which is a struct member and there
  /// is no map information about it, then emission of that entry is deferred
  /// until the whole struct has been processed.
  struct DeferredDevicePtrEntryTy {
    const Expr *IE = nullptr;
    const ValueDecl *VD = nullptr;

    DeferredDevicePtrEntryTy(const Expr *IE, const ValueDecl *VD)
        : IE(IE), VD(VD) {}
  };

  /// Directive from where the map clauses were extracted.
  const OMPExecutableDirective &CurDir;

  /// Function the directive is being generated for.
  CodeGenFunction &CGF;

  /// Set of all first private variables in the current directive.
  llvm::SmallPtrSet<const VarDecl *, 8> FirstPrivateDecls;

  /// Map between device pointer declarations and their expression components.
  /// The key value for declarations in 'this' is null.
  llvm::DenseMap<
      const ValueDecl *,
      SmallVector<OMPClauseMappableExprCommon::MappableExprComponentListRef, 4>>
      DevPointersMap;

  llvm::Value *getExprTypeSize(const Expr *E) const {
    QualType ExprTy = E->getType().getCanonicalType();

    // Reference types are ignored for mapping purposes.
    if (const auto *RefTy = ExprTy->getAs<ReferenceType>())
      ExprTy = RefTy->getPointeeType().getCanonicalType();

    // Given that an array section is considered a built-in type, we need to
    // do the calculation based on the length of the section instead of relying
    // on CGF.getTypeSize(E->getType()).
    if (const auto *OAE = dyn_cast<OMPArraySectionExpr>(E)) {
      QualType BaseTy = OMPArraySectionExpr::getBaseOriginalType(
                            OAE->getBase()->IgnoreParenImpCasts())
                            .getCanonicalType();

      // If there is no length associated with the expression, that means we
      // are using the whole length of the base.
      if (!OAE->getLength() && OAE->getColonLoc().isValid())
        return CGF.getTypeSize(BaseTy);

      llvm::Value *ElemSize;
      if (const auto *PTy = BaseTy->getAs<PointerType>()) {
        ElemSize = CGF.getTypeSize(PTy->getPointeeType().getCanonicalType());
      } else {
        const auto *ATy = cast<ArrayType>(BaseTy.getTypePtr());
        assert(ATy && "Expecting array type if not a pointer type.");
        ElemSize = CGF.getTypeSize(ATy->getElementType().getCanonicalType());
      }

      // If we don't have a length at this point, that is because we have an
      // array section with a single element.
      if (!OAE->getLength())
        return ElemSize;

      llvm::Value *LengthVal = CGF.EmitScalarExpr(OAE->getLength());
      LengthVal =
          CGF.Builder.CreateIntCast(LengthVal, CGF.SizeTy, /*isSigned=*/false);
      return CGF.Builder.CreateNUWMul(LengthVal, ElemSize);
    }
    return CGF.getTypeSize(ExprTy);
  }

  /// Return the corresponding bits for a given map clause modifier. Add
  /// a flag marking the map as a pointer if requested. Add a flag marking the
  /// map as the first one of a series of maps that relate to the same map
  /// expression.
  OpenMPOffloadMappingFlags getMapTypeBits(OpenMPMapClauseKind MapType,
                                           OpenMPMapClauseKind MapTypeModifier,
                                           bool IsImplicit, bool AddPtrFlag,
                                           bool AddIsTargetParamFlag) const {
    OpenMPOffloadMappingFlags Bits =
        IsImplicit ? OMP_MAP_IMPLICIT : OMP_MAP_NONE;
    switch (MapType) {
    case OMPC_MAP_alloc:
    case OMPC_MAP_release:
      // alloc and release is the default behavior in the runtime library, i.e.
      // if we don't pass any bits alloc/release that is what the runtime is
      // going to do. Therefore, we don't need to signal anything for these two
      // type modifiers.
      break;
    case OMPC_MAP_to:
      Bits |= OMP_MAP_TO;
      break;
    case OMPC_MAP_from:
      Bits |= OMP_MAP_FROM;
      break;
    case OMPC_MAP_tofrom:
      Bits |= OMP_MAP_TO | OMP_MAP_FROM;
      break;
    case OMPC_MAP_delete:
      Bits |= OMP_MAP_DELETE;
      break;
    case OMPC_MAP_always:
    case OMPC_MAP_unknown:
      llvm_unreachable("Unexpected map type!");
    }
    if (AddPtrFlag)
      Bits |= OMP_MAP_PTR_AND_OBJ;
    if (AddIsTargetParamFlag)
      Bits |= OMP_MAP_TARGET_PARAM;
    if (MapTypeModifier == OMPC_MAP_always)
      Bits |= OMP_MAP_ALWAYS;
    return Bits;
  }
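
  // E.g. 'map(always, to: x)' arrives here as MapType == OMPC_MAP_to with
  // MapTypeModifier == OMPC_MAP_always and yields OMP_MAP_TO | OMP_MAP_ALWAYS
  // (0x05) before any pointer/target-param bits are OR'ed in.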

  /// Return true if the provided expression is a final array section. A
  /// final array section is one whose length can't be proved to be one.
  bool isFinalArraySectionExpression(const Expr *E) const {
    const auto *OASE = dyn_cast<OMPArraySectionExpr>(E);

    // It is not an array section and therefore not a unity-size one.
    if (!OASE)
      return false;

    // An array section with no colon always refers to a single element.
    if (OASE->getColonLoc().isInvalid())
      return false;

    const Expr *Length = OASE->getLength();

    // If we don't have a length we have to check if the array has size 1
    // for this dimension. Also, we should always expect a length if the
    // base type is a pointer.
    if (!Length) {
      QualType BaseQTy = OMPArraySectionExpr::getBaseOriginalType(
                             OASE->getBase()->IgnoreParenImpCasts())
                             .getCanonicalType();
      if (const auto *ATy = dyn_cast<ConstantArrayType>(BaseQTy.getTypePtr()))
        return ATy->getSize().getSExtValue() != 1;
      // If we don't have a constant dimension length, we have to consider
      // the current section as having any size, so it is not necessarily
      // unitary. If it happens to be unity size, that's the user's fault.
      return true;
    }

    // Check if the length evaluates to 1.
    llvm::APSInt ConstLength;
    if (!Length->EvaluateAsInt(ConstLength, CGF.getContext()))
      return true; // Can have a size greater than 1.
    return ConstLength.getSExtValue() != 1;
  }
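
  // Concretely (declarations hypothetical): given 'int a[10]; int n;', the
  // section 'a[2:1]' is not final because its length constant-folds to 1,
  // while 'a[2:n]' is final because nothing proves the length is one.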
  6392. /// Generate the base pointers, section pointers, sizes and map type
  6393. /// bits for the provided map type, map modifier, and expression components.
  6394. /// \a IsFirstComponent should be set to true if the provided set of
  6395. /// components is the first associated with a capture.
  6396. void generateInfoForComponentList(
  6397. OpenMPMapClauseKind MapType, OpenMPMapClauseKind MapTypeModifier,
  6398. OMPClauseMappableExprCommon::MappableExprComponentListRef Components,
  6399. MapBaseValuesArrayTy &BasePointers, MapValuesArrayTy &Pointers,
  6400. MapValuesArrayTy &Sizes, MapFlagsArrayTy &Types,
  6401. StructRangeInfoTy &PartialStruct, bool IsFirstComponentList,
  6402. bool IsImplicit,
  6403. ArrayRef<OMPClauseMappableExprCommon::MappableExprComponentListRef>
  6404. OverlappedElements = llvm::None) const {
    // The following summarizes what has to be generated for each map and the
    // types below. The generated information is expressed in this order:
    // base pointer, section pointer, size, flags
    // (to add to the ones that come from the map type and modifier).
    //
    // double d;
    // int i[100];
    // float *p;
    //
    // struct S1 {
    //   int i;
    //   float f[50];
    // }
    // struct S2 {
    //   int i;
    //   float f[50];
    //   S1 s;
    //   double *p;
    //   struct S2 *ps;
    // }
    // S2 s;
    // S2 *ps;
    //
    // map(d)
    // &d, &d, sizeof(double), TARGET_PARAM | TO | FROM
    //
    // map(i)
    // &i, &i, 100*sizeof(int), TARGET_PARAM | TO | FROM
    //
    // map(i[1:23])
    // &i(=&i[0]), &i[1], 23*sizeof(int), TARGET_PARAM | TO | FROM
    //
    // map(p)
    // &p, &p, sizeof(float*), TARGET_PARAM | TO | FROM
    //
    // map(p[1:24])
    // p, &p[1], 24*sizeof(float), TARGET_PARAM | TO | FROM
    //
    // map(s)
    // &s, &s, sizeof(S2), TARGET_PARAM | TO | FROM
    //
    // map(s.i)
    // &s, &(s.i), sizeof(int), TARGET_PARAM | TO | FROM
    //
    // map(s.s.f)
    // &s, &(s.s.f[0]), 50*sizeof(float), TARGET_PARAM | TO | FROM
    //
    // map(s.p)
    // &s, &(s.p), sizeof(double*), TARGET_PARAM | TO | FROM
    //
    // map(to: s.p[:22])
    // &s, &(s.p), sizeof(double*), TARGET_PARAM (*)
    // &s, &(s.p), sizeof(double*), MEMBER_OF(1) (**)
    // &(s.p), &(s.p[0]), 22*sizeof(double),
    //     MEMBER_OF(1) | PTR_AND_OBJ | TO (***)
    // (*) allocate space for struct members, only this is a target parameter
    // (**) map the pointer (nothing to be mapped in this example) (the compiler
    //      optimizes this entry out, same in the examples below)
    // (***) map the pointee (map: to)
    //
    // map(s.ps)
    // &s, &(s.ps), sizeof(S2*), TARGET_PARAM | TO | FROM
    //
    // map(from: s.ps->s.i)
    // &s, &(s.ps), sizeof(S2*), TARGET_PARAM
    // &s, &(s.ps), sizeof(S2*), MEMBER_OF(1)
    // &(s.ps), &(s.ps->s.i), sizeof(int), MEMBER_OF(1) | PTR_AND_OBJ | FROM
    //
    // map(to: s.ps->ps)
    // &s, &(s.ps), sizeof(S2*), TARGET_PARAM
    // &s, &(s.ps), sizeof(S2*), MEMBER_OF(1)
    // &(s.ps), &(s.ps->ps), sizeof(S2*), MEMBER_OF(1) | PTR_AND_OBJ | TO
    //
    // map(s.ps->ps->ps)
    // &s, &(s.ps), sizeof(S2*), TARGET_PARAM
    // &s, &(s.ps), sizeof(S2*), MEMBER_OF(1)
    // &(s.ps), &(s.ps->ps), sizeof(S2*), MEMBER_OF(1) | PTR_AND_OBJ
    // &(s.ps->ps), &(s.ps->ps->ps), sizeof(S2*), PTR_AND_OBJ | TO | FROM
    //
    // map(to: s.ps->ps->s.f[:22])
    // &s, &(s.ps), sizeof(S2*), TARGET_PARAM
    // &s, &(s.ps), sizeof(S2*), MEMBER_OF(1)
    // &(s.ps), &(s.ps->ps), sizeof(S2*), MEMBER_OF(1) | PTR_AND_OBJ
    // &(s.ps->ps), &(s.ps->ps->s.f[0]), 22*sizeof(float), PTR_AND_OBJ | TO
    //
    // map(ps)
    // &ps, &ps, sizeof(S2*), TARGET_PARAM | TO | FROM
    //
    // map(ps->i)
    // ps, &(ps->i), sizeof(int), TARGET_PARAM | TO | FROM
    //
    // map(ps->s.f)
    // ps, &(ps->s.f[0]), 50*sizeof(float), TARGET_PARAM | TO | FROM
    //
    // map(from: ps->p)
    // ps, &(ps->p), sizeof(double*), TARGET_PARAM | FROM
    //
    // map(to: ps->p[:22])
    // ps, &(ps->p), sizeof(double*), TARGET_PARAM
    // ps, &(ps->p), sizeof(double*), MEMBER_OF(1)
    // &(ps->p), &(ps->p[0]), 22*sizeof(double), MEMBER_OF(1) | PTR_AND_OBJ | TO
    //
    // map(ps->ps)
    // ps, &(ps->ps), sizeof(S2*), TARGET_PARAM | TO | FROM
    //
    // map(from: ps->ps->s.i)
    // ps, &(ps->ps), sizeof(S2*), TARGET_PARAM
    // ps, &(ps->ps), sizeof(S2*), MEMBER_OF(1)
    // &(ps->ps), &(ps->ps->s.i), sizeof(int), MEMBER_OF(1) | PTR_AND_OBJ | FROM
    //
    // map(from: ps->ps->ps)
    // ps, &(ps->ps), sizeof(S2*), TARGET_PARAM
    // ps, &(ps->ps), sizeof(S2*), MEMBER_OF(1)
    // &(ps->ps), &(ps->ps->ps), sizeof(S2*), MEMBER_OF(1) | PTR_AND_OBJ | FROM
    //
    // map(ps->ps->ps->ps)
    // ps, &(ps->ps), sizeof(S2*), TARGET_PARAM
    // ps, &(ps->ps), sizeof(S2*), MEMBER_OF(1)
    // &(ps->ps), &(ps->ps->ps), sizeof(S2*), MEMBER_OF(1) | PTR_AND_OBJ
    // &(ps->ps->ps), &(ps->ps->ps->ps), sizeof(S2*), PTR_AND_OBJ | TO | FROM
    //
    // map(to: ps->ps->ps->s.f[:22])
    // ps, &(ps->ps), sizeof(S2*), TARGET_PARAM
    // ps, &(ps->ps), sizeof(S2*), MEMBER_OF(1)
    // &(ps->ps), &(ps->ps->ps), sizeof(S2*), MEMBER_OF(1) | PTR_AND_OBJ
    // &(ps->ps->ps), &(ps->ps->ps->s.f[0]), 22*sizeof(float), PTR_AND_OBJ | TO
    //
    // map(to: s.f[:22]) map(from: s.p[:33])
    // &s, &(s.f[0]), 50*sizeof(float) + sizeof(struct S1) +
    //     sizeof(double*) (*), TARGET_PARAM
    // &s, &(s.f[0]), 22*sizeof(float), MEMBER_OF(1) | TO
    // &s, &(s.p), sizeof(double*), MEMBER_OF(1)
    // &(s.p), &(s.p[0]), 33*sizeof(double), MEMBER_OF(1) | PTR_AND_OBJ | FROM
    // (*) allocate contiguous space needed to fit all mapped members even if
    //     we allocate space for members not mapped (in this example,
    //     s.f[22..49] and s.s are not mapped, yet we must allocate space for
    //     them as well because they fall between &s.f[0] and &s.p)
    //
    // map(from: s.f[:22]) map(to: ps->p[:33])
    // &s, &(s.f[0]), 22*sizeof(float), TARGET_PARAM | FROM
    // ps, &(ps->p), sizeof(S2*), TARGET_PARAM
    // ps, &(ps->p), sizeof(double*), MEMBER_OF(2) (*)
    // &(ps->p), &(ps->p[0]), 33*sizeof(double), MEMBER_OF(2) | PTR_AND_OBJ | TO
    // (*) the struct this entry pertains to is the 2nd element in the list of
    //     arguments, hence MEMBER_OF(2)
    //
    // map(from: s.f[:22], s.s) map(to: ps->p[:33])
    // &s, &(s.f[0]), 50*sizeof(float) + sizeof(struct S1), TARGET_PARAM
    // &s, &(s.f[0]), 22*sizeof(float), MEMBER_OF(1) | FROM
    // &s, &(s.s), sizeof(struct S1), MEMBER_OF(1) | FROM
    // ps, &(ps->p), sizeof(S2*), TARGET_PARAM
    // ps, &(ps->p), sizeof(double*), MEMBER_OF(4) (*)
    // &(ps->p), &(ps->p[0]), 33*sizeof(double), MEMBER_OF(4) | PTR_AND_OBJ | TO
    // (*) the struct this entry pertains to is the 4th element in the list
    //     of arguments, hence MEMBER_OF(4)
    // Track if the map information being generated is the first for a capture.
    bool IsCaptureFirstInfo = IsFirstComponentList;
    bool IsLink = false; // Is this variable a "declare target link"?

    // Scan the components from the base to the complete expression.
    auto CI = Components.rbegin();
    auto CE = Components.rend();
    auto I = CI;

    // Track if the map information being generated is the first for a list of
    // components.
    bool IsExpressionFirstInfo = true;
    Address BP = Address::invalid();

    if (isa<MemberExpr>(I->getAssociatedExpression())) {
      // The base is the 'this' pointer. The content of the pointer is going
      // to be the base of the field being mapped.
      BP = CGF.LoadCXXThisAddress();
    } else {
      // The base is the reference to the variable.
      // BP = &Var.
      BP = CGF.EmitOMPSharedLValue(I->getAssociatedExpression()).getAddress();
      if (const auto *VD =
              dyn_cast_or_null<VarDecl>(I->getAssociatedDeclaration())) {
        if (llvm::Optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
                OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD))
          if (*Res == OMPDeclareTargetDeclAttr::MT_Link) {
            IsLink = true;
            BP = CGF.CGM.getOpenMPRuntime().getAddrOfDeclareTargetLink(VD);
          }
      }

      // If the variable is a pointer and is being dereferenced (i.e. is not
      // the last component), the base has to be the pointer itself, not its
      // reference. References are ignored for mapping purposes.
      QualType Ty =
          I->getAssociatedDeclaration()->getType().getNonReferenceType();
      if (Ty->isAnyPointerType() && std::next(I) != CE) {
        BP = CGF.EmitLoadOfPointer(BP, Ty->castAs<PointerType>());

        // We do not need to generate individual map information for the
        // pointer, it can be associated with the combined storage.
        ++I;
      }
    }

    // Track whether a component of the list should be marked as MEMBER_OF some
    // combined entry (for partial structs). Only the first PTR_AND_OBJ entry
    // in a component list should be marked as MEMBER_OF, all subsequent entries
    // do not belong to the base struct. E.g.
    // struct S2 s;
    // s.ps->ps->ps->f[:]
    //   (1)  (2)  (3)  (4)
    // ps(1) is a member pointer, ps(2) is a pointee of ps(1), so it is a
    // PTR_AND_OBJ entry; the PTR is ps(1), so MEMBER_OF the base struct. ps(3)
    // is the pointee of ps(2), which is not a member of struct s, so it should
    // not be marked as such (it is still PTR_AND_OBJ).
    // The variable is initialized to false so that PTR_AND_OBJ entries which
    // are not struct members are not considered (e.g. array of pointers to
    // data).
    bool ShouldBeMemberOf = false;

    // Variable keeping track of whether or not we have encountered a component
    // in the component list which is a member expression. Useful when we have a
    // pointer or a final array section, in which case it is the previous
    // component in the list which tells us whether we have a member expression.
    // E.g. X.f[:]
    // While processing the final array section "[:]" it is "f" which tells us
    // whether we are dealing with a member of a declared struct.
    const MemberExpr *EncounteredME = nullptr;
    for (; I != CE; ++I) {
      // If the current component is member of a struct (parent struct) mark it.
      if (!EncounteredME) {
        EncounteredME = dyn_cast<MemberExpr>(I->getAssociatedExpression());
        // If we encounter a PTR_AND_OBJ entry from now on it should be marked
        // as MEMBER_OF the parent struct.
        if (EncounteredME)
          ShouldBeMemberOf = true;
      }

      auto Next = std::next(I);

      // We need to generate the addresses and sizes if this is the last
      // component, if the component is a pointer or if it is an array section
      // whose length can't be proved to be one. If this is a pointer, it
      // becomes the base address for the following components.

      // A final array section is one whose length can't be proved to be one.
      bool IsFinalArraySection =
          isFinalArraySectionExpression(I->getAssociatedExpression());

      // Get information on whether the element is a pointer. Have to do a
      // special treatment for array sections given that they are built-in
      // types.
      const auto *OASE =
          dyn_cast<OMPArraySectionExpr>(I->getAssociatedExpression());
      bool IsPointer =
          (OASE && OMPArraySectionExpr::getBaseOriginalType(OASE)
                       .getCanonicalType()
                       ->isAnyPointerType()) ||
          I->getAssociatedExpression()->getType()->isAnyPointerType();

      if (Next == CE || IsPointer || IsFinalArraySection) {
        // If this is not the last component, we expect the pointer to be
        // associated with an array expression or member expression.
        assert((Next == CE ||
                isa<MemberExpr>(Next->getAssociatedExpression()) ||
                isa<ArraySubscriptExpr>(Next->getAssociatedExpression()) ||
                isa<OMPArraySectionExpr>(Next->getAssociatedExpression())) &&
               "Unexpected expression");

        Address LB =
            CGF.EmitOMPSharedLValue(I->getAssociatedExpression()).getAddress();

        // If this component is a pointer inside the base struct then we don't
        // need to create any entry for it - it will be combined with the object
        // it is pointing to into a single PTR_AND_OBJ entry.
        bool IsMemberPointer =
            IsPointer && EncounteredME &&
            (dyn_cast<MemberExpr>(I->getAssociatedExpression()) ==
             EncounteredME);
        if (!OverlappedElements.empty()) {
          // Handle base element with the info for overlapped elements.
          assert(!PartialStruct.Base.isValid() && "The base element is set.");
          assert(Next == CE &&
                 "Expected last element for the overlapped elements.");
          assert(!IsPointer &&
                 "Unexpected base element with the pointer type.");
          // Mark the whole struct as the struct that requires allocation on the
          // device.
          PartialStruct.LowestElem = {0, LB};
          CharUnits TypeSize = CGF.getContext().getTypeSizeInChars(
              I->getAssociatedExpression()->getType());
          Address HB = CGF.Builder.CreateConstGEP(
              CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(LB,
                                                              CGF.VoidPtrTy),
              TypeSize.getQuantity() - 1, CharUnits::One());
          PartialStruct.HighestElem = {
              std::numeric_limits<decltype(
                  PartialStruct.HighestElem.first)>::max(),
              HB};
          PartialStruct.Base = BP;
          // Emit data for non-overlapped data.
          OpenMPOffloadMappingFlags Flags =
              OMP_MAP_MEMBER_OF |
              getMapTypeBits(MapType, MapTypeModifier, IsImplicit,
                             /*AddPtrFlag=*/false,
                             /*AddIsTargetParamFlag=*/false);
          LB = BP;
          llvm::Value *Size = nullptr;
          // Do bitcopy of all non-overlapped structure elements.
          for (OMPClauseMappableExprCommon::MappableExprComponentListRef
                   Component : OverlappedElements) {
            Address ComponentLB = Address::invalid();
            for (const OMPClauseMappableExprCommon::MappableComponent &MC :
                 Component) {
              if (MC.getAssociatedDeclaration()) {
                ComponentLB =
                    CGF.EmitOMPSharedLValue(MC.getAssociatedExpression())
                        .getAddress();
                Size = CGF.Builder.CreatePtrDiff(
                    CGF.EmitCastToVoidPtr(ComponentLB.getPointer()),
                    CGF.EmitCastToVoidPtr(LB.getPointer()));
                break;
              }
            }
            BasePointers.push_back(BP.getPointer());
            Pointers.push_back(LB.getPointer());
            Sizes.push_back(Size);
            Types.push_back(Flags);
            LB = CGF.Builder.CreateConstGEP(ComponentLB, 1,
                                            CGF.getPointerSize());
          }
          BasePointers.push_back(BP.getPointer());
          Pointers.push_back(LB.getPointer());
          Size = CGF.Builder.CreatePtrDiff(
              CGF.EmitCastToVoidPtr(
                  CGF.Builder.CreateConstGEP(HB, 1, CharUnits::One())
                      .getPointer()),
              CGF.EmitCastToVoidPtr(LB.getPointer()));
          Sizes.push_back(Size);
          Types.push_back(Flags);
          break;
        }
        llvm::Value *Size = getExprTypeSize(I->getAssociatedExpression());
        if (!IsMemberPointer) {
          BasePointers.push_back(BP.getPointer());
          Pointers.push_back(LB.getPointer());
          Sizes.push_back(Size);

          // We need to add a pointer flag for each map that comes from the
          // same expression except for the first one. We also need to signal
          // this map is the first one that relates with the current capture
          // (there is a set of entries for each capture).
          OpenMPOffloadMappingFlags Flags = getMapTypeBits(
              MapType, MapTypeModifier, IsImplicit,
              !IsExpressionFirstInfo || IsLink, IsCaptureFirstInfo && !IsLink);

          if (!IsExpressionFirstInfo) {
            // If we have a PTR_AND_OBJ pair where the OBJ is a pointer as well,
            // then we reset the TO/FROM/ALWAYS/DELETE flags.
            if (IsPointer)
              Flags &= ~(OMP_MAP_TO | OMP_MAP_FROM | OMP_MAP_ALWAYS |
                         OMP_MAP_DELETE);

            if (ShouldBeMemberOf) {
              // Set placeholder value MEMBER_OF=FFFF to indicate that the flag
              // should be later updated with the correct value of MEMBER_OF.
              Flags |= OMP_MAP_MEMBER_OF;
              // From now on, all subsequent PTR_AND_OBJ entries should not be
              // marked as MEMBER_OF.
              ShouldBeMemberOf = false;
            }
          }

          Types.push_back(Flags);
        }

        // If we have encountered a member expression so far, keep track of the
        // mapped member. If the parent is "*this", then the value declaration
        // is nullptr.
        if (EncounteredME) {
          const auto *FD = dyn_cast<FieldDecl>(EncounteredME->getMemberDecl());
          unsigned FieldIndex = FD->getFieldIndex();

          // Update info about the lowest and highest elements for this struct.
          if (!PartialStruct.Base.isValid()) {
            PartialStruct.LowestElem = {FieldIndex, LB};
            PartialStruct.HighestElem = {FieldIndex, LB};
            PartialStruct.Base = BP;
          } else if (FieldIndex < PartialStruct.LowestElem.first) {
            PartialStruct.LowestElem = {FieldIndex, LB};
          } else if (FieldIndex > PartialStruct.HighestElem.first) {
            PartialStruct.HighestElem = {FieldIndex, LB};
          }
        }

        // If we have a final array section, we are done with this expression.
        if (IsFinalArraySection)
          break;

        // The pointer becomes the base for the next element.
        if (Next != CE)
          BP = LB;

        IsExpressionFirstInfo = false;
        IsCaptureFirstInfo = false;
      }
    }
  }
  /// Return the adjusted map modifiers if the declaration a capture refers to
  /// appears in a first-private clause. This is expected to be used only with
  /// directives that start with 'target'.
  MappableExprsHandler::OpenMPOffloadMappingFlags
  getMapModifiersForPrivateClauses(const CapturedStmt::Capture &Cap) const {
    assert(Cap.capturesVariable() && "Expected capture by reference only!");

    // A first private variable captured by reference will use only the
    // 'private ptr' and 'map to' flag. Return the right flags if the captured
    // declaration is known as first-private in this handler.
    if (FirstPrivateDecls.count(Cap.getCapturedVar()))
      return MappableExprsHandler::OMP_MAP_PRIVATE |
             MappableExprsHandler::OMP_MAP_TO;

    return MappableExprsHandler::OMP_MAP_TO |
           MappableExprsHandler::OMP_MAP_FROM;
  }
  static OpenMPOffloadMappingFlags getMemberOfFlag(unsigned Position) {
    // Member of is given by the 16 MSB of the flag, so shift by 48 bits.
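    // E.g. the first argument in the list (Position == 0) is encoded as
    // MEMBER_OF(1), i.e. 1ULL << 48.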
    return static_cast<OpenMPOffloadMappingFlags>(((uint64_t)Position + 1)
                                                  << 48);
  }

  static void setCorrectMemberOfFlag(OpenMPOffloadMappingFlags &Flags,
                                     OpenMPOffloadMappingFlags MemberOfFlag) {
    // If the entry is PTR_AND_OBJ but has not been marked with the special
    // placeholder value 0xFFFF in the MEMBER_OF field, then it should not be
    // marked as MEMBER_OF.
    if ((Flags & OMP_MAP_PTR_AND_OBJ) &&
        ((Flags & OMP_MAP_MEMBER_OF) != OMP_MAP_MEMBER_OF))
      return;

    // Reset the placeholder value to prepare the flag for the assignment of the
    // proper MEMBER_OF value.
    Flags &= ~OMP_MAP_MEMBER_OF;
    Flags |= MemberOfFlag;
  }
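
  /// Compute the "plain" layout of \a RD: its field declarations in increasing
  /// offset order, with non-virtual and virtual base classes flattened in
  /// recursively. Bitfields are not included (see the comment in the field
  /// loop below).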
  void getPlainLayout(const CXXRecordDecl *RD,
                      llvm::SmallVectorImpl<const FieldDecl *> &Layout,
                      bool AsBase) const {
    const CGRecordLayout &RL = CGF.getTypes().getCGRecordLayout(RD);

    llvm::StructType *St =
        AsBase ? RL.getBaseSubobjectLLVMType() : RL.getLLVMType();

    unsigned NumElements = St->getNumElements();
    llvm::SmallVector<
        llvm::PointerUnion<const CXXRecordDecl *, const FieldDecl *>, 4>
        RecordLayout(NumElements);

    // Fill bases.
    for (const auto &I : RD->bases()) {
      if (I.isVirtual())
        continue;
      const auto *Base = I.getType()->getAsCXXRecordDecl();
      // Ignore empty bases.
      if (Base->isEmpty() || CGF.getContext()
                                 .getASTRecordLayout(Base)
                                 .getNonVirtualSize()
                                 .isZero())
        continue;

      unsigned FieldIndex = RL.getNonVirtualBaseLLVMFieldNo(Base);
      RecordLayout[FieldIndex] = Base;
    }
    // Fill in virtual bases.
    for (const auto &I : RD->vbases()) {
      const auto *Base = I.getType()->getAsCXXRecordDecl();
      // Ignore empty bases.
      if (Base->isEmpty())
        continue;
      unsigned FieldIndex = RL.getVirtualBaseIndex(Base);
      if (RecordLayout[FieldIndex])
        continue;
      RecordLayout[FieldIndex] = Base;
    }
    // Fill in all the fields.
    assert(!RD->isUnion() && "Unexpected union.");
    for (const auto *Field : RD->fields()) {
      // Fill in non-bitfields. (Bitfields always use a zero pattern, which we
      // will fill in later.)
      if (!Field->isBitField()) {
        unsigned FieldIndex = RL.getLLVMFieldNo(Field);
        RecordLayout[FieldIndex] = Field;
      }
    }
    for (const llvm::PointerUnion<const CXXRecordDecl *, const FieldDecl *>
             &Data : RecordLayout) {
      if (Data.isNull())
        continue;
      if (const auto *Base = Data.dyn_cast<const CXXRecordDecl *>())
        getPlainLayout(Base, Layout, /*AsBase=*/true);
      else
        Layout.push_back(Data.get<const FieldDecl *>());
    }
  }
public:
  MappableExprsHandler(const OMPExecutableDirective &Dir, CodeGenFunction &CGF)
      : CurDir(Dir), CGF(CGF) {
    // Extract firstprivate clause information.
    for (const auto *C : Dir.getClausesOfKind<OMPFirstprivateClause>())
      for (const auto *D : C->varlists())
        FirstPrivateDecls.insert(
            cast<VarDecl>(cast<DeclRefExpr>(D)->getDecl())->getCanonicalDecl());
    // Extract device pointer clause information.
    for (const auto *C : Dir.getClausesOfKind<OMPIsDevicePtrClause>())
      for (auto L : C->component_lists())
        DevPointersMap[L.first].push_back(L.second);
  }
  /// Generate code for the combined entry if we have a partially mapped struct
  /// and take care of the mapping flags of the arguments corresponding to
  /// individual struct members.
  void emitCombinedEntry(MapBaseValuesArrayTy &BasePointers,
                         MapValuesArrayTy &Pointers, MapValuesArrayTy &Sizes,
                         MapFlagsArrayTy &Types, MapFlagsArrayTy &CurTypes,
                         const StructRangeInfoTy &PartialStruct) const {
    // Base is the base of the struct.
    BasePointers.push_back(PartialStruct.Base.getPointer());
    // Pointer is the address of the lowest element.
    llvm::Value *LB = PartialStruct.LowestElem.second.getPointer();
    Pointers.push_back(LB);
    // Size is (addr of {highest+1} element) - (addr of lowest element).
    llvm::Value *HB = PartialStruct.HighestElem.second.getPointer();
    llvm::Value *HAddr = CGF.Builder.CreateConstGEP1_32(HB, /*Idx0=*/1);
    llvm::Value *CLAddr = CGF.Builder.CreatePointerCast(LB, CGF.VoidPtrTy);
    llvm::Value *CHAddr = CGF.Builder.CreatePointerCast(HAddr, CGF.VoidPtrTy);
    llvm::Value *Diff = CGF.Builder.CreatePtrDiff(CHAddr, CLAddr);
    llvm::Value *Size = CGF.Builder.CreateIntCast(Diff, CGF.SizeTy,
                                                  /*isSigned=*/false);
    Sizes.push_back(Size);
    // Map type is always TARGET_PARAM.
    Types.push_back(OMP_MAP_TARGET_PARAM);
    // Remove TARGET_PARAM flag from the first element.
    (*CurTypes.begin()) &= ~OMP_MAP_TARGET_PARAM;
    // All other current entries will be MEMBER_OF the combined entry
    // (except for PTR_AND_OBJ entries which do not have a placeholder value
    // 0xFFFF in the MEMBER_OF field).
    OpenMPOffloadMappingFlags MemberOfFlag =
        getMemberOfFlag(BasePointers.size() - 1);
    for (auto &M : CurTypes)
      setCorrectMemberOfFlag(M, MemberOfFlag);
  }
  /// Generate all the base pointers, section pointers, sizes and map
  /// types for the extracted mappable expressions. Also, for each item that
  /// relates with a device pointer, a pair of the relevant declaration and
  /// index where it occurs is appended to the device pointers info array.
  void generateAllInfo(MapBaseValuesArrayTy &BasePointers,
                       MapValuesArrayTy &Pointers, MapValuesArrayTy &Sizes,
                       MapFlagsArrayTy &Types) const {
    // We have to process the component lists that relate with the same
    // declaration in a single chunk so that we can generate the map flags
    // correctly. Therefore, we organize all lists in a map.
    llvm::MapVector<const ValueDecl *, SmallVector<MapInfo, 8>> Info;

    // Helper function to fill the information map for the different supported
    // clauses.
    auto &&InfoGen = [&Info](
        const ValueDecl *D,
        OMPClauseMappableExprCommon::MappableExprComponentListRef L,
        OpenMPMapClauseKind MapType, OpenMPMapClauseKind MapModifier,
        bool ReturnDevicePointer, bool IsImplicit) {
      const ValueDecl *VD =
          D ? cast<ValueDecl>(D->getCanonicalDecl()) : nullptr;
      Info[VD].emplace_back(L, MapType, MapModifier, ReturnDevicePointer,
                            IsImplicit);
    };

    // FIXME: MSVC 2013 seems to require this-> to find member CurDir.
    for (const auto *C : this->CurDir.getClausesOfKind<OMPMapClause>())
      for (const auto &L : C->component_lists()) {
        InfoGen(L.first, L.second, C->getMapType(), C->getMapTypeModifier(),
                /*ReturnDevicePointer=*/false, C->isImplicit());
      }
    for (const auto *C : this->CurDir.getClausesOfKind<OMPToClause>())
      for (const auto &L : C->component_lists()) {
        InfoGen(L.first, L.second, OMPC_MAP_to, OMPC_MAP_unknown,
                /*ReturnDevicePointer=*/false, C->isImplicit());
      }
    for (const auto *C : this->CurDir.getClausesOfKind<OMPFromClause>())
      for (const auto &L : C->component_lists()) {
        InfoGen(L.first, L.second, OMPC_MAP_from, OMPC_MAP_unknown,
                /*ReturnDevicePointer=*/false, C->isImplicit());
      }

    // Look at the use_device_ptr clause information and mark the existing map
    // entries as such. If there is no map information for an entry in the
    // use_device_ptr list, we create one with map type 'alloc' and zero size
    // section. It is the user's fault if that was not mapped before. If there
    // is no map information and the pointer is a struct member, then we defer
    // the emission of that entry until the whole struct has been processed.
    llvm::MapVector<const ValueDecl *, SmallVector<DeferredDevicePtrEntryTy, 4>>
        DeferredInfo;

    // FIXME: MSVC 2013 seems to require this-> to find member CurDir.
    for (const auto *C :
         this->CurDir.getClausesOfKind<OMPUseDevicePtrClause>()) {
      for (const auto &L : C->component_lists()) {
        assert(!L.second.empty() && "Not expecting empty list of components!");
        const ValueDecl *VD = L.second.back().getAssociatedDeclaration();
        VD = cast<ValueDecl>(VD->getCanonicalDecl());
        const Expr *IE = L.second.back().getAssociatedExpression();
        // If the first component is a member expression, we have to look into
        // 'this', which maps to null in the map of map information. Otherwise
        // look directly for the information.
        auto It = Info.find(isa<MemberExpr>(IE) ? nullptr : VD);

        // We potentially have map information for this declaration already.
        // Look for the first set of components that refer to it.
        if (It != Info.end()) {
          auto CI = std::find_if(
              It->second.begin(), It->second.end(), [VD](const MapInfo &MI) {
                return MI.Components.back().getAssociatedDeclaration() == VD;
              });
          // If we found a map entry, signal that the pointer has to be returned
          // and move on to the next declaration.
          if (CI != It->second.end()) {
            CI->ReturnDevicePointer = true;
            continue;
          }
        }

        // We didn't find any match in our map information - generate a zero
        // size array section - if the pointer is a struct member we defer this
        // action until the whole struct has been processed.
        // FIXME: MSVC 2013 seems to require this-> to find member CGF.
        if (isa<MemberExpr>(IE)) {
          // Insert the pointer into Info to be processed by
          // generateInfoForComponentList. Because it is a member pointer
          // without a pointee, no entry will be generated for it, therefore
          // we need to generate one after the whole struct has been processed.
          // Nonetheless, generateInfoForComponentList must be called to take
          // the pointer into account for the calculation of the range of the
          // partial struct.
          InfoGen(nullptr, L.second, OMPC_MAP_unknown, OMPC_MAP_unknown,
                  /*ReturnDevicePointer=*/false, C->isImplicit());
          DeferredInfo[nullptr].emplace_back(IE, VD);
        } else {
          llvm::Value *Ptr = this->CGF.EmitLoadOfScalar(
              this->CGF.EmitLValue(IE), IE->getExprLoc());
          BasePointers.emplace_back(Ptr, VD);
          Pointers.push_back(Ptr);
          Sizes.push_back(llvm::Constant::getNullValue(this->CGF.SizeTy));
          Types.push_back(OMP_MAP_RETURN_PARAM | OMP_MAP_TARGET_PARAM);
        }
      }
    }
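
    // Generate the map information for every declaration collected above, one
    // chunk per declaration, so that entries for members of the same struct
    // can be combined.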
    for (const auto &M : Info) {
      // We need to know when we generate information for the first component
      // associated with a capture, because the mapping flags depend on it.
      bool IsFirstComponentList = true;

      // Temporary versions of arrays.
      MapBaseValuesArrayTy CurBasePointers;
      MapValuesArrayTy CurPointers;
      MapValuesArrayTy CurSizes;
      MapFlagsArrayTy CurTypes;
      StructRangeInfoTy PartialStruct;

      for (const MapInfo &L : M.second) {
        assert(!L.Components.empty() &&
               "Not expecting declaration with no component lists.");

        // Remember the current base pointer index.
        unsigned CurrentBasePointersIdx = CurBasePointers.size();
        // FIXME: MSVC 2013 seems to require this-> to find the member method.
        this->generateInfoForComponentList(
            L.MapType, L.MapTypeModifier, L.Components, CurBasePointers,
            CurPointers, CurSizes, CurTypes, PartialStruct,
            IsFirstComponentList, L.IsImplicit);

        // If this entry relates with a device pointer, set the relevant
        // declaration and add the 'return pointer' flag.
        if (L.ReturnDevicePointer) {
          assert(CurBasePointers.size() > CurrentBasePointersIdx &&
                 "Unexpected number of mapped base pointers.");

          const ValueDecl *RelevantVD =
              L.Components.back().getAssociatedDeclaration();
          assert(RelevantVD &&
                 "No relevant declaration related with device pointer??");

          CurBasePointers[CurrentBasePointersIdx].setDevicePtrDecl(RelevantVD);
          CurTypes[CurrentBasePointersIdx] |= OMP_MAP_RETURN_PARAM;
        }
        IsFirstComponentList = false;
      }

      // Append any pending zero-length pointers which are struct members and
      // used with use_device_ptr.
      auto CI = DeferredInfo.find(M.first);
      if (CI != DeferredInfo.end()) {
        for (const DeferredDevicePtrEntryTy &L : CI->second) {
          llvm::Value *BasePtr = this->CGF.EmitLValue(L.IE).getPointer();
          llvm::Value *Ptr = this->CGF.EmitLoadOfScalar(
              this->CGF.EmitLValue(L.IE), L.IE->getExprLoc());
          CurBasePointers.emplace_back(BasePtr, L.VD);
          CurPointers.push_back(Ptr);
          CurSizes.push_back(llvm::Constant::getNullValue(this->CGF.SizeTy));
          // Entry is PTR_AND_OBJ and RETURN_PARAM. Also, set the placeholder
          // value MEMBER_OF=FFFF so that the entry is later updated with the
          // correct value of MEMBER_OF.
          CurTypes.push_back(OMP_MAP_PTR_AND_OBJ | OMP_MAP_RETURN_PARAM |
                             OMP_MAP_MEMBER_OF);
        }
      }

      // If there is an entry in PartialStruct it means we have a struct with
      // individual members mapped. Emit an extra combined entry.
      if (PartialStruct.Base.isValid())
        emitCombinedEntry(BasePointers, Pointers, Sizes, Types, CurTypes,
                          PartialStruct);

      // We need to append the results of this capture to what we already have.
      BasePointers.append(CurBasePointers.begin(), CurBasePointers.end());
      Pointers.append(CurPointers.begin(), CurPointers.end());
      Sizes.append(CurSizes.begin(), CurSizes.end());
      Types.append(CurTypes.begin(), CurTypes.end());
    }
  }
  /// Emit capture info for lambdas for variables captured by reference.
  void generateInfoForLambdaCaptures(
      const ValueDecl *VD, llvm::Value *Arg, MapBaseValuesArrayTy &BasePointers,
      MapValuesArrayTy &Pointers, MapValuesArrayTy &Sizes,
      MapFlagsArrayTy &Types,
      llvm::DenseMap<llvm::Value *, llvm::Value *> &LambdaPointers) const {
    const auto *RD = VD->getType()
                         .getCanonicalType()
                         .getNonReferenceType()
                         ->getAsCXXRecordDecl();
    if (!RD || !RD->isLambda())
      return;
    Address VDAddr = Address(Arg, CGF.getContext().getDeclAlign(VD));
    LValue VDLVal = CGF.MakeAddrLValue(
        VDAddr, VD->getType().getCanonicalType().getNonReferenceType());
    llvm::DenseMap<const VarDecl *, FieldDecl *> Captures;
    FieldDecl *ThisCapture = nullptr;
    RD->getCaptureFields(Captures, ThisCapture);
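    // Map the captured 'this' pointer of the lambda, if any, as a PTR_AND_OBJ
    // member of the lambda object.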
    if (ThisCapture) {
      LValue ThisLVal =
          CGF.EmitLValueForFieldInitialization(VDLVal, ThisCapture);
      LValue ThisLValVal = CGF.EmitLValueForField(VDLVal, ThisCapture);
      LambdaPointers.try_emplace(ThisLVal.getPointer(), VDLVal.getPointer());
      BasePointers.push_back(ThisLVal.getPointer());
      Pointers.push_back(ThisLValVal.getPointer());
      Sizes.push_back(CGF.getTypeSize(CGF.getContext().VoidPtrTy));
      Types.push_back(OMP_MAP_PTR_AND_OBJ | OMP_MAP_LITERAL |
                      OMP_MAP_MEMBER_OF | OMP_MAP_IMPLICIT);
    }
    for (const LambdaCapture &LC : RD->captures()) {
      if (LC.getCaptureKind() != LCK_ByRef)
        continue;
      const VarDecl *VD = LC.getCapturedVar();
      auto It = Captures.find(VD);
      assert(It != Captures.end() && "Found lambda capture without field.");
      LValue VarLVal = CGF.EmitLValueForFieldInitialization(VDLVal, It->second);
      LValue VarLValVal = CGF.EmitLValueForField(VDLVal, It->second);
      LambdaPointers.try_emplace(VarLVal.getPointer(), VDLVal.getPointer());
      BasePointers.push_back(VarLVal.getPointer());
      Pointers.push_back(VarLValVal.getPointer());
      Sizes.push_back(CGF.getTypeSize(
          VD->getType().getCanonicalType().getNonReferenceType()));
      Types.push_back(OMP_MAP_PTR_AND_OBJ | OMP_MAP_LITERAL |
                      OMP_MAP_MEMBER_OF | OMP_MAP_IMPLICIT);
    }
  }
  /// Set correct indices for lambdas captures.
  void adjustMemberOfForLambdaCaptures(
      const llvm::DenseMap<llvm::Value *, llvm::Value *> &LambdaPointers,
      MapBaseValuesArrayTy &BasePointers, MapValuesArrayTy &Pointers,
      MapFlagsArrayTy &Types) const {
    for (unsigned I = 0, E = Types.size(); I < E; ++I) {
      // Set correct member_of idx for all implicit lambda captures.
      if (Types[I] != (OMP_MAP_PTR_AND_OBJ | OMP_MAP_LITERAL |
                       OMP_MAP_MEMBER_OF | OMP_MAP_IMPLICIT))
        continue;
      llvm::Value *BasePtr = LambdaPointers.lookup(*BasePointers[I]);
      assert(BasePtr && "Unable to find base lambda address.");
      int TgtIdx = -1;
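      // Search backwards through the entries emitted so far: the closest
      // preceding entry whose pointer is the lambda object's address is the
      // parent entry this capture is a member of.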
      for (unsigned J = I; J > 0; --J) {
        unsigned Idx = J - 1;
        if (Pointers[Idx] != BasePtr)
          continue;
        TgtIdx = Idx;
        break;
      }
      assert(TgtIdx != -1 && "Unable to find parent lambda.");
      // All other current entries will be MEMBER_OF the combined entry
      // (except for PTR_AND_OBJ entries which do not have a placeholder value
      // 0xFFFF in the MEMBER_OF field).
      OpenMPOffloadMappingFlags MemberOfFlag = getMemberOfFlag(TgtIdx);
      setCorrectMemberOfFlag(Types[I], MemberOfFlag);
    }
  }
  /// Generate the base pointers, section pointers, sizes and map types
  /// associated to a given capture.
  void generateInfoForCapture(const CapturedStmt::Capture *Cap,
                              llvm::Value *Arg,
                              MapBaseValuesArrayTy &BasePointers,
                              MapValuesArrayTy &Pointers,
                              MapValuesArrayTy &Sizes, MapFlagsArrayTy &Types,
                              StructRangeInfoTy &PartialStruct) const {
    assert(!Cap->capturesVariableArrayType() &&
           "Not expecting to generate map info for a variable array type!");

    // We need to know when we are generating information for the first
    // component associated with a capture, because the mapping flags depend
    // on it.
    const ValueDecl *VD = Cap->capturesThis()
                              ? nullptr
                              : Cap->getCapturedVar()->getCanonicalDecl();

    // If this declaration appears in an is_device_ptr clause we just have to
    // pass the pointer by value. If it is a reference to a declaration, we just
    // pass its value.
    if (DevPointersMap.count(VD)) {
      BasePointers.emplace_back(Arg, VD);
      Pointers.push_back(Arg);
      Sizes.push_back(CGF.getTypeSize(CGF.getContext().VoidPtrTy));
      Types.push_back(OMP_MAP_LITERAL | OMP_MAP_TARGET_PARAM);
      return;
    }
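
    // Collect every map clause component list that refers to this capture's
    // declaration, together with its map type, map type modifier and implicit
    // flag.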
    using MapData =
        std::tuple<OMPClauseMappableExprCommon::MappableExprComponentListRef,
                   OpenMPMapClauseKind, OpenMPMapClauseKind, bool>;
    SmallVector<MapData, 4> DeclComponentLists;
    // FIXME: MSVC 2013 seems to require this-> to find member CurDir.
    for (const auto *C : this->CurDir.getClausesOfKind<OMPMapClause>()) {
      for (const auto &L : C->decl_component_lists(VD)) {
        assert(L.first == VD &&
               "We got information for the wrong declaration??");
        assert(!L.second.empty() &&
               "Not expecting declaration with no component lists.");
        DeclComponentLists.emplace_back(L.second, C->getMapType(),
                                        C->getMapTypeModifier(),
                                        C->isImplicit());
      }
    }

    // Find overlapping elements (including the offset from the base element).
    llvm::SmallDenseMap<
        const MapData *,
        llvm::SmallVector<
            OMPClauseMappableExprCommon::MappableExprComponentListRef, 4>,
        4>
        OverlappedData;
    size_t Count = 0;
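    // Compare every pair of component lists starting from the base: if one
    // list turns out to be a prefix of the other, the two regions overlap, and
    // the longer list is recorded as an overlapped element of the shorter one.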
    for (const MapData &L : DeclComponentLists) {
      OMPClauseMappableExprCommon::MappableExprComponentListRef Components;
      OpenMPMapClauseKind MapType;
      OpenMPMapClauseKind MapTypeModifier;
      bool IsImplicit;
      std::tie(Components, MapType, MapTypeModifier, IsImplicit) = L;
      ++Count;
      for (const MapData &L1 : makeArrayRef(DeclComponentLists).slice(Count)) {
        OMPClauseMappableExprCommon::MappableExprComponentListRef Components1;
        std::tie(Components1, MapType, MapTypeModifier, IsImplicit) = L1;
        auto CI = Components.rbegin();
        auto CE = Components.rend();
        auto SI = Components1.rbegin();
        auto SE = Components1.rend();
        for (; CI != CE && SI != SE; ++CI, ++SI) {
          if (CI->getAssociatedExpression()->getStmtClass() !=
              SI->getAssociatedExpression()->getStmtClass())
            break;
          // Are we dealing with different variables/fields?
          if (CI->getAssociatedDeclaration() != SI->getAssociatedDeclaration())
            break;
        }
        // Found overlapping if, at least for one component, reached the head of
        // the components list.
        if (CI == CE || SI == SE) {
          assert((CI != CE || SI != SE) &&
                 "Unexpected full match of the mapping components.");
          const MapData &BaseData = CI == CE ? L : L1;
          OMPClauseMappableExprCommon::MappableExprComponentListRef SubData =
              SI == SE ? Components : Components1;
          auto &OverlappedElements = OverlappedData.FindAndConstruct(&BaseData);
          OverlappedElements.getSecond().push_back(SubData);
        }
      }
    }
    // Sort the overlapped elements for each item.
    llvm::SmallVector<const FieldDecl *, 4> Layout;
    if (!OverlappedData.empty()) {
      if (const auto *CRD =
              VD->getType().getCanonicalType()->getAsCXXRecordDecl())
        getPlainLayout(CRD, Layout, /*AsBase=*/false);
      else {
        const auto *RD = VD->getType().getCanonicalType()->getAsRecordDecl();
        Layout.append(RD->field_begin(), RD->field_end());
      }
    }
    for (auto &Pair : OverlappedData) {
      llvm::sort(
          Pair.getSecond(),
          [&Layout](
              OMPClauseMappableExprCommon::MappableExprComponentListRef First,
              OMPClauseMappableExprCommon::MappableExprComponentListRef
                  Second) {
            auto CI = First.rbegin();
            auto CE = First.rend();
            auto SI = Second.rbegin();
            auto SE = Second.rend();
            for (; CI != CE && SI != SE; ++CI, ++SI) {
              if (CI->getAssociatedExpression()->getStmtClass() !=
                  SI->getAssociatedExpression()->getStmtClass())
                break;
              // Are we dealing with different variables/fields?
              if (CI->getAssociatedDeclaration() !=
                  SI->getAssociatedDeclaration())
                break;
            }

            // Lists contain the same elements.
            if (CI == CE && SI == SE)
              return false;

            // A list with fewer elements orders before a list with more
            // elements.
            if (CI == CE || SI == SE)
              return CI == CE;

            const auto *FD1 = cast<FieldDecl>(CI->getAssociatedDeclaration());
            const auto *FD2 = cast<FieldDecl>(SI->getAssociatedDeclaration());
            if (FD1->getParent() == FD2->getParent())
              return FD1->getFieldIndex() < FD2->getFieldIndex();
            const auto It =
                llvm::find_if(Layout, [FD1, FD2](const FieldDecl *FD) {
                  return FD == FD1 || FD == FD2;
                });
            return *It == FD1;
          });
    }

    // Go through all of the elements with overlapped elements.
    for (const auto &Pair : OverlappedData) {
      const MapData &L = *Pair.getFirst();
      OMPClauseMappableExprCommon::MappableExprComponentListRef Components;
      OpenMPMapClauseKind MapType;
      OpenMPMapClauseKind MapTypeModifier;
      bool IsImplicit;
      std::tie(Components, MapType, MapTypeModifier, IsImplicit) = L;
      ArrayRef<OMPClauseMappableExprCommon::MappableExprComponentListRef>
          OverlappedComponents = Pair.getSecond();
      bool IsFirstComponentList = true;
      generateInfoForComponentList(MapType, MapTypeModifier, Components,
                                   BasePointers, Pointers, Sizes, Types,
                                   PartialStruct, IsFirstComponentList,
                                   IsImplicit, OverlappedComponents);
    }
    // Go through the other elements without overlapped elements.
    bool IsFirstComponentList = OverlappedData.empty();
    for (const MapData &L : DeclComponentLists) {
      OMPClauseMappableExprCommon::MappableExprComponentListRef Components;
      OpenMPMapClauseKind MapType;
      OpenMPMapClauseKind MapTypeModifier;
      bool IsImplicit;
      std::tie(Components, MapType, MapTypeModifier, IsImplicit) = L;
      auto It = OverlappedData.find(&L);
      if (It == OverlappedData.end())
        generateInfoForComponentList(MapType, MapTypeModifier, Components,
                                     BasePointers, Pointers, Sizes, Types,
                                     PartialStruct, IsFirstComponentList,
                                     IsImplicit);
      IsFirstComponentList = false;
    }
  }
  /// Generate the base pointers, section pointers, sizes and map types
  /// associated with the declare target link variables.
  void generateInfoForDeclareTargetLink(MapBaseValuesArrayTy &BasePointers,
                                        MapValuesArrayTy &Pointers,
                                        MapValuesArrayTy &Sizes,
                                        MapFlagsArrayTy &Types) const {
    // Map other list items in the map clause which are not captured variables
    // but "declare target link" global variables.
    for (const auto *C : this->CurDir.getClausesOfKind<OMPMapClause>()) {
      for (const auto &L : C->component_lists()) {
        if (!L.first)
          continue;
        const auto *VD = dyn_cast<VarDecl>(L.first);
        if (!VD)
          continue;
        llvm::Optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
            OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD);
        if (!Res || *Res != OMPDeclareTargetDeclAttr::MT_Link)
          continue;
        StructRangeInfoTy PartialStruct;
        generateInfoForComponentList(
            C->getMapType(), C->getMapTypeModifier(), L.second, BasePointers,
            Pointers, Sizes, Types, PartialStruct,
            /*IsFirstComponentList=*/true, C->isImplicit());
        assert(!PartialStruct.Base.isValid() &&
               "No partial structs for declare target link expected.");
      }
    }
  }
  /// Generate the default map information for a given capture \a CI,
  /// record field declaration \a RI and captured value \a CV.
  void generateDefaultMapInfo(const CapturedStmt::Capture &CI,
                              const FieldDecl &RI, llvm::Value *CV,
                              MapBaseValuesArrayTy &CurBasePointers,
                              MapValuesArrayTy &CurPointers,
                              MapValuesArrayTy &CurSizes,
                              MapFlagsArrayTy &CurMapTypes) const {
    // Do the default mapping.
    if (CI.capturesThis()) {
      CurBasePointers.push_back(CV);
      CurPointers.push_back(CV);
      const auto *PtrTy = cast<PointerType>(RI.getType().getTypePtr());
      CurSizes.push_back(CGF.getTypeSize(PtrTy->getPointeeType()));
      // Default map type.
      CurMapTypes.push_back(OMP_MAP_TO | OMP_MAP_FROM);
    } else if (CI.capturesVariableByCopy()) {
      CurBasePointers.push_back(CV);
      CurPointers.push_back(CV);
      if (!RI.getType()->isAnyPointerType()) {
        // We have to signal to the runtime captures passed by value that are
        // not pointers.
        CurMapTypes.push_back(OMP_MAP_LITERAL);
        CurSizes.push_back(CGF.getTypeSize(RI.getType()));
      } else {
        // Pointers are implicitly mapped with a zero size and no flags
        // (other than first map that is added for all implicit maps).
        CurMapTypes.push_back(OMP_MAP_NONE);
        CurSizes.push_back(llvm::Constant::getNullValue(CGF.SizeTy));
      }
    } else {
      assert(CI.capturesVariable() && "Expected captured reference.");
      CurBasePointers.push_back(CV);
      CurPointers.push_back(CV);

      const auto *PtrTy = cast<ReferenceType>(RI.getType().getTypePtr());
      QualType ElementType = PtrTy->getPointeeType();
      CurSizes.push_back(CGF.getTypeSize(ElementType));
      // The default map type for a scalar/complex type is 'to' because by
      // default the value doesn't have to be retrieved. For an aggregate
      // type, the default is 'tofrom'.
      CurMapTypes.push_back(getMapModifiersForPrivateClauses(CI));
    }
    // Every default map produces a single argument which is a target parameter.
    CurMapTypes.back() |= OMP_MAP_TARGET_PARAM;

    // Add flag stating this is an implicit map.
    CurMapTypes.back() |= OMP_MAP_IMPLICIT;
  }
};
enum OpenMPOffloadingReservedDeviceIDs {
  /// Device ID if the device was not defined; the runtime should get it
  /// from environment variables, as described in the spec.
  OMP_DEVICEID_UNDEF = -1,
};
} // anonymous namespace
/// Emit the arrays used to pass the captures and map information to the
/// offloading runtime library. If there is no map or capture information,
/// return nullptr by reference.
static void
emitOffloadingArrays(CodeGenFunction &CGF,
                     MappableExprsHandler::MapBaseValuesArrayTy &BasePointers,
                     MappableExprsHandler::MapValuesArrayTy &Pointers,
                     MappableExprsHandler::MapValuesArrayTy &Sizes,
                     MappableExprsHandler::MapFlagsArrayTy &MapTypes,
                     CGOpenMPRuntime::TargetDataInfo &Info) {
  CodeGenModule &CGM = CGF.CGM;
  ASTContext &Ctx = CGF.getContext();

  // Reset the array information.
  Info.clearArrayInfo();
  Info.NumberOfPtrs = BasePointers.size();

  if (Info.NumberOfPtrs) {
    // Detect if we have any capture size requiring runtime evaluation of the
    // size so that a constant array could be eventually used.
    bool hasRuntimeEvaluationCaptureSize = false;
    for (llvm::Value *S : Sizes)
      if (!isa<llvm::Constant>(S)) {
        hasRuntimeEvaluationCaptureSize = true;
        break;
      }

    llvm::APInt PointerNumAP(32, Info.NumberOfPtrs, /*isSigned=*/true);
    QualType PointerArrayType =
        Ctx.getConstantArrayType(Ctx.VoidPtrTy, PointerNumAP, ArrayType::Normal,
                                 /*IndexTypeQuals=*/0);

    Info.BasePointersArray =
        CGF.CreateMemTemp(PointerArrayType, ".offload_baseptrs").getPointer();
    Info.PointersArray =
        CGF.CreateMemTemp(PointerArrayType, ".offload_ptrs").getPointer();

    // If we don't have any VLA types or other types that require runtime
    // evaluation, we can use a constant array for the map sizes, otherwise we
    // need to fill up the arrays as we do for the pointers.
    if (hasRuntimeEvaluationCaptureSize) {
      QualType SizeArrayType = Ctx.getConstantArrayType(
          Ctx.getSizeType(), PointerNumAP, ArrayType::Normal,
          /*IndexTypeQuals=*/0);
      Info.SizesArray =
          CGF.CreateMemTemp(SizeArrayType, ".offload_sizes").getPointer();
    } else {
      // We expect all the sizes to be constant, so we collect them to create
      // a constant array.
      SmallVector<llvm::Constant *, 16> ConstSizes;
      for (llvm::Value *S : Sizes)
        ConstSizes.push_back(cast<llvm::Constant>(S));

      auto *SizesArrayInit = llvm::ConstantArray::get(
          llvm::ArrayType::get(CGM.SizeTy, ConstSizes.size()), ConstSizes);
      std::string Name = CGM.getOpenMPRuntime().getName({"offload_sizes"});
      auto *SizesArrayGbl = new llvm::GlobalVariable(
          CGM.getModule(), SizesArrayInit->getType(),
          /*isConstant=*/true, llvm::GlobalValue::PrivateLinkage,
          SizesArrayInit, Name);
      SizesArrayGbl->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
      Info.SizesArray = SizesArrayGbl;
    }

    // The map types are always constant so we don't need to generate code to
    // fill arrays. Instead, we create an array constant.
    SmallVector<uint64_t, 4> Mapping(MapTypes.size(), 0);
    llvm::copy(MapTypes, Mapping.begin());
    llvm::Constant *MapTypesArrayInit =
        llvm::ConstantDataArray::get(CGF.Builder.getContext(), Mapping);
    std::string MaptypesName =
        CGM.getOpenMPRuntime().getName({"offload_maptypes"});
    auto *MapTypesArrayGbl = new llvm::GlobalVariable(
        CGM.getModule(), MapTypesArrayInit->getType(),
        /*isConstant=*/true, llvm::GlobalValue::PrivateLinkage,
        MapTypesArrayInit, MaptypesName);
    MapTypesArrayGbl->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
    Info.MapTypesArray = MapTypesArrayGbl;
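
    // Fill the base pointers and pointers arrays element by element, and the
    // sizes array as well whenever any size has to be computed at run time.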
    for (unsigned I = 0; I < Info.NumberOfPtrs; ++I) {
      llvm::Value *BPVal = *BasePointers[I];
      llvm::Value *BP = CGF.Builder.CreateConstInBoundsGEP2_32(
          llvm::ArrayType::get(CGM.VoidPtrTy, Info.NumberOfPtrs),
          Info.BasePointersArray, 0, I);
      BP = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
          BP, BPVal->getType()->getPointerTo(/*AddrSpace=*/0));
      Address BPAddr(BP, Ctx.getTypeAlignInChars(Ctx.VoidPtrTy));
      CGF.Builder.CreateStore(BPVal, BPAddr);

      if (Info.requiresDevicePointerInfo())
        if (const ValueDecl *DevVD = BasePointers[I].getDevicePtrDecl())
          Info.CaptureDeviceAddrMap.try_emplace(DevVD, BPAddr);

      llvm::Value *PVal = Pointers[I];
      llvm::Value *P = CGF.Builder.CreateConstInBoundsGEP2_32(
          llvm::ArrayType::get(CGM.VoidPtrTy, Info.NumberOfPtrs),
          Info.PointersArray, 0, I);
      P = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
          P, PVal->getType()->getPointerTo(/*AddrSpace=*/0));
      Address PAddr(P, Ctx.getTypeAlignInChars(Ctx.VoidPtrTy));
      CGF.Builder.CreateStore(PVal, PAddr);

      if (hasRuntimeEvaluationCaptureSize) {
        llvm::Value *S = CGF.Builder.CreateConstInBoundsGEP2_32(
            llvm::ArrayType::get(CGM.SizeTy, Info.NumberOfPtrs),
            Info.SizesArray,
            /*Idx0=*/0,
            /*Idx1=*/I);
        Address SAddr(S, Ctx.getTypeAlignInChars(Ctx.getSizeType()));
        CGF.Builder.CreateStore(
            CGF.Builder.CreateIntCast(Sizes[I], CGM.SizeTy, /*isSigned=*/true),
            SAddr);
      }
    }
  }
}
/// Emit the arguments to be passed to the runtime library based on the
/// arrays of pointers, sizes and map types.
static void emitOffloadingArraysArgument(
    CodeGenFunction &CGF, llvm::Value *&BasePointersArrayArg,
    llvm::Value *&PointersArrayArg, llvm::Value *&SizesArrayArg,
    llvm::Value *&MapTypesArrayArg, CGOpenMPRuntime::TargetDataInfo &Info) {
  CodeGenModule &CGM = CGF.CGM;
  if (Info.NumberOfPtrs) {
    BasePointersArrayArg = CGF.Builder.CreateConstInBoundsGEP2_32(
        llvm::ArrayType::get(CGM.VoidPtrTy, Info.NumberOfPtrs),
        Info.BasePointersArray,
        /*Idx0=*/0, /*Idx1=*/0);
    PointersArrayArg = CGF.Builder.CreateConstInBoundsGEP2_32(
        llvm::ArrayType::get(CGM.VoidPtrTy, Info.NumberOfPtrs),
        Info.PointersArray,
        /*Idx0=*/0,
        /*Idx1=*/0);
    SizesArrayArg = CGF.Builder.CreateConstInBoundsGEP2_32(
        llvm::ArrayType::get(CGM.SizeTy, Info.NumberOfPtrs), Info.SizesArray,
        /*Idx0=*/0, /*Idx1=*/0);
    MapTypesArrayArg = CGF.Builder.CreateConstInBoundsGEP2_32(
        llvm::ArrayType::get(CGM.Int64Ty, Info.NumberOfPtrs),
        Info.MapTypesArray,
        /*Idx0=*/0,
        /*Idx1=*/0);
  } else {
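    // There is nothing to map, so pass null pointers to the runtime library.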
    BasePointersArrayArg = llvm::ConstantPointerNull::get(CGM.VoidPtrPtrTy);
    PointersArrayArg = llvm::ConstantPointerNull::get(CGM.VoidPtrPtrTy);
    SizesArrayArg = llvm::ConstantPointerNull::get(CGM.SizeTy->getPointerTo());
    MapTypesArrayArg =
        llvm::ConstantPointerNull::get(CGM.Int64Ty->getPointerTo());
  }
}
  7543. void CGOpenMPRuntime::emitTargetCall(CodeGenFunction &CGF,
  7544. const OMPExecutableDirective &D,
  7545. llvm::Value *OutlinedFn,
  7546. llvm::Value *OutlinedFnID,
  7547. const Expr *IfCond, const Expr *Device) {
  7548. if (!CGF.HaveInsertPoint())
  7549. return;
  7550. assert(OutlinedFn && "Invalid outlined function!");
  const bool RequiresOuterTask = D.hasClausesOfKind<OMPDependClause>();
  llvm::SmallVector<llvm::Value *, 16> CapturedVars;
  const CapturedStmt &CS = *D.getCapturedStmt(OMPD_target);
  auto &&ArgsCodegen = [&CS, &CapturedVars](CodeGenFunction &CGF,
                                            PrePostActionTy &) {
    CGF.GenerateOpenMPCapturedVars(CS, CapturedVars);
  };
  emitInlinedDirective(CGF, OMPD_unknown, ArgsCodegen);

  CodeGenFunction::OMPTargetDataInfo InputInfo;
  llvm::Value *MapTypesArray = nullptr;
  // Fill up the pointer arrays and transfer execution to the device.
  auto &&ThenGen = [this, Device, OutlinedFn, OutlinedFnID, &D, &InputInfo,
                    &MapTypesArray, &CS, RequiresOuterTask,
                    &CapturedVars](CodeGenFunction &CGF, PrePostActionTy &) {
    // On top of the arrays that were filled up, the target offloading call
    // takes as arguments the device id as well as the host pointer. The host
    // pointer is used by the runtime library to identify the current target
    // region, so it only has to be unique and not necessarily point to
    // anything. It could be the pointer to the outlined function that
    // implements the target region, but we aren't using that so that the
    // compiler doesn't need to keep that, and could therefore inline the host
    // function if proven worthwhile during optimization.

    // From this point on, we need to have an ID of the target region defined.
    assert(OutlinedFnID && "Invalid outlined function ID!");

    // Emit device ID if any.
    llvm::Value *DeviceID;
    if (Device) {
      DeviceID = CGF.Builder.CreateIntCast(CGF.EmitScalarExpr(Device),
                                           CGF.Int64Ty, /*isSigned=*/true);
    } else {
      DeviceID = CGF.Builder.getInt64(OMP_DEVICEID_UNDEF);
    }

    // Emit the number of elements in the offloading arrays.
    llvm::Value *PointerNum =
        CGF.Builder.getInt32(InputInfo.NumberOfTargetItems);

    // Return value of the runtime offloading call.
    llvm::Value *Return;

    llvm::Value *NumTeams = emitNumTeamsForTargetDirective(*this, CGF, D);
    llvm::Value *NumThreads = emitNumThreadsForTargetDirective(*this, CGF, D);

    bool HasNowait = D.hasClausesOfKind<OMPNowaitClause>();
    // The target region is an outlined function launched by the runtime
    // via calls to __tgt_target() or __tgt_target_teams().
    //
    // __tgt_target() launches a target region with one team and one thread,
    // executing a serial region. This master thread may in turn launch
    // more threads within its team upon encountering a parallel region;
    // however, no additional teams can be launched on the device.
    //
    // __tgt_target_teams() launches a target region with one or more teams,
    // each with one or more threads. This call is required for target
    // constructs such as:
    //   'target teams'
    //   'target' / 'teams'
    //   'target teams distribute parallel for'
    //   'target parallel'
    // and so on.
    //
    // Note that on the host and CPU targets, the runtime implementation of
    // these calls simply calls the outlined function without forking threads.
    // The outlined functions themselves have runtime calls to
    // __kmpc_fork_teams() and __kmpc_fork() for this purpose, codegen'd by
    // the compiler in emitTeamsCall() and emitParallelCall().
    //
    // In contrast, on the NVPTX target, the implementation of
    // __tgt_target_teams() launches a GPU kernel with the requested number
    // of teams and threads, so no additional calls to the runtime are
    // required.
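    // For illustration (hedged sketch, not emitted code): a construct like
    //   #pragma omp target teams num_teams(4) thread_limit(8)
    // takes the __tgt_target_teams() path below with NumTeams == 4 and
    // NumThreads == 8, while a bare '#pragma omp target' has no enclosed
    // teams region, so NumTeams is null and __tgt_target() is used instead.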
    if (NumTeams) {
      // If we have NumTeams defined this means that we have an enclosed teams
      // region. Therefore we also expect to have NumThreads defined. These two
      // values should be defined in the presence of a teams directive,
      // regardless of having any clauses associated. If the user is using
      // teams but no clauses, these two values will be the default that should
      // be passed to the runtime library - a 32-bit integer with the value
      // zero.
      assert(NumThreads && "Thread limit expression should be available along "
                           "with number of teams.");
      llvm::Value *OffloadingArgs[] = {DeviceID,
                                       OutlinedFnID,
                                       PointerNum,
                                       InputInfo.BasePointersArray.getPointer(),
                                       InputInfo.PointersArray.getPointer(),
                                       InputInfo.SizesArray.getPointer(),
                                       MapTypesArray,
                                       NumTeams,
                                       NumThreads};
      Return = CGF.EmitRuntimeCall(
          createRuntimeFunction(HasNowait ? OMPRTL__tgt_target_teams_nowait
                                          : OMPRTL__tgt_target_teams),
          OffloadingArgs);
    } else {
      llvm::Value *OffloadingArgs[] = {DeviceID,
                                       OutlinedFnID,
                                       PointerNum,
                                       InputInfo.BasePointersArray.getPointer(),
                                       InputInfo.PointersArray.getPointer(),
                                       InputInfo.SizesArray.getPointer(),
                                       MapTypesArray};
      Return = CGF.EmitRuntimeCall(
          createRuntimeFunction(HasNowait ? OMPRTL__tgt_target_nowait
                                          : OMPRTL__tgt_target),
          OffloadingArgs);
    }

    // Check the error code and execute the host version if required.
    llvm::BasicBlock *OffloadFailedBlock =
        CGF.createBasicBlock("omp_offload.failed");
    llvm::BasicBlock *OffloadContBlock =
        CGF.createBasicBlock("omp_offload.cont");
    llvm::Value *Failed = CGF.Builder.CreateIsNotNull(Return);
    CGF.Builder.CreateCondBr(Failed, OffloadFailedBlock, OffloadContBlock);

    CGF.EmitBlock(OffloadFailedBlock);
    if (RequiresOuterTask) {
      CapturedVars.clear();
      CGF.GenerateOpenMPCapturedVars(CS, CapturedVars);
    }
    emitOutlinedFunctionCall(CGF, D.getBeginLoc(), OutlinedFn, CapturedVars);
    CGF.EmitBranch(OffloadContBlock);

    CGF.EmitBlock(OffloadContBlock, /*IsFinished=*/true);
  };

  // Notify that the host version must be executed.
  auto &&ElseGen = [this, &D, OutlinedFn, &CS, &CapturedVars,
                    RequiresOuterTask](CodeGenFunction &CGF,
                                       PrePostActionTy &) {
    if (RequiresOuterTask) {
      CapturedVars.clear();
      CGF.GenerateOpenMPCapturedVars(CS, CapturedVars);
    }
    emitOutlinedFunctionCall(CGF, D.getBeginLoc(), OutlinedFn, CapturedVars);
  };

  auto &&TargetThenGen = [this, &ThenGen, &D, &InputInfo, &MapTypesArray,
                          &CapturedVars, RequiresOuterTask,
                          &CS](CodeGenFunction &CGF, PrePostActionTy &) {
    // Fill up the arrays with all the captured variables.
    MappableExprsHandler::MapBaseValuesArrayTy BasePointers;
    MappableExprsHandler::MapValuesArrayTy Pointers;
    MappableExprsHandler::MapValuesArrayTy Sizes;
    MappableExprsHandler::MapFlagsArrayTy MapTypes;

    // Get mappable expression information.
    MappableExprsHandler MEHandler(D, CGF);
    llvm::DenseMap<llvm::Value *, llvm::Value *> LambdaPointers;

    auto RI = CS.getCapturedRecordDecl()->field_begin();
    auto CV = CapturedVars.begin();
    for (CapturedStmt::const_capture_iterator CI = CS.capture_begin(),
                                              CE = CS.capture_end();
         CI != CE; ++CI, ++RI, ++CV) {
      MappableExprsHandler::MapBaseValuesArrayTy CurBasePointers;
      MappableExprsHandler::MapValuesArrayTy CurPointers;
      MappableExprsHandler::MapValuesArrayTy CurSizes;
      MappableExprsHandler::MapFlagsArrayTy CurMapTypes;
      MappableExprsHandler::StructRangeInfoTy PartialStruct;

      // VLA sizes are passed to the outlined region by copy and do not have
      // map information associated.
      if (CI->capturesVariableArrayType()) {
        CurBasePointers.push_back(*CV);
        CurPointers.push_back(*CV);
        CurSizes.push_back(CGF.getTypeSize(RI->getType()));
        // Copy to the device as an argument. No need to retrieve it.
        CurMapTypes.push_back(MappableExprsHandler::OMP_MAP_LITERAL |
                              MappableExprsHandler::OMP_MAP_TARGET_PARAM);
      } else {
        // If we have any information in the map clause, we use it, otherwise
        // we just do a default mapping.
        MEHandler.generateInfoForCapture(CI, *CV, CurBasePointers, CurPointers,
                                         CurSizes, CurMapTypes, PartialStruct);
        if (CurBasePointers.empty())
          MEHandler.generateDefaultMapInfo(*CI, **RI, *CV, CurBasePointers,
                                           CurPointers, CurSizes, CurMapTypes);
        // Generate correct mapping for variables captured by reference in
        // lambdas.
        if (CI->capturesVariable())
          MEHandler.generateInfoForLambdaCaptures(
              CI->getCapturedVar(), *CV, CurBasePointers, CurPointers,
              CurSizes, CurMapTypes, LambdaPointers);
      }
      // We expect to have at least an element of information for this capture.
      assert(!CurBasePointers.empty() &&
             "Non-existing map pointer for capture!");
      assert(CurBasePointers.size() == CurPointers.size() &&
             CurBasePointers.size() == CurSizes.size() &&
             CurBasePointers.size() == CurMapTypes.size() &&
             "Inconsistent map information sizes!");

      // If there is an entry in PartialStruct it means we have a struct with
      // individual members mapped. Emit an extra combined entry.
      if (PartialStruct.Base.isValid())
        MEHandler.emitCombinedEntry(BasePointers, Pointers, Sizes, MapTypes,
                                    CurMapTypes, PartialStruct);

      // We need to append the results of this capture to what we already have.
      BasePointers.append(CurBasePointers.begin(), CurBasePointers.end());
      Pointers.append(CurPointers.begin(), CurPointers.end());
      Sizes.append(CurSizes.begin(), CurSizes.end());
      MapTypes.append(CurMapTypes.begin(), CurMapTypes.end());
    }
    // Adjust MEMBER_OF flags for the lambdas captures.
    MEHandler.adjustMemberOfForLambdaCaptures(LambdaPointers, BasePointers,
                                              Pointers, MapTypes);
    // Map other list items in the map clause which are not captured variables
    // but "declare target link" global variables.
    MEHandler.generateInfoForDeclareTargetLink(BasePointers, Pointers, Sizes,
                                               MapTypes);

    TargetDataInfo Info;
    // Fill up the arrays and create the arguments.
    emitOffloadingArrays(CGF, BasePointers, Pointers, Sizes, MapTypes, Info);
    emitOffloadingArraysArgument(CGF, Info.BasePointersArray,
                                 Info.PointersArray, Info.SizesArray,
                                 Info.MapTypesArray, Info);
    InputInfo.NumberOfTargetItems = Info.NumberOfPtrs;
    InputInfo.BasePointersArray =
        Address(Info.BasePointersArray, CGM.getPointerAlign());
    InputInfo.PointersArray =
        Address(Info.PointersArray, CGM.getPointerAlign());
    InputInfo.SizesArray = Address(Info.SizesArray, CGM.getPointerAlign());
    MapTypesArray = Info.MapTypesArray;
    if (RequiresOuterTask)
      CGF.EmitOMPTargetTaskBasedDirective(D, ThenGen, InputInfo);
    else
      emitInlinedDirective(CGF, D.getDirectiveKind(), ThenGen);
  };

  auto &&TargetElseGen = [this, &ElseGen, &D, RequiresOuterTask](
                             CodeGenFunction &CGF, PrePostActionTy &) {
    if (RequiresOuterTask) {
      CodeGenFunction::OMPTargetDataInfo InputInfo;
      CGF.EmitOMPTargetTaskBasedDirective(D, ElseGen, InputInfo);
    } else {
      emitInlinedDirective(CGF, D.getDirectiveKind(), ElseGen);
    }
  };

  // If we have a target function ID it means that we need to support
  // offloading, otherwise, just execute on the host. We need to execute on
  // the host regardless of the conditional in the if clause if, e.g., the
  // user does not specify target triples.
  if (OutlinedFnID) {
    if (IfCond) {
      emitOMPIfClause(CGF, IfCond, TargetThenGen, TargetElseGen);
    } else {
      RegionCodeGenTy ThenRCG(TargetThenGen);
      ThenRCG(CGF);
    }
  } else {
    RegionCodeGenTy ElseRCG(TargetElseGen);
    ElseRCG(CGF);
  }
}
void CGOpenMPRuntime::scanForTargetRegionsFunctions(const Stmt *S,
                                                    StringRef ParentName) {
  if (!S)
    return;

  // Codegen OMP target directives that offload compute to the device.
  bool RequiresDeviceCodegen =
      isa<OMPExecutableDirective>(S) &&
      isOpenMPTargetExecutionDirective(
          cast<OMPExecutableDirective>(S)->getDirectiveKind());

  if (RequiresDeviceCodegen) {
    const auto &E = *cast<OMPExecutableDirective>(S);
    unsigned DeviceID;
    unsigned FileID;
    unsigned Line;
    getTargetEntryUniqueInfo(CGM.getContext(), E.getBeginLoc(), DeviceID,
                             FileID, Line);

    // Is this a target region that should not be emitted as an entry point?
    // If so just signal we are done with this target region.
    if (!OffloadEntriesInfoManager.hasTargetRegionEntryInfo(DeviceID, FileID,
                                                            ParentName, Line))
      return;

    switch (E.getDirectiveKind()) {
    case OMPD_target:
      CodeGenFunction::EmitOMPTargetDeviceFunction(CGM, ParentName,
                                                   cast<OMPTargetDirective>(E));
      break;
    case OMPD_target_parallel:
      CodeGenFunction::EmitOMPTargetParallelDeviceFunction(
          CGM, ParentName, cast<OMPTargetParallelDirective>(E));
      break;
    case OMPD_target_teams:
      CodeGenFunction::EmitOMPTargetTeamsDeviceFunction(
          CGM, ParentName, cast<OMPTargetTeamsDirective>(E));
      break;
    case OMPD_target_teams_distribute:
      CodeGenFunction::EmitOMPTargetTeamsDistributeDeviceFunction(
          CGM, ParentName, cast<OMPTargetTeamsDistributeDirective>(E));
      break;
    case OMPD_target_teams_distribute_simd:
      CodeGenFunction::EmitOMPTargetTeamsDistributeSimdDeviceFunction(
          CGM, ParentName, cast<OMPTargetTeamsDistributeSimdDirective>(E));
      break;
    case OMPD_target_parallel_for:
      CodeGenFunction::EmitOMPTargetParallelForDeviceFunction(
          CGM, ParentName, cast<OMPTargetParallelForDirective>(E));
      break;
    case OMPD_target_parallel_for_simd:
      CodeGenFunction::EmitOMPTargetParallelForSimdDeviceFunction(
          CGM, ParentName, cast<OMPTargetParallelForSimdDirective>(E));
      break;
    case OMPD_target_simd:
      CodeGenFunction::EmitOMPTargetSimdDeviceFunction(
          CGM, ParentName, cast<OMPTargetSimdDirective>(E));
      break;
    case OMPD_target_teams_distribute_parallel_for:
      CodeGenFunction::EmitOMPTargetTeamsDistributeParallelForDeviceFunction(
          CGM, ParentName,
          cast<OMPTargetTeamsDistributeParallelForDirective>(E));
      break;
    case OMPD_target_teams_distribute_parallel_for_simd:
      CodeGenFunction::
          EmitOMPTargetTeamsDistributeParallelForSimdDeviceFunction(
              CGM, ParentName,
              cast<OMPTargetTeamsDistributeParallelForSimdDirective>(E));
      break;
    case OMPD_parallel:
    case OMPD_for:
    case OMPD_parallel_for:
    case OMPD_parallel_sections:
    case OMPD_for_simd:
    case OMPD_parallel_for_simd:
    case OMPD_cancel:
    case OMPD_cancellation_point:
    case OMPD_ordered:
    case OMPD_threadprivate:
    case OMPD_task:
    case OMPD_simd:
    case OMPD_sections:
    case OMPD_section:
    case OMPD_single:
    case OMPD_master:
    case OMPD_critical:
    case OMPD_taskyield:
    case OMPD_barrier:
    case OMPD_taskwait:
    case OMPD_taskgroup:
    case OMPD_atomic:
    case OMPD_flush:
    case OMPD_teams:
    case OMPD_target_data:
    case OMPD_target_exit_data:
    case OMPD_target_enter_data:
    case OMPD_distribute:
    case OMPD_distribute_simd:
    case OMPD_distribute_parallel_for:
    case OMPD_distribute_parallel_for_simd:
    case OMPD_teams_distribute:
    case OMPD_teams_distribute_simd:
    case OMPD_teams_distribute_parallel_for:
    case OMPD_teams_distribute_parallel_for_simd:
    case OMPD_target_update:
    case OMPD_declare_simd:
    case OMPD_declare_target:
    case OMPD_end_declare_target:
    case OMPD_declare_reduction:
    case OMPD_taskloop:
    case OMPD_taskloop_simd:
    case OMPD_requires:
    case OMPD_unknown:
      llvm_unreachable("Unknown target directive for OpenMP device codegen.");
    }
    return;
  }

  if (const auto *E = dyn_cast<OMPExecutableDirective>(S)) {
    if (!E->hasAssociatedStmt() || !E->getAssociatedStmt())
      return;

    scanForTargetRegionsFunctions(
        E->getInnermostCapturedStmt()->getCapturedStmt(), ParentName);
    return;
  }

  // If this is a lambda function, look into its body.
  if (const auto *L = dyn_cast<LambdaExpr>(S))
    S = L->getBody();

  // Keep looking for target regions recursively.
  for (const Stmt *II : S->children())
    scanForTargetRegionsFunctions(II, ParentName);
}
bool CGOpenMPRuntime::emitTargetFunctions(GlobalDecl GD) {
  // If emitting code for the host, we do not process FD here. Instead we do
  // the normal code generation.
  if (!CGM.getLangOpts().OpenMPIsDevice)
    return false;

  const ValueDecl *VD = cast<ValueDecl>(GD.getDecl());
  StringRef Name = CGM.getMangledName(GD);
  // Try to detect target regions in the function.
  if (const auto *FD = dyn_cast<FunctionDecl>(VD))
    scanForTargetRegionsFunctions(FD->getBody(), Name);

  // Do not emit the function if it is not marked as declare target.
  return !OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD) &&
         AlreadyEmittedTargetFunctions.count(Name) == 0;
}
bool CGOpenMPRuntime::emitTargetGlobalVariable(GlobalDecl GD) {
  if (!CGM.getLangOpts().OpenMPIsDevice)
    return false;

  // Check if there are Ctors/Dtors in this declaration and look for target
  // regions in it. We use the complete variant to produce the kernel name
  // mangling.
  QualType RDTy = cast<VarDecl>(GD.getDecl())->getType();
  if (const auto *RD = RDTy->getBaseElementTypeUnsafe()->getAsCXXRecordDecl()) {
    for (const CXXConstructorDecl *Ctor : RD->ctors()) {
      StringRef ParentName =
          CGM.getMangledName(GlobalDecl(Ctor, Ctor_Complete));
      scanForTargetRegionsFunctions(Ctor->getBody(), ParentName);
    }
    if (const CXXDestructorDecl *Dtor = RD->getDestructor()) {
      StringRef ParentName =
          CGM.getMangledName(GlobalDecl(Dtor, Dtor_Complete));
      scanForTargetRegionsFunctions(Dtor->getBody(), ParentName);
    }
  }

  // Do not emit the variable if it is not marked as declare target.
  llvm::Optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
      OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(
          cast<VarDecl>(GD.getDecl()));
  if (!Res || *Res == OMPDeclareTargetDeclAttr::MT_Link) {
    DeferredGlobalVariables.insert(cast<VarDecl>(GD.getDecl()));
    return true;
  }
  return false;
}
void CGOpenMPRuntime::registerTargetGlobalVariable(const VarDecl *VD,
                                                   llvm::Constant *Addr) {
  llvm::Optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
      OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD);
  if (!Res) {
    if (CGM.getLangOpts().OpenMPIsDevice) {
      // Register non-target variables being emitted in device code (debug
      // info may cause this).
      StringRef VarName = CGM.getMangledName(VD);
      EmittedNonTargetVariables.try_emplace(VarName, Addr);
    }
    return;
  }
  // Register declare target variables.
  OffloadEntriesInfoManagerTy::OMPTargetGlobalVarEntryKind Flags;
  StringRef VarName;
  CharUnits VarSize;
  llvm::GlobalValue::LinkageTypes Linkage;
  switch (*Res) {
  case OMPDeclareTargetDeclAttr::MT_To:
    Flags = OffloadEntriesInfoManagerTy::OMPTargetGlobalVarEntryTo;
    VarName = CGM.getMangledName(VD);
    if (VD->hasDefinition(CGM.getContext()) != VarDecl::DeclarationOnly) {
      VarSize = CGM.getContext().getTypeSizeInChars(VD->getType());
      assert(!VarSize.isZero() && "Expected non-zero size of the variable");
    } else {
      VarSize = CharUnits::Zero();
    }
    Linkage = CGM.getLLVMLinkageVarDefinition(VD, /*IsConstant=*/false);
    // Temporary solution to prevent optimizations of the internal variables.
    if (CGM.getLangOpts().OpenMPIsDevice && !VD->isExternallyVisible()) {
      std::string RefName = getName({VarName, "ref"});
      if (!CGM.GetGlobalValue(RefName)) {
        llvm::Constant *AddrRef =
            getOrCreateInternalVariable(Addr->getType(), RefName);
        auto *GVAddrRef = cast<llvm::GlobalVariable>(AddrRef);
        GVAddrRef->setConstant(/*Val=*/true);
        GVAddrRef->setLinkage(llvm::GlobalValue::InternalLinkage);
        GVAddrRef->setInitializer(Addr);
        CGM.addCompilerUsedGlobal(GVAddrRef);
      }
    }
    break;
  case OMPDeclareTargetDeclAttr::MT_Link:
    Flags = OffloadEntriesInfoManagerTy::OMPTargetGlobalVarEntryLink;
    if (CGM.getLangOpts().OpenMPIsDevice) {
      VarName = Addr->getName();
      Addr = nullptr;
    } else {
      VarName = getAddrOfDeclareTargetLink(VD).getName();
      Addr = cast<llvm::Constant>(getAddrOfDeclareTargetLink(VD).getPointer());
    }
    VarSize = CGM.getPointerSize();
    Linkage = llvm::GlobalValue::WeakAnyLinkage;
    break;
  }
  OffloadEntriesInfoManager.registerDeviceGlobalVarEntryInfo(
      VarName, Addr, VarSize, Flags, Linkage);
}
bool CGOpenMPRuntime::emitTargetGlobal(GlobalDecl GD) {
  if (isa<FunctionDecl>(GD.getDecl()) ||
      isa<OMPDeclareReductionDecl>(GD.getDecl()))
    return emitTargetFunctions(GD);

  return emitTargetGlobalVariable(GD);
}

void CGOpenMPRuntime::emitDeferredTargetDecls() const {
  for (const VarDecl *VD : DeferredGlobalVariables) {
    llvm::Optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
        OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD);
    if (!Res)
      continue;
    if (*Res == OMPDeclareTargetDeclAttr::MT_To) {
      CGM.EmitGlobal(VD);
    } else {
      assert(*Res == OMPDeclareTargetDeclAttr::MT_Link &&
             "Expected to or link clauses.");
      (void)CGM.getOpenMPRuntime().getAddrOfDeclareTargetLink(VD);
    }
  }
}

void CGOpenMPRuntime::adjustTargetSpecificDataForLambdas(
    CodeGenFunction &CGF, const OMPExecutableDirective &D) const {
  assert(isOpenMPTargetExecutionDirective(D.getDirectiveKind()) &&
         "Expected target-based directive.");
}
CGOpenMPRuntime::DisableAutoDeclareTargetRAII::DisableAutoDeclareTargetRAII(
    CodeGenModule &CGM)
    : CGM(CGM) {
  if (CGM.getLangOpts().OpenMPIsDevice) {
    SavedShouldMarkAsGlobal = CGM.getOpenMPRuntime().ShouldMarkAsGlobal;
    CGM.getOpenMPRuntime().ShouldMarkAsGlobal = false;
  }
}

CGOpenMPRuntime::DisableAutoDeclareTargetRAII::~DisableAutoDeclareTargetRAII() {
  if (CGM.getLangOpts().OpenMPIsDevice)
    CGM.getOpenMPRuntime().ShouldMarkAsGlobal = SavedShouldMarkAsGlobal;
}

bool CGOpenMPRuntime::markAsGlobalTarget(GlobalDecl GD) {
  if (!CGM.getLangOpts().OpenMPIsDevice || !ShouldMarkAsGlobal)
    return true;

  StringRef Name = CGM.getMangledName(GD);
  const auto *D = cast<FunctionDecl>(GD.getDecl());
  // Do not emit the function if it is marked as declare target, as it was
  // already emitted.
  if (OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(D)) {
    if (D->hasBody() && AlreadyEmittedTargetFunctions.count(Name) == 0) {
      if (auto *F = dyn_cast_or_null<llvm::Function>(CGM.GetGlobalValue(Name)))
        return !F->isDeclaration();
      return false;
    }
    return true;
  }

  return !AlreadyEmittedTargetFunctions.insert(Name).second;
}
llvm::Function *CGOpenMPRuntime::emitRegistrationFunction() {
  // If we have offloading in the current module, we need to emit the entries
  // now and register the offloading descriptor.
  createOffloadEntriesAndInfoMetadata();

  // Create and register the offloading binary descriptors. This is the main
  // entity that captures all the information about offloading in the current
  // compilation unit.
  return createOffloadingBinaryDescriptorRegistration();
}

void CGOpenMPRuntime::emitTeamsCall(CodeGenFunction &CGF,
                                    const OMPExecutableDirective &D,
                                    SourceLocation Loc,
                                    llvm::Value *OutlinedFn,
                                    ArrayRef<llvm::Value *> CapturedVars) {
  if (!CGF.HaveInsertPoint())
    return;

  llvm::Value *RTLoc = emitUpdateLocation(CGF, Loc);
  CodeGenFunction::RunCleanupsScope Scope(CGF);

  // Build call __kmpc_fork_teams(loc, n, microtask, var1, .., varn);
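  // For example (illustrative only): a 'teams' region with two captured
  // variables a and b yields
  //   __kmpc_fork_teams(loc, 2, microtask, a, b);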
  llvm::Value *Args[] = {
      RTLoc,
      CGF.Builder.getInt32(CapturedVars.size()), // Number of captured vars
      CGF.Builder.CreateBitCast(OutlinedFn, getKmpc_MicroPointerTy())};
  llvm::SmallVector<llvm::Value *, 16> RealArgs;
  RealArgs.append(std::begin(Args), std::end(Args));
  RealArgs.append(CapturedVars.begin(), CapturedVars.end());

  llvm::Value *RTLFn = createRuntimeFunction(OMPRTL__kmpc_fork_teams);
  CGF.EmitRuntimeCall(RTLFn, RealArgs);
}

void CGOpenMPRuntime::emitNumTeamsClause(CodeGenFunction &CGF,
                                         const Expr *NumTeams,
                                         const Expr *ThreadLimit,
                                         SourceLocation Loc) {
  if (!CGF.HaveInsertPoint())
    return;

  llvm::Value *RTLoc = emitUpdateLocation(CGF, Loc);

  llvm::Value *NumTeamsVal =
      NumTeams
          ? CGF.Builder.CreateIntCast(CGF.EmitScalarExpr(NumTeams),
                                      CGF.CGM.Int32Ty, /*isSigned=*/true)
          : CGF.Builder.getInt32(0);

  llvm::Value *ThreadLimitVal =
      ThreadLimit
          ? CGF.Builder.CreateIntCast(CGF.EmitScalarExpr(ThreadLimit),
                                      CGF.CGM.Int32Ty, /*isSigned=*/true)
          : CGF.Builder.getInt32(0);

  // Build call __kmpc_push_num_teams(&loc, global_tid, num_teams,
  // thread_limit)
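  // For example (illustrative only): 'num_teams(4) thread_limit(8)' pushes
  // (4, 8); an absent clause pushes 0, leaving the choice to the runtime.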
  llvm::Value *PushNumTeamsArgs[] = {RTLoc, getThreadID(CGF, Loc), NumTeamsVal,
                                     ThreadLimitVal};
  CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_push_num_teams),
                      PushNumTeamsArgs);
}
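// A rough sketch of what the function below produces (illustrative, not
// emitted code): for '#pragma omp target data map(tofrom: a)' the region body
// is bracketed by __tgt_target_data_begin(...) and __tgt_target_data_end(...)
// over the same offloading arrays.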
void CGOpenMPRuntime::emitTargetDataCalls(
    CodeGenFunction &CGF, const OMPExecutableDirective &D, const Expr *IfCond,
    const Expr *Device, const RegionCodeGenTy &CodeGen, TargetDataInfo &Info) {
  if (!CGF.HaveInsertPoint())
    return;

  // Action used to replace the default codegen action and turn privatization
  // off.
  PrePostActionTy NoPrivAction;

  // Generate the code for the opening of the data environment. Capture all
  // the arguments of the runtime call by reference because they are used in
  // the closing of the region.
  auto &&BeginThenGen = [this, &D, Device, &Info,
                         &CodeGen](CodeGenFunction &CGF, PrePostActionTy &) {
    // Fill up the arrays with all the mapped variables.
    MappableExprsHandler::MapBaseValuesArrayTy BasePointers;
    MappableExprsHandler::MapValuesArrayTy Pointers;
    MappableExprsHandler::MapValuesArrayTy Sizes;
    MappableExprsHandler::MapFlagsArrayTy MapTypes;

    // Get map clause information.
    MappableExprsHandler MCHandler(D, CGF);
    MCHandler.generateAllInfo(BasePointers, Pointers, Sizes, MapTypes);

    // Fill up the arrays and create the arguments.
    emitOffloadingArrays(CGF, BasePointers, Pointers, Sizes, MapTypes, Info);

    llvm::Value *BasePointersArrayArg = nullptr;
    llvm::Value *PointersArrayArg = nullptr;
    llvm::Value *SizesArrayArg = nullptr;
    llvm::Value *MapTypesArrayArg = nullptr;
    emitOffloadingArraysArgument(CGF, BasePointersArrayArg, PointersArrayArg,
                                 SizesArrayArg, MapTypesArrayArg, Info);

    // Emit device ID if any.
    llvm::Value *DeviceID = nullptr;
    if (Device) {
      DeviceID = CGF.Builder.CreateIntCast(CGF.EmitScalarExpr(Device),
                                           CGF.Int64Ty, /*isSigned=*/true);
    } else {
      DeviceID = CGF.Builder.getInt64(OMP_DEVICEID_UNDEF);
    }

    // Emit the number of elements in the offloading arrays.
    llvm::Value *PointerNum = CGF.Builder.getInt32(Info.NumberOfPtrs);

    llvm::Value *OffloadingArgs[] = {
        DeviceID,         PointerNum,    BasePointersArrayArg,
        PointersArrayArg, SizesArrayArg, MapTypesArrayArg};
    CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__tgt_target_data_begin),
                        OffloadingArgs);

    // If device pointer privatization is required, emit the body of the
    // region here. It will have to be duplicated: with and without
    // privatization.
    if (!Info.CaptureDeviceAddrMap.empty())
      CodeGen(CGF);
  };

  // Generate code for the closing of the data region.
  auto &&EndThenGen = [this, Device, &Info](CodeGenFunction &CGF,
                                            PrePostActionTy &) {
    assert(Info.isValid() && "Invalid data environment closing arguments.");

    llvm::Value *BasePointersArrayArg = nullptr;
    llvm::Value *PointersArrayArg = nullptr;
    llvm::Value *SizesArrayArg = nullptr;
    llvm::Value *MapTypesArrayArg = nullptr;
    emitOffloadingArraysArgument(CGF, BasePointersArrayArg, PointersArrayArg,
                                 SizesArrayArg, MapTypesArrayArg, Info);

    // Emit device ID if any.
    llvm::Value *DeviceID = nullptr;
    if (Device) {
      DeviceID = CGF.Builder.CreateIntCast(CGF.EmitScalarExpr(Device),
                                           CGF.Int64Ty, /*isSigned=*/true);
    } else {
      DeviceID = CGF.Builder.getInt64(OMP_DEVICEID_UNDEF);
    }

    // Emit the number of elements in the offloading arrays.
    llvm::Value *PointerNum = CGF.Builder.getInt32(Info.NumberOfPtrs);

    llvm::Value *OffloadingArgs[] = {
        DeviceID,         PointerNum,    BasePointersArrayArg,
        PointersArrayArg, SizesArrayArg, MapTypesArrayArg};
    CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__tgt_target_data_end),
                        OffloadingArgs);
  };

  // If we need device pointer privatization, we need to emit the body of the
  // region with no privatization in the 'else' branch of the conditional.
  // Otherwise, we don't have to do anything.
  auto &&BeginElseGen = [&Info, &CodeGen, &NoPrivAction](CodeGenFunction &CGF,
                                                         PrePostActionTy &) {
    if (!Info.CaptureDeviceAddrMap.empty()) {
      CodeGen.setAction(NoPrivAction);
      CodeGen(CGF);
    }
  };

  // We don't have to do anything to close the region if the if clause
  // evaluates to false.
  auto &&EndElseGen = [](CodeGenFunction &CGF, PrePostActionTy &) {};

  if (IfCond) {
    emitOMPIfClause(CGF, IfCond, BeginThenGen, BeginElseGen);
  } else {
    RegionCodeGenTy RCG(BeginThenGen);
    RCG(CGF);
  }

  // If we don't require privatization of device pointers, we emit the body
  // in between the runtime calls. This avoids duplicating the body code.
  if (Info.CaptureDeviceAddrMap.empty()) {
    CodeGen.setAction(NoPrivAction);
    CodeGen(CGF);
  }

  if (IfCond) {
    emitOMPIfClause(CGF, IfCond, EndThenGen, EndElseGen);
  } else {
    RegionCodeGenTy RCG(EndThenGen);
    RCG(CGF);
  }
}
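// Illustrative sketch for the standalone data directives handled below:
// '#pragma omp target enter data map(to: x)' lowers to a single
// __tgt_target_data_begin() call, 'target exit data' to
// __tgt_target_data_end(), and 'target update' to __tgt_target_data_update()
// (with _nowait variants when a 'nowait' clause is present).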
void CGOpenMPRuntime::emitTargetDataStandAloneCall(
    CodeGenFunction &CGF, const OMPExecutableDirective &D, const Expr *IfCond,
    const Expr *Device) {
  if (!CGF.HaveInsertPoint())
    return;

  assert((isa<OMPTargetEnterDataDirective>(D) ||
          isa<OMPTargetExitDataDirective>(D) ||
          isa<OMPTargetUpdateDirective>(D)) &&
         "Expecting either target enter, exit data, or update directives.");

  CodeGenFunction::OMPTargetDataInfo InputInfo;
  llvm::Value *MapTypesArray = nullptr;
  // Generate the code for the opening of the data environment.
  auto &&ThenGen = [this, &D, Device, &InputInfo,
                    &MapTypesArray](CodeGenFunction &CGF, PrePostActionTy &) {
    // Emit device ID if any.
    llvm::Value *DeviceID = nullptr;
    if (Device) {
      DeviceID = CGF.Builder.CreateIntCast(CGF.EmitScalarExpr(Device),
                                           CGF.Int64Ty, /*isSigned=*/true);
    } else {
      DeviceID = CGF.Builder.getInt64(OMP_DEVICEID_UNDEF);
    }

    // Emit the number of elements in the offloading arrays.
    llvm::Constant *PointerNum =
        CGF.Builder.getInt32(InputInfo.NumberOfTargetItems);

    llvm::Value *OffloadingArgs[] = {DeviceID,
                                     PointerNum,
                                     InputInfo.BasePointersArray.getPointer(),
                                     InputInfo.PointersArray.getPointer(),
                                     InputInfo.SizesArray.getPointer(),
                                     MapTypesArray};

    // Select the right runtime function call for each expected standalone
    // directive.
    const bool HasNowait = D.hasClausesOfKind<OMPNowaitClause>();
    OpenMPRTLFunction RTLFn;
    switch (D.getDirectiveKind()) {
    case OMPD_target_enter_data:
      RTLFn = HasNowait ? OMPRTL__tgt_target_data_begin_nowait
                        : OMPRTL__tgt_target_data_begin;
      break;
    case OMPD_target_exit_data:
      RTLFn = HasNowait ? OMPRTL__tgt_target_data_end_nowait
                        : OMPRTL__tgt_target_data_end;
      break;
    case OMPD_target_update:
      RTLFn = HasNowait ? OMPRTL__tgt_target_data_update_nowait
                        : OMPRTL__tgt_target_data_update;
      break;
    case OMPD_parallel:
    case OMPD_for:
    case OMPD_parallel_for:
    case OMPD_parallel_sections:
    case OMPD_for_simd:
    case OMPD_parallel_for_simd:
    case OMPD_cancel:
    case OMPD_cancellation_point:
    case OMPD_ordered:
    case OMPD_threadprivate:
    case OMPD_task:
    case OMPD_simd:
    case OMPD_sections:
    case OMPD_section:
    case OMPD_single:
    case OMPD_master:
    case OMPD_critical:
    case OMPD_taskyield:
    case OMPD_barrier:
    case OMPD_taskwait:
    case OMPD_taskgroup:
    case OMPD_atomic:
    case OMPD_flush:
    case OMPD_teams:
    case OMPD_target_data:
    case OMPD_distribute:
    case OMPD_distribute_simd:
    case OMPD_distribute_parallel_for:
    case OMPD_distribute_parallel_for_simd:
    case OMPD_teams_distribute:
    case OMPD_teams_distribute_simd:
    case OMPD_teams_distribute_parallel_for:
    case OMPD_teams_distribute_parallel_for_simd:
    case OMPD_declare_simd:
    case OMPD_declare_target:
    case OMPD_end_declare_target:
    case OMPD_declare_reduction:
    case OMPD_taskloop:
    case OMPD_taskloop_simd:
    case OMPD_target:
    case OMPD_target_simd:
    case OMPD_target_teams_distribute:
    case OMPD_target_teams_distribute_simd:
    case OMPD_target_teams_distribute_parallel_for:
    case OMPD_target_teams_distribute_parallel_for_simd:
    case OMPD_target_teams:
    case OMPD_target_parallel:
    case OMPD_target_parallel_for:
    case OMPD_target_parallel_for_simd:
    case OMPD_requires:
    case OMPD_unknown:
      llvm_unreachable("Unexpected standalone target data directive.");
      break;
    }
    CGF.EmitRuntimeCall(createRuntimeFunction(RTLFn), OffloadingArgs);
  };

  auto &&TargetThenGen = [this, &ThenGen, &D, &InputInfo, &MapTypesArray](
                             CodeGenFunction &CGF, PrePostActionTy &) {
    // Fill up the arrays with all the mapped variables.
    MappableExprsHandler::MapBaseValuesArrayTy BasePointers;
    MappableExprsHandler::MapValuesArrayTy Pointers;
    MappableExprsHandler::MapValuesArrayTy Sizes;
    MappableExprsHandler::MapFlagsArrayTy MapTypes;

    // Get map clause information.
    MappableExprsHandler MEHandler(D, CGF);
    MEHandler.generateAllInfo(BasePointers, Pointers, Sizes, MapTypes);

    TargetDataInfo Info;
    // Fill up the arrays and create the arguments.
    emitOffloadingArrays(CGF, BasePointers, Pointers, Sizes, MapTypes, Info);
    emitOffloadingArraysArgument(CGF, Info.BasePointersArray,
                                 Info.PointersArray, Info.SizesArray,
                                 Info.MapTypesArray, Info);
    InputInfo.NumberOfTargetItems = Info.NumberOfPtrs;
    InputInfo.BasePointersArray =
        Address(Info.BasePointersArray, CGM.getPointerAlign());
    InputInfo.PointersArray =
        Address(Info.PointersArray, CGM.getPointerAlign());
    InputInfo.SizesArray = Address(Info.SizesArray, CGM.getPointerAlign());
    MapTypesArray = Info.MapTypesArray;
    if (D.hasClausesOfKind<OMPDependClause>())
      CGF.EmitOMPTargetTaskBasedDirective(D, ThenGen, InputInfo);
    else
      emitInlinedDirective(CGF, D.getDirectiveKind(), ThenGen);
  };

  if (IfCond) {
    emitOMPIfClause(CGF, IfCond, TargetThenGen,
                    [](CodeGenFunction &CGF, PrePostActionTy &) {});
  } else {
    RegionCodeGenTy ThenRCG(TargetThenGen);
    ThenRCG(CGF);
  }
}
namespace {
/// Kind of parameter in a function with 'declare simd' directive.
enum ParamKindTy { LinearWithVarStride, Linear, Uniform, Vector };
/// Attribute set of the parameter.
struct ParamAttrTy {
  ParamKindTy Kind = Vector;
  llvm::APSInt StrideOrArg;
  llvm::APSInt Alignment;
};
} // namespace

static unsigned evaluateCDTSize(const FunctionDecl *FD,
                                ArrayRef<ParamAttrTy> ParamAttrs) {
  // Every vector variant of a SIMD-enabled function has a vector length
  // (VLEN). If the OpenMP clause "simdlen" is used, the VLEN is the value of
  // the argument of that clause. The VLEN value must be a power of 2.
  // Otherwise, the notion of the function's "characteristic data type" (CDT)
  // is used to compute the vector length.
  // CDT is defined in the following order:
  //   a) For a non-void function, the CDT is the return type.
  //   b) If the function has any non-uniform, non-linear parameters, then the
  //      CDT is the type of the first such parameter.
  //   c) If the CDT determined by a) or b) above is a struct, union, or class
  //      type which is passed by value (except for the type that maps to the
  //      built-in complex data type), the characteristic data type is int.
  //   d) If none of the above three cases is applicable, the CDT is int.
  // The VLEN is then determined based on the CDT and the size of the vector
  // register of the ISA for which the current vector version is generated.
  // The VLEN is computed using the formula below:
  //   VLEN = sizeof(vector_register) / sizeof(CDT),
  // where the vector register size is specified in section 3.2.1 "Registers
  // and the Stack Frame" of the original AMD64 ABI document.
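  // For example (illustrative only): for 'double foo(double x)' with no
  // simdlen clause, the CDT is double (8 bytes), so an SSE (128-bit) variant
  // gets VLEN = 16 / 8 = 2 and an AVX-512 (512-bit) variant gets
  // VLEN = 64 / 8 = 8.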
  QualType RetType = FD->getReturnType();
  if (RetType.isNull())
    return 0;
  ASTContext &C = FD->getASTContext();
  QualType CDT;
  if (!RetType.isNull() && !RetType->isVoidType()) {
    CDT = RetType;
  } else {
    unsigned Offset = 0;
    if (const auto *MD = dyn_cast<CXXMethodDecl>(FD)) {
      if (ParamAttrs[Offset].Kind == Vector)
        CDT = C.getPointerType(C.getRecordType(MD->getParent()));
      ++Offset;
    }
    if (CDT.isNull()) {
      for (unsigned I = 0, E = FD->getNumParams(); I < E; ++I) {
        if (ParamAttrs[I + Offset].Kind == Vector) {
          CDT = FD->getParamDecl(I)->getType();
          break;
        }
      }
    }
  }
  if (CDT.isNull())
    CDT = C.IntTy;
  CDT = CDT->getCanonicalTypeUnqualified();
  if (CDT->isRecordType() || CDT->isUnionType())
    CDT = C.IntTy;
  return C.getTypeSize(CDT);
}
static void
emitX86DeclareSimdFunction(const FunctionDecl *FD, llvm::Function *Fn,
                           const llvm::APSInt &VLENVal,
                           ArrayRef<ParamAttrTy> ParamAttrs,
                           OMPDeclareSimdDeclAttr::BranchStateTy State) {
  struct ISADataTy {
    char ISA;
    unsigned VecRegSize;
  };
  ISADataTy ISAData[] = {
      {'b', 128}, // SSE
      {'c', 256}, // AVX
      {'d', 256}, // AVX2
      {'e', 512}, // AVX512
  };
  llvm::SmallVector<char, 2> Masked;
  switch (State) {
  case OMPDeclareSimdDeclAttr::BS_Undefined:
    Masked.push_back('N');
    Masked.push_back('M');
    break;
  case OMPDeclareSimdDeclAttr::BS_Notinbranch:
    Masked.push_back('N');
    break;
  case OMPDeclareSimdDeclAttr::BS_Inbranch:
    Masked.push_back('M');
    break;
  }
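  // Illustrative sketch of the vector-function ABI mangling produced by the
  // loop below: "_ZGV" <isa> <mask> <vlen> <param-kinds> '_' <name>. For
  // example, for 'double foo(double x)' declared notinbranch, the SSE variant
  // would be named "_ZGVbN2v_foo" (128-bit registers, VLEN 2, one vector
  // parameter).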
  for (char Mask : Masked) {
    for (const ISADataTy &Data : ISAData) {
      SmallString<256> Buffer;
      llvm::raw_svector_ostream Out(Buffer);
      Out << "_ZGV" << Data.ISA << Mask;
      if (!VLENVal) {
        Out << llvm::APSInt::getUnsigned(Data.VecRegSize /
                                         evaluateCDTSize(FD, ParamAttrs));
      } else {
        Out << VLENVal;
      }
      for (const ParamAttrTy &ParamAttr : ParamAttrs) {
        switch (ParamAttr.Kind) {
        case LinearWithVarStride:
          Out << 's' << ParamAttr.StrideOrArg;
          break;
        case Linear:
          Out << 'l';
          if (!!ParamAttr.StrideOrArg)
            Out << ParamAttr.StrideOrArg;
          break;
        case Uniform:
          Out << 'u';
          break;
        case Vector:
          Out << 'v';
          break;
        }
        if (!!ParamAttr.Alignment)
          Out << 'a' << ParamAttr.Alignment;
      }
      Out << '_' << Fn->getName();
      Fn->addFnAttr(Out.str());
    }
  }
}
void CGOpenMPRuntime::emitDeclareSimdFunction(const FunctionDecl *FD,
                                              llvm::Function *Fn) {
  ASTContext &C = CGM.getContext();
  FD = FD->getMostRecentDecl();
  // Map params to their positions in function decl.
  llvm::DenseMap<const Decl *, unsigned> ParamPositions;
  if (isa<CXXMethodDecl>(FD))
    ParamPositions.try_emplace(FD, 0);
  unsigned ParamPos = ParamPositions.size();
  for (const ParmVarDecl *P : FD->parameters()) {
    ParamPositions.try_emplace(P->getCanonicalDecl(), ParamPos);
    ++ParamPos;
  }
  while (FD) {
    for (const auto *Attr : FD->specific_attrs<OMPDeclareSimdDeclAttr>()) {
      llvm::SmallVector<ParamAttrTy, 8> ParamAttrs(ParamPositions.size());
      // Mark uniform parameters.
      for (const Expr *E : Attr->uniforms()) {
        E = E->IgnoreParenImpCasts();
        unsigned Pos;
        if (isa<CXXThisExpr>(E)) {
          Pos = ParamPositions[FD];
        } else {
          const auto *PVD = cast<ParmVarDecl>(cast<DeclRefExpr>(E)->getDecl())
                                ->getCanonicalDecl();
          Pos = ParamPositions[PVD];
        }
        ParamAttrs[Pos].Kind = Uniform;
      }
      // Get alignment info.
      auto NI = Attr->alignments_begin();
      for (const Expr *E : Attr->aligneds()) {
        E = E->IgnoreParenImpCasts();
        unsigned Pos;
        QualType ParmTy;
        if (isa<CXXThisExpr>(E)) {
          Pos = ParamPositions[FD];
          ParmTy = E->getType();
        } else {
          const auto *PVD = cast<ParmVarDecl>(cast<DeclRefExpr>(E)->getDecl())
                                ->getCanonicalDecl();
          Pos = ParamPositions[PVD];
          ParmTy = PVD->getType();
        }
        ParamAttrs[Pos].Alignment =
            (*NI)
                ? (*NI)->EvaluateKnownConstInt(C)
                : llvm::APSInt::getUnsigned(
                      C.toCharUnitsFromBits(C.getOpenMPDefaultSimdAlign(ParmTy))
                          .getQuantity());
        ++NI;
      }
      // Mark linear parameters.
      auto SI = Attr->steps_begin();
      auto MI = Attr->modifiers_begin();
      for (const Expr *E : Attr->linears()) {
        E = E->IgnoreParenImpCasts();
        unsigned Pos;
        if (isa<CXXThisExpr>(E)) {
          Pos = ParamPositions[FD];
        } else {
          const auto *PVD = cast<ParmVarDecl>(cast<DeclRefExpr>(E)->getDecl())
                                ->getCanonicalDecl();
          Pos = ParamPositions[PVD];
        }
        ParamAttrTy &ParamAttr = ParamAttrs[Pos];
        ParamAttr.Kind = Linear;
        if (*SI) {
          if (!(*SI)->EvaluateAsInt(ParamAttr.StrideOrArg, C,
                                    Expr::SE_AllowSideEffects)) {
            // The step is not a constant expression: if it names another
            // parameter, record that parameter's position as a variable
            // stride. Use dyn_cast here, as the step need not be a
            // DeclRefExpr to a parameter.
            if (const auto *DRE =
                    dyn_cast<DeclRefExpr>((*SI)->IgnoreParenImpCasts())) {
              if (const auto *StridePVD =
                      dyn_cast<ParmVarDecl>(DRE->getDecl())) {
                ParamAttr.Kind = LinearWithVarStride;
                ParamAttr.StrideOrArg = llvm::APSInt::getUnsigned(
                    ParamPositions[StridePVD->getCanonicalDecl()]);
              }
            }
          }
        }
        ++SI;
        ++MI;
      }
      llvm::APSInt VLENVal;
      if (const Expr *VLEN = Attr->getSimdlen())
        VLENVal = VLEN->EvaluateKnownConstInt(C);
      OMPDeclareSimdDeclAttr::BranchStateTy State = Attr->getBranchState();
      if (CGM.getTriple().getArch() == llvm::Triple::x86 ||
          CGM.getTriple().getArch() == llvm::Triple::x86_64)
        emitX86DeclareSimdFunction(FD, Fn, VLENVal, ParamAttrs, State);
    }
    FD = FD->getPreviousDecl();
  }
}
namespace {
/// Cleanup action for doacross support.
class DoacrossCleanupTy final : public EHScopeStack::Cleanup {
public:
  static const int DoacrossFinArgs = 2;

private:
  llvm::Value *RTLFn;
  llvm::Value *Args[DoacrossFinArgs];

public:
  DoacrossCleanupTy(llvm::Value *RTLFn, ArrayRef<llvm::Value *> CallArgs)
      : RTLFn(RTLFn) {
    assert(CallArgs.size() == DoacrossFinArgs);
    std::copy(CallArgs.begin(), CallArgs.end(), std::begin(Args));
  }
  void Emit(CodeGenFunction &CGF, Flags /*flags*/) override {
    if (!CGF.HaveInsertPoint())
      return;
    CGF.EmitRuntimeCall(RTLFn, Args);
  }
};
} // namespace
void CGOpenMPRuntime::emitDoacrossInit(CodeGenFunction &CGF,
                                       const OMPLoopDirective &D,
                                       ArrayRef<Expr *> NumIterations) {
  if (!CGF.HaveInsertPoint())
    return;

  ASTContext &C = CGM.getContext();
  QualType Int64Ty = C.getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/true);
  RecordDecl *RD;
  if (KmpDimTy.isNull()) {
    // Build struct kmp_dim {  // loop bounds info casted to kmp_int64.
    //   kmp_int64 lo;         // lower
    //   kmp_int64 up;         // upper
    //   kmp_int64 st;         // stride
    // };
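    // For illustration (hedged sketch): for a doacross loop nest declared
    // with 'ordered(2)', NumIterations has two entries, so the dims array
    // built below holds two kmp_dim records, each null-initialized (lo = 0)
    // and then filled in with up = <iteration count> and st = 1.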
    RD = C.buildImplicitRecord("kmp_dim");
    RD->startDefinition();
    addFieldToRecordDecl(C, RD, Int64Ty);
    addFieldToRecordDecl(C, RD, Int64Ty);
    addFieldToRecordDecl(C, RD, Int64Ty);
    RD->completeDefinition();
    KmpDimTy = C.getRecordType(RD);
  } else {
    RD = cast<RecordDecl>(KmpDimTy->getAsTagDecl());
  }

  llvm::APInt Size(/*numBits=*/32, NumIterations.size());
  QualType ArrayTy =
      C.getConstantArrayType(KmpDimTy, Size, ArrayType::Normal, 0);

  Address DimsAddr = CGF.CreateMemTemp(ArrayTy, "dims");
  CGF.EmitNullInitialization(DimsAddr, ArrayTy);
  enum { LowerFD = 0, UpperFD, StrideFD };
  // Fill dims with data.
  for (unsigned I = 0, E = NumIterations.size(); I < E; ++I) {
    LValue DimsLVal =
        CGF.MakeAddrLValue(CGF.Builder.CreateConstArrayGEP(
                               DimsAddr, I, C.getTypeSizeInChars(KmpDimTy)),
                           KmpDimTy);
    // dims.upper = num_iterations;
    LValue UpperLVal = CGF.EmitLValueForField(
        DimsLVal, *std::next(RD->field_begin(), UpperFD));
    llvm::Value *NumIterVal =
        CGF.EmitScalarConversion(CGF.EmitScalarExpr(NumIterations[I]),
                                 D.getNumIterations()->getType(), Int64Ty,
                                 D.getNumIterations()->getExprLoc());
    CGF.EmitStoreOfScalar(NumIterVal, UpperLVal);
    // dims.stride = 1;
    LValue StrideLVal = CGF.EmitLValueForField(
        DimsLVal, *std::next(RD->field_begin(), StrideFD));
    CGF.EmitStoreOfScalar(llvm::ConstantInt::getSigned(CGM.Int64Ty, /*V=*/1),
                          StrideLVal);
  }

  // Build call void __kmpc_doacross_init(ident_t *loc, kmp_int32 gtid,
  // kmp_int32 num_dims, struct kmp_dim * dims);
  llvm::Value *Args[] = {
      emitUpdateLocation(CGF, D.getBeginLoc()),
      getThreadID(CGF, D.getBeginLoc()),
      llvm::ConstantInt::getSigned(CGM.Int32Ty, NumIterations.size()),
      CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
          CGF.Builder
              .CreateConstArrayGEP(DimsAddr, 0, C.getTypeSizeInChars(KmpDimTy))
              .getPointer(),
          CGM.VoidPtrTy)};

  llvm::Value *RTLFn = createRuntimeFunction(OMPRTL__kmpc_doacross_init);
  CGF.EmitRuntimeCall(RTLFn, Args);
  llvm::Value *FiniArgs[DoacrossCleanupTy::DoacrossFinArgs] = {
      emitUpdateLocation(CGF, D.getEndLoc()), getThreadID(CGF, D.getEndLoc())};
  llvm::Value *FiniRTLFn = createRuntimeFunction(OMPRTL__kmpc_doacross_fini);
  CGF.EHStack.pushCleanup<DoacrossCleanupTy>(NormalAndEHCleanup, FiniRTLFn,
                                             llvm::makeArrayRef(FiniArgs));
}
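// Emits the runtime call for an 'ordered depend' construct inside a doacross
// loop. For example (illustrative only), '#pragma omp ordered depend(sink:
// i-1)' lowers to __kmpc_doacross_wait() on the vector of loop counters,
// while 'depend(source)' lowers to __kmpc_doacross_post().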
void CGOpenMPRuntime::emitDoacrossOrdered(CodeGenFunction &CGF,
                                          const OMPDependClause *C) {
  QualType Int64Ty =
      CGM.getContext().getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/1);
  llvm::APInt Size(/*numBits=*/32, C->getNumLoops());
  QualType ArrayTy = CGM.getContext().getConstantArrayType(
      Int64Ty, Size, ArrayType::Normal, 0);
  Address CntAddr = CGF.CreateMemTemp(ArrayTy, ".cnt.addr");
  for (unsigned I = 0, E = C->getNumLoops(); I < E; ++I) {
    const Expr *CounterVal = C->getLoopData(I);
    assert(CounterVal);
    llvm::Value *CntVal = CGF.EmitScalarConversion(
        CGF.EmitScalarExpr(CounterVal), CounterVal->getType(), Int64Ty,
        CounterVal->getExprLoc());
    CGF.EmitStoreOfScalar(
        CntVal,
        CGF.Builder.CreateConstArrayGEP(
            CntAddr, I, CGM.getContext().getTypeSizeInChars(Int64Ty)),
        /*Volatile=*/false, Int64Ty);
  }
  llvm::Value *Args[] = {
      emitUpdateLocation(CGF, C->getBeginLoc()),
      getThreadID(CGF, C->getBeginLoc()),
      CGF.Builder
          .CreateConstArrayGEP(CntAddr, 0,
                               CGM.getContext().getTypeSizeInChars(Int64Ty))
          .getPointer()};
  llvm::Value *RTLFn;
  if (C->getDependencyKind() == OMPC_DEPEND_source) {
    RTLFn = createRuntimeFunction(OMPRTL__kmpc_doacross_post);
  } else {
    assert(C->getDependencyKind() == OMPC_DEPEND_sink);
    RTLFn = createRuntimeFunction(OMPRTL__kmpc_doacross_wait);
  }
  CGF.EmitRuntimeCall(RTLFn, Args);
}
void CGOpenMPRuntime::emitCall(CodeGenFunction &CGF, SourceLocation Loc,
                               llvm::Value *Callee,
                               ArrayRef<llvm::Value *> Args) const {
  assert(Loc.isValid() && "Outlined function call location must be valid.");
  auto DL = ApplyDebugLocation::CreateDefaultArtificial(CGF, Loc);

  if (auto *Fn = dyn_cast<llvm::Function>(Callee)) {
    if (Fn->doesNotThrow()) {
      CGF.EmitNounwindRuntimeCall(Fn, Args);
      return;
    }
  }
  CGF.EmitRuntimeCall(Callee, Args);
}

void CGOpenMPRuntime::emitOutlinedFunctionCall(
    CodeGenFunction &CGF, SourceLocation Loc, llvm::Value *OutlinedFn,
    ArrayRef<llvm::Value *> Args) const {
  emitCall(CGF, Loc, OutlinedFn, Args);
}

Address CGOpenMPRuntime::getParameterAddress(CodeGenFunction &CGF,
                                             const VarDecl *NativeParam,
                                             const VarDecl *TargetParam) const {
  return CGF.GetAddrOfLocalVar(NativeParam);
}

Address CGOpenMPRuntime::getAddressOfLocalVariable(CodeGenFunction &CGF,
                                                   const VarDecl *VD) {
  return Address::invalid();
}
  8755. llvm::Value *CGOpenMPSIMDRuntime::emitParallelOutlinedFunction(
  8756. const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
  8757. OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen) {
  8758. llvm_unreachable("Not supported in SIMD-only mode");
  8759. }
  8760. llvm::Value *CGOpenMPSIMDRuntime::emitTeamsOutlinedFunction(
  8761. const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
  8762. OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen) {
  8763. llvm_unreachable("Not supported in SIMD-only mode");
  8764. }
  8765. llvm::Value *CGOpenMPSIMDRuntime::emitTaskOutlinedFunction(
  8766. const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
  8767. const VarDecl *PartIDVar, const VarDecl *TaskTVar,
  8768. OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen,
  8769. bool Tied, unsigned &NumberOfParts) {
  8770. llvm_unreachable("Not supported in SIMD-only mode");
  8771. }
  8772. void CGOpenMPSIMDRuntime::emitParallelCall(CodeGenFunction &CGF,
  8773. SourceLocation Loc,
  8774. llvm::Value *OutlinedFn,
  8775. ArrayRef<llvm::Value *> CapturedVars,
  8776. const Expr *IfCond) {
  8777. llvm_unreachable("Not supported in SIMD-only mode");
  8778. }
  8779. void CGOpenMPSIMDRuntime::emitCriticalRegion(
  8780. CodeGenFunction &CGF, StringRef CriticalName,
  8781. const RegionCodeGenTy &CriticalOpGen, SourceLocation Loc,
  8782. const Expr *Hint) {
  8783. llvm_unreachable("Not supported in SIMD-only mode");
  8784. }
  8785. void CGOpenMPSIMDRuntime::emitMasterRegion(CodeGenFunction &CGF,
  8786. const RegionCodeGenTy &MasterOpGen,
  8787. SourceLocation Loc) {
  8788. llvm_unreachable("Not supported in SIMD-only mode");
  8789. }
  8790. void CGOpenMPSIMDRuntime::emitTaskyieldCall(CodeGenFunction &CGF,
  8791. SourceLocation Loc) {
  8792. llvm_unreachable("Not supported in SIMD-only mode");
  8793. }
  8794. void CGOpenMPSIMDRuntime::emitTaskgroupRegion(
  8795. CodeGenFunction &CGF, const RegionCodeGenTy &TaskgroupOpGen,
  8796. SourceLocation Loc) {
  8797. llvm_unreachable("Not supported in SIMD-only mode");
  8798. }
  8799. void CGOpenMPSIMDRuntime::emitSingleRegion(
  8800. CodeGenFunction &CGF, const RegionCodeGenTy &SingleOpGen,
  8801. SourceLocation Loc, ArrayRef<const Expr *> CopyprivateVars,
  8802. ArrayRef<const Expr *> DestExprs, ArrayRef<const Expr *> SrcExprs,
  8803. ArrayRef<const Expr *> AssignmentOps) {
  8804. llvm_unreachable("Not supported in SIMD-only mode");
  8805. }
  8806. void CGOpenMPSIMDRuntime::emitOrderedRegion(CodeGenFunction &CGF,
  8807. const RegionCodeGenTy &OrderedOpGen,
  8808. SourceLocation Loc,
  8809. bool IsThreads) {
  8810. llvm_unreachable("Not supported in SIMD-only mode");
  8811. }
  8812. void CGOpenMPSIMDRuntime::emitBarrierCall(CodeGenFunction &CGF,
  8813. SourceLocation Loc,
  8814. OpenMPDirectiveKind Kind,
  8815. bool EmitChecks,
  8816. bool ForceSimpleCall) {
  8817. llvm_unreachable("Not supported in SIMD-only mode");
  8818. }
  8819. void CGOpenMPSIMDRuntime::emitForDispatchInit(
  8820. CodeGenFunction &CGF, SourceLocation Loc,
  8821. const OpenMPScheduleTy &ScheduleKind, unsigned IVSize, bool IVSigned,
  8822. bool Ordered, const DispatchRTInput &DispatchValues) {
  8823. llvm_unreachable("Not supported in SIMD-only mode");
  8824. }
  8825. void CGOpenMPSIMDRuntime::emitForStaticInit(
  8826. CodeGenFunction &CGF, SourceLocation Loc, OpenMPDirectiveKind DKind,
  8827. const OpenMPScheduleTy &ScheduleKind, const StaticRTInput &Values) {
  8828. llvm_unreachable("Not supported in SIMD-only mode");
  8829. }
  8830. void CGOpenMPSIMDRuntime::emitDistributeStaticInit(
  8831. CodeGenFunction &CGF, SourceLocation Loc,
  8832. OpenMPDistScheduleClauseKind SchedKind, const StaticRTInput &Values) {
  8833. llvm_unreachable("Not supported in SIMD-only mode");
  8834. }
  8835. void CGOpenMPSIMDRuntime::emitForOrderedIterationEnd(CodeGenFunction &CGF,
  8836. SourceLocation Loc,
  8837. unsigned IVSize,
  8838. bool IVSigned) {
  8839. llvm_unreachable("Not supported in SIMD-only mode");
  8840. }
  8841. void CGOpenMPSIMDRuntime::emitForStaticFinish(CodeGenFunction &CGF,
  8842. SourceLocation Loc,
  8843. OpenMPDirectiveKind DKind) {
  8844. llvm_unreachable("Not supported in SIMD-only mode");
  8845. }
  8846. llvm::Value *CGOpenMPSIMDRuntime::emitForNext(CodeGenFunction &CGF,
  8847. SourceLocation Loc,
  8848. unsigned IVSize, bool IVSigned,
  8849. Address IL, Address LB,
  8850. Address UB, Address ST) {
  8851. llvm_unreachable("Not supported in SIMD-only mode");
  8852. }
  8853. void CGOpenMPSIMDRuntime::emitNumThreadsClause(CodeGenFunction &CGF,
  8854. llvm::Value *NumThreads,
  8855. SourceLocation Loc) {
  8856. llvm_unreachable("Not supported in SIMD-only mode");
  8857. }
  8858. void CGOpenMPSIMDRuntime::emitProcBindClause(CodeGenFunction &CGF,
  8859. OpenMPProcBindClauseKind ProcBind,
  8860. SourceLocation Loc) {
  8861. llvm_unreachable("Not supported in SIMD-only mode");
  8862. }
  8863. Address CGOpenMPSIMDRuntime::getAddrOfThreadPrivate(CodeGenFunction &CGF,
  8864. const VarDecl *VD,
  8865. Address VDAddr,
  8866. SourceLocation Loc) {
  8867. llvm_unreachable("Not supported in SIMD-only mode");
  8868. }
  8869. llvm::Function *CGOpenMPSIMDRuntime::emitThreadPrivateVarDefinition(
  8870. const VarDecl *VD, Address VDAddr, SourceLocation Loc, bool PerformInit,
  8871. CodeGenFunction *CGF) {
  8872. llvm_unreachable("Not supported in SIMD-only mode");
  8873. }
  8874. Address CGOpenMPSIMDRuntime::getAddrOfArtificialThreadPrivate(
  8875. CodeGenFunction &CGF, QualType VarType, StringRef Name) {
  8876. llvm_unreachable("Not supported in SIMD-only mode");
  8877. }
  8878. void CGOpenMPSIMDRuntime::emitFlush(CodeGenFunction &CGF,
  8879. ArrayRef<const Expr *> Vars,
  8880. SourceLocation Loc) {
  8881. llvm_unreachable("Not supported in SIMD-only mode");
  8882. }
  8883. void CGOpenMPSIMDRuntime::emitTaskCall(CodeGenFunction &CGF, SourceLocation Loc,
  8884. const OMPExecutableDirective &D,
  8885. llvm::Value *TaskFunction,
  8886. QualType SharedsTy, Address Shareds,
  8887. const Expr *IfCond,
  8888. const OMPTaskDataTy &Data) {
  8889. llvm_unreachable("Not supported in SIMD-only mode");
  8890. }
  8891. void CGOpenMPSIMDRuntime::emitTaskLoopCall(
  8892. CodeGenFunction &CGF, SourceLocation Loc, const OMPLoopDirective &D,
  8893. llvm::Value *TaskFunction, QualType SharedsTy, Address Shareds,
  8894. const Expr *IfCond, const OMPTaskDataTy &Data) {
  8895. llvm_unreachable("Not supported in SIMD-only mode");
  8896. }
  8897. void CGOpenMPSIMDRuntime::emitReduction(
  8898. CodeGenFunction &CGF, SourceLocation Loc, ArrayRef<const Expr *> Privates,
  8899. ArrayRef<const Expr *> LHSExprs, ArrayRef<const Expr *> RHSExprs,
  8900. ArrayRef<const Expr *> ReductionOps, ReductionOptionsTy Options) {
  8901. assert(Options.SimpleReduction && "Only simple reduction is expected.");
  8902. CGOpenMPRuntime::emitReduction(CGF, Loc, Privates, LHSExprs, RHSExprs,
  8903. ReductionOps, Options);
  8904. }
llvm::Value *CGOpenMPSIMDRuntime::emitTaskReductionInit(
    CodeGenFunction &CGF, SourceLocation Loc, ArrayRef<const Expr *> LHSExprs,
    ArrayRef<const Expr *> RHSExprs, const OMPTaskDataTy &Data) {
  llvm_unreachable("Not supported in SIMD-only mode");
}

void CGOpenMPSIMDRuntime::emitTaskReductionFixups(CodeGenFunction &CGF,
                                                  SourceLocation Loc,
                                                  ReductionCodeGen &RCG,
                                                  unsigned N) {
  llvm_unreachable("Not supported in SIMD-only mode");
}

Address CGOpenMPSIMDRuntime::getTaskReductionItem(CodeGenFunction &CGF,
                                                  SourceLocation Loc,
                                                  llvm::Value *ReductionsPtr,
                                                  LValue SharedLVal) {
  llvm_unreachable("Not supported in SIMD-only mode");
}

void CGOpenMPSIMDRuntime::emitTaskwaitCall(CodeGenFunction &CGF,
                                           SourceLocation Loc) {
  llvm_unreachable("Not supported in SIMD-only mode");
}

void CGOpenMPSIMDRuntime::emitCancellationPointCall(
    CodeGenFunction &CGF, SourceLocation Loc,
    OpenMPDirectiveKind CancelRegion) {
  llvm_unreachable("Not supported in SIMD-only mode");
}

void CGOpenMPSIMDRuntime::emitCancelCall(CodeGenFunction &CGF,
                                         SourceLocation Loc, const Expr *IfCond,
                                         OpenMPDirectiveKind CancelRegion) {
  llvm_unreachable("Not supported in SIMD-only mode");
}
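
// Offloading entry points: no target regions or device code are produced in
// SIMD-only mode.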
void CGOpenMPSIMDRuntime::emitTargetOutlinedFunction(
    const OMPExecutableDirective &D, StringRef ParentName,
    llvm::Function *&OutlinedFn, llvm::Constant *&OutlinedFnID,
    bool IsOffloadEntry, const RegionCodeGenTy &CodeGen) {
  llvm_unreachable("Not supported in SIMD-only mode");
}

void CGOpenMPSIMDRuntime::emitTargetCall(CodeGenFunction &CGF,
                                         const OMPExecutableDirective &D,
                                         llvm::Value *OutlinedFn,
                                         llvm::Value *OutlinedFnID,
                                         const Expr *IfCond, const Expr *Device) {
  llvm_unreachable("Not supported in SIMD-only mode");
}

bool CGOpenMPSIMDRuntime::emitTargetFunctions(GlobalDecl GD) {
  llvm_unreachable("Not supported in SIMD-only mode");
}

bool CGOpenMPSIMDRuntime::emitTargetGlobalVariable(GlobalDecl GD) {
  llvm_unreachable("Not supported in SIMD-only mode");
}
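
// Returning false tells the caller that this global needs no target-specific
// handling and should be emitted normally.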
bool CGOpenMPSIMDRuntime::emitTargetGlobal(GlobalDecl GD) {
  return false;
}
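
// No offload entries exist, so there is no registration function to emit.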
llvm::Function *CGOpenMPSIMDRuntime::emitRegistrationFunction() {
  return nullptr;
}

void CGOpenMPSIMDRuntime::emitTeamsCall(CodeGenFunction &CGF,
                                        const OMPExecutableDirective &D,
                                        SourceLocation Loc,
                                        llvm::Value *OutlinedFn,
                                        ArrayRef<llvm::Value *> CapturedVars) {
  llvm_unreachable("Not supported in SIMD-only mode");
}

void CGOpenMPSIMDRuntime::emitNumTeamsClause(CodeGenFunction &CGF,
                                             const Expr *NumTeams,
                                             const Expr *ThreadLimit,
                                             SourceLocation Loc) {
  llvm_unreachable("Not supported in SIMD-only mode");
}

void CGOpenMPSIMDRuntime::emitTargetDataCalls(
    CodeGenFunction &CGF, const OMPExecutableDirective &D, const Expr *IfCond,
    const Expr *Device, const RegionCodeGenTy &CodeGen, TargetDataInfo &Info) {
  llvm_unreachable("Not supported in SIMD-only mode");
}

void CGOpenMPSIMDRuntime::emitTargetDataStandAloneCall(
    CodeGenFunction &CGF, const OMPExecutableDirective &D, const Expr *IfCond,
    const Expr *Device) {
  llvm_unreachable("Not supported in SIMD-only mode");
}

void CGOpenMPSIMDRuntime::emitDoacrossInit(CodeGenFunction &CGF,
                                           const OMPLoopDirective &D,
                                           ArrayRef<Expr *> NumIterations) {
  llvm_unreachable("Not supported in SIMD-only mode");
}

void CGOpenMPSIMDRuntime::emitDoacrossOrdered(CodeGenFunction &CGF,
                                              const OMPDependClause *C) {
  llvm_unreachable("Not supported in SIMD-only mode");
}
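
// Parameter translation is only needed for target-outlined functions, which
// never exist in SIMD-only mode.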
const VarDecl *
CGOpenMPSIMDRuntime::translateParameter(const FieldDecl *FD,
                                        const VarDecl *NativeParam) const {
  llvm_unreachable("Not supported in SIMD-only mode");
}

Address
CGOpenMPSIMDRuntime::getParameterAddress(CodeGenFunction &CGF,
                                         const VarDecl *NativeParam,
                                         const VarDecl *TargetParam) const {
  llvm_unreachable("Not supported in SIMD-only mode");
}