CGCall.cpp

  1. //===--- CGCall.cpp - Encapsulate calling convention details --------------===//
  2. //
  3. // The LLVM Compiler Infrastructure
  4. //
  5. // This file is distributed under the University of Illinois Open Source
  6. // License. See LICENSE.TXT for details.
  7. //
  8. //===----------------------------------------------------------------------===//
  9. //
  10. // These classes wrap the information about a call or function
  11. // definition used to handle ABI compliance.
  12. //
  13. //===----------------------------------------------------------------------===//
  14. #include "CGCall.h"
  15. #include "ABIInfo.h"
  16. #include "CGBlocks.h"
  17. #include "CGCXXABI.h"
  18. #include "CGCleanup.h"
  19. #include "CodeGenFunction.h"
  20. #include "CodeGenModule.h"
  21. #include "TargetInfo.h"
  22. #include "clang/AST/Decl.h"
  23. #include "clang/AST/DeclCXX.h"
  24. #include "clang/AST/DeclObjC.h"
  25. #include "clang/Basic/TargetBuiltins.h"
  26. #include "clang/Basic/TargetInfo.h"
  27. #include "clang/CodeGen/CGFunctionInfo.h"
  28. #include "clang/CodeGen/SwiftCallingConv.h"
  29. #include "clang/Frontend/CodeGenOptions.h"
  30. #include "llvm/ADT/StringExtras.h"
  31. #include "llvm/Analysis/ValueTracking.h"
  32. #include "llvm/IR/Attributes.h"
  33. #include "llvm/IR/CallingConv.h"
  34. #include "llvm/IR/CallSite.h"
  35. #include "llvm/IR/DataLayout.h"
  36. #include "llvm/IR/InlineAsm.h"
  37. #include "llvm/IR/Intrinsics.h"
  38. #include "llvm/IR/IntrinsicInst.h"
  39. #include "llvm/Transforms/Utils/Local.h"
  40. using namespace clang;
  41. using namespace CodeGen;
  42. /***/
  43. unsigned CodeGenTypes::ClangCallConvToLLVMCallConv(CallingConv CC) {
  44. switch (CC) {
  45. default: return llvm::CallingConv::C;
  46. case CC_X86StdCall: return llvm::CallingConv::X86_StdCall;
  47. case CC_X86FastCall: return llvm::CallingConv::X86_FastCall;
  48. case CC_X86RegCall: return llvm::CallingConv::X86_RegCall;
  49. case CC_X86ThisCall: return llvm::CallingConv::X86_ThisCall;
  50. case CC_Win64: return llvm::CallingConv::Win64;
  51. case CC_X86_64SysV: return llvm::CallingConv::X86_64_SysV;
  52. case CC_AAPCS: return llvm::CallingConv::ARM_AAPCS;
  53. case CC_AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
  54. case CC_IntelOclBicc: return llvm::CallingConv::Intel_OCL_BI;
  55. // TODO: Add support for __pascal to LLVM.
  56. case CC_X86Pascal: return llvm::CallingConv::C;
  57. // TODO: Add support for __vectorcall to LLVM.
  58. case CC_X86VectorCall: return llvm::CallingConv::X86_VectorCall;
  59. case CC_SpirFunction: return llvm::CallingConv::SPIR_FUNC;
  60. case CC_OpenCLKernel: return CGM.getTargetCodeGenInfo().getOpenCLKernelCallingConv();
  61. case CC_PreserveMost: return llvm::CallingConv::PreserveMost;
  62. case CC_PreserveAll: return llvm::CallingConv::PreserveAll;
  63. case CC_Swift: return llvm::CallingConv::Swift;
  64. }
  65. }
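// For illustration (a hypothetical declaration, not from this file): a
// function declared as
//   void f(int) __attribute__((fastcall));
// carries CC_X86FastCall in its FunctionType::ExtInfo, so the mapping above
// emits the IR function and its call sites with llvm::CallingConv::X86_FastCall.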
  66. /// Derives the 'this' type for codegen purposes, i.e. ignoring method
  67. /// qualification.
  68. /// FIXME: address space qualification?
  69. static CanQualType GetThisType(ASTContext &Context, const CXXRecordDecl *RD) {
  70. QualType RecTy = Context.getTagDeclType(RD)->getCanonicalTypeInternal();
  71. return Context.getPointerType(CanQualType::CreateUnsafe(RecTy));
  72. }
  73. /// Returns the canonical formal type of the given C++ method.
  74. static CanQual<FunctionProtoType> GetFormalType(const CXXMethodDecl *MD) {
  75. return MD->getType()->getCanonicalTypeUnqualified()
  76. .getAs<FunctionProtoType>();
  77. }
  78. /// Returns the "extra-canonicalized" return type, which discards
  79. /// qualifiers on the return type. Codegen doesn't care about them,
  80. /// and it makes ABI code a little easier to be able to assume that
  81. /// all parameter and return types are top-level unqualified.
  82. static CanQualType GetReturnType(QualType RetTy) {
  83. return RetTy->getCanonicalTypeUnqualified().getUnqualifiedType();
  84. }
  85. /// Arrange the argument and result information for a value of the given
  86. /// unprototyped freestanding function type.
  87. const CGFunctionInfo &
  88. CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionNoProtoType> FTNP) {
  89. // When translating an unprototyped function type, always use a
  90. // variadic type.
  91. return arrangeLLVMFunctionInfo(FTNP->getReturnType().getUnqualifiedType(),
  92. /*instanceMethod=*/false,
  93. /*chainCall=*/false, None,
  94. FTNP->getExtInfo(), {}, RequiredArgs(0));
  95. }
  96. static void addExtParameterInfosForCall(
  97. llvm::SmallVectorImpl<FunctionProtoType::ExtParameterInfo> &paramInfos,
  98. const FunctionProtoType *proto,
  99. unsigned prefixArgs,
  100. unsigned totalArgs) {
  101. assert(proto->hasExtParameterInfos());
  102. assert(paramInfos.size() <= prefixArgs);
  103. assert(proto->getNumParams() + prefixArgs <= totalArgs);
  104. paramInfos.reserve(totalArgs);
  105. // Add default infos for any prefix args that don't already have infos.
  106. paramInfos.resize(prefixArgs);
  107. // Add infos for the prototype.
  108. for (const auto &ParamInfo : proto->getExtParameterInfos()) {
  109. paramInfos.push_back(ParamInfo);
  110. // pass_object_size params have no parameter info.
  111. if (ParamInfo.hasPassObjectSize())
  112. paramInfos.emplace_back();
  113. }
  114. assert(paramInfos.size() <= totalArgs &&
  115. "Did we forget to insert pass_object_size args?");
  116. // Add default infos for the variadic and/or suffix arguments.
  117. paramInfos.resize(totalArgs);
  118. }
  119. /// Adds the formal parameters in FPT to the given prefix. If any parameter in
  120. /// FPT has pass_object_size attrs, then we'll add parameters for those, too.
  121. static void appendParameterTypes(const CodeGenTypes &CGT,
  122. SmallVectorImpl<CanQualType> &prefix,
  123. SmallVectorImpl<FunctionProtoType::ExtParameterInfo> &paramInfos,
  124. CanQual<FunctionProtoType> FPT) {
  125. // Fast path: don't touch param info if we don't need to.
  126. if (!FPT->hasExtParameterInfos()) {
  127. assert(paramInfos.empty() &&
  128. "We have paramInfos, but the prototype doesn't?");
  129. prefix.append(FPT->param_type_begin(), FPT->param_type_end());
  130. return;
  131. }
  132. unsigned PrefixSize = prefix.size();
  133. // In the vast majority of cases, we'll have precisely FPT->getNumParams()
  134. // parameters; the only thing that can change this is the presence of
  135. // pass_object_size. So, we preallocate for the common case.
  136. prefix.reserve(prefix.size() + FPT->getNumParams());
  137. auto ExtInfos = FPT->getExtParameterInfos();
  138. assert(ExtInfos.size() == FPT->getNumParams());
  139. for (unsigned I = 0, E = FPT->getNumParams(); I != E; ++I) {
  140. prefix.push_back(FPT->getParamType(I));
  141. if (ExtInfos[I].hasPassObjectSize())
  142. prefix.push_back(CGT.getContext().getSizeType());
  143. }
  144. addExtParameterInfosForCall(paramInfos, FPT.getTypePtr(), PrefixSize,
  145. prefix.size());
  146. }
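// Sketch of the pass_object_size case handled above (hypothetical declaration):
//   void g(void *p __attribute__((pass_object_size(0))));
// appends two entries for the single source parameter - the pointer type,
// then getSizeType() for the implicit size argument - and
// addExtParameterInfosForCall() adds a default ExtParameterInfo entry for the
// synthesized size parameter so the two arrays stay in sync.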
  147. /// Arrange the LLVM function layout for a value of the given function
  148. /// type, on top of any implicit parameters already stored.
  149. static const CGFunctionInfo &
  150. arrangeLLVMFunctionInfo(CodeGenTypes &CGT, bool instanceMethod,
  151. SmallVectorImpl<CanQualType> &prefix,
  152. CanQual<FunctionProtoType> FTP,
  153. const FunctionDecl *FD) {
  154. SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
  155. RequiredArgs Required =
  156. RequiredArgs::forPrototypePlus(FTP, prefix.size(), FD);
  157. // FIXME: Kill copy.
  158. appendParameterTypes(CGT, prefix, paramInfos, FTP);
  159. CanQualType resultType = FTP->getReturnType().getUnqualifiedType();
  160. return CGT.arrangeLLVMFunctionInfo(resultType, instanceMethod,
  161. /*chainCall=*/false, prefix,
  162. FTP->getExtInfo(), paramInfos,
  163. Required);
  164. }
  165. /// Arrange the argument and result information for a value of the
  166. /// given freestanding function type.
  167. const CGFunctionInfo &
  168. CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionProtoType> FTP,
  169. const FunctionDecl *FD) {
  170. SmallVector<CanQualType, 16> argTypes;
  171. return ::arrangeLLVMFunctionInfo(*this, /*instanceMethod=*/false, argTypes,
  172. FTP, FD);
  173. }
  174. static CallingConv getCallingConventionForDecl(const Decl *D, bool IsWindows) {
  175. // Set the appropriate calling convention for the Function.
  176. if (D->hasAttr<StdCallAttr>())
  177. return CC_X86StdCall;
  178. if (D->hasAttr<FastCallAttr>())
  179. return CC_X86FastCall;
  180. if (D->hasAttr<RegCallAttr>())
  181. return CC_X86RegCall;
  182. if (D->hasAttr<ThisCallAttr>())
  183. return CC_X86ThisCall;
  184. if (D->hasAttr<VectorCallAttr>())
  185. return CC_X86VectorCall;
  186. if (D->hasAttr<PascalAttr>())
  187. return CC_X86Pascal;
  188. if (PcsAttr *PCS = D->getAttr<PcsAttr>())
  189. return (PCS->getPCS() == PcsAttr::AAPCS ? CC_AAPCS : CC_AAPCS_VFP);
  190. if (D->hasAttr<IntelOclBiccAttr>())
  191. return CC_IntelOclBicc;
  192. if (D->hasAttr<MSABIAttr>())
  193. return IsWindows ? CC_C : CC_Win64;
  194. if (D->hasAttr<SysVABIAttr>())
  195. return IsWindows ? CC_X86_64SysV : CC_C;
  196. if (D->hasAttr<PreserveMostAttr>())
  197. return CC_PreserveMost;
  198. if (D->hasAttr<PreserveAllAttr>())
  199. return CC_PreserveAll;
  200. return CC_C;
  201. }
  202. /// Arrange the argument and result information for a call to an
  203. /// unknown C++ non-static member function of the given abstract type.
  204. /// (A null RD means we don't have any meaningful "this" argument type,
  205. /// so fall back to a generic pointer type).
  206. /// The member function must be an ordinary function, i.e. not a
  207. /// constructor or destructor.
  208. const CGFunctionInfo &
  209. CodeGenTypes::arrangeCXXMethodType(const CXXRecordDecl *RD,
  210. const FunctionProtoType *FTP,
  211. const CXXMethodDecl *MD) {
  212. SmallVector<CanQualType, 16> argTypes;
  213. // Add the 'this' pointer.
  214. if (RD)
  215. argTypes.push_back(GetThisType(Context, RD));
  216. else
  217. argTypes.push_back(Context.VoidPtrTy);
  218. return ::arrangeLLVMFunctionInfo(
  219. *this, /*instanceMethod=*/true, argTypes,
  220. FTP->getCanonicalTypeUnqualified().getAs<FunctionProtoType>(), MD);
  221. }
  222. /// Arrange the argument and result information for a declaration or
  223. /// definition of the given C++ non-static member function. The
  224. /// member function must be an ordinary function, i.e. not a
  225. /// constructor or destructor.
  226. const CGFunctionInfo &
  227. CodeGenTypes::arrangeCXXMethodDeclaration(const CXXMethodDecl *MD) {
  228. assert(!isa<CXXConstructorDecl>(MD) && "wrong method for constructors!");
  229. assert(!isa<CXXDestructorDecl>(MD) && "wrong method for destructors!");
  230. CanQual<FunctionProtoType> prototype = GetFormalType(MD);
  231. if (MD->isInstance()) {
  232. // The abstract case is perfectly fine.
  233. const CXXRecordDecl *ThisType = TheCXXABI.getThisArgumentTypeForMethod(MD);
  234. return arrangeCXXMethodType(ThisType, prototype.getTypePtr(), MD);
  235. }
  236. return arrangeFreeFunctionType(prototype, MD);
  237. }
  238. bool CodeGenTypes::inheritingCtorHasParams(
  239. const InheritedConstructor &Inherited, CXXCtorType Type) {
  240. // Parameters are unnecessary if we're constructing a base class subobject
  241. // and the inherited constructor lives in a virtual base.
  242. return Type == Ctor_Complete ||
  243. !Inherited.getShadowDecl()->constructsVirtualBase() ||
  244. !Target.getCXXABI().hasConstructorVariants();
  245. }
  246. const CGFunctionInfo &
  247. CodeGenTypes::arrangeCXXStructorDeclaration(const CXXMethodDecl *MD,
  248. StructorType Type) {
  249. SmallVector<CanQualType, 16> argTypes;
  250. SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
  251. argTypes.push_back(GetThisType(Context, MD->getParent()));
  252. bool PassParams = true;
  253. GlobalDecl GD;
  254. if (auto *CD = dyn_cast<CXXConstructorDecl>(MD)) {
  255. GD = GlobalDecl(CD, toCXXCtorType(Type));
  256. // A base class inheriting constructor doesn't get forwarded arguments
  257. // needed to construct a virtual base (or base class thereof).
  258. if (auto Inherited = CD->getInheritedConstructor())
  259. PassParams = inheritingCtorHasParams(Inherited, toCXXCtorType(Type));
  260. } else {
  261. auto *DD = dyn_cast<CXXDestructorDecl>(MD);
  262. GD = GlobalDecl(DD, toCXXDtorType(Type));
  263. }
  264. CanQual<FunctionProtoType> FTP = GetFormalType(MD);
  265. // Add the formal parameters.
  266. if (PassParams)
  267. appendParameterTypes(*this, argTypes, paramInfos, FTP);
  268. CGCXXABI::AddedStructorArgs AddedArgs =
  269. TheCXXABI.buildStructorSignature(MD, Type, argTypes);
  270. if (!paramInfos.empty()) {
  271. // Note: prefix args are inserted after the first param (the 'this' argument).
  272. if (AddedArgs.Prefix)
  273. paramInfos.insert(paramInfos.begin() + 1, AddedArgs.Prefix,
  274. FunctionProtoType::ExtParameterInfo{});
  275. if (AddedArgs.Suffix)
  276. paramInfos.append(AddedArgs.Suffix,
  277. FunctionProtoType::ExtParameterInfo{});
  278. }
  279. RequiredArgs required =
  280. (PassParams && MD->isVariadic() ? RequiredArgs(argTypes.size())
  281. : RequiredArgs::All);
  282. FunctionType::ExtInfo extInfo = FTP->getExtInfo();
  283. CanQualType resultType = TheCXXABI.HasThisReturn(GD)
  284. ? argTypes.front()
  285. : TheCXXABI.hasMostDerivedReturn(GD)
  286. ? CGM.getContext().VoidPtrTy
  287. : Context.VoidTy;
  288. return arrangeLLVMFunctionInfo(resultType, /*instanceMethod=*/true,
  289. /*chainCall=*/false, argTypes, extInfo,
  290. paramInfos, required);
  291. }
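// Sketch: on C++ ABIs where HasThisReturn() is true for constructors (e.g.
// ARM-style ABIs), a constructor of "struct S { S(); };" is arranged as
// "S *(S *this)" rather than "void (S *this)"; the resultType selection above
// is what switches between those shapes.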
  292. static SmallVector<CanQualType, 16>
  293. getArgTypesForCall(ASTContext &ctx, const CallArgList &args) {
  294. SmallVector<CanQualType, 16> argTypes;
  295. for (auto &arg : args)
  296. argTypes.push_back(ctx.getCanonicalParamType(arg.Ty));
  297. return argTypes;
  298. }
  299. static SmallVector<CanQualType, 16>
  300. getArgTypesForDeclaration(ASTContext &ctx, const FunctionArgList &args) {
  301. SmallVector<CanQualType, 16> argTypes;
  302. for (auto &arg : args)
  303. argTypes.push_back(ctx.getCanonicalParamType(arg->getType()));
  304. return argTypes;
  305. }
  306. static llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16>
  307. getExtParameterInfosForCall(const FunctionProtoType *proto,
  308. unsigned prefixArgs, unsigned totalArgs) {
  309. llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> result;
  310. if (proto->hasExtParameterInfos()) {
  311. addExtParameterInfosForCall(result, proto, prefixArgs, totalArgs);
  312. }
  313. return result;
  314. }
  315. /// Arrange a call to a C++ method, passing the given arguments.
  316. ///
  317. /// ExtraPrefixArgs is the number of ABI-specific args passed after the `this`
  318. /// parameter.
  319. /// ExtraSuffixArgs is the number of ABI-specific args passed at the end of
  320. /// args.
  321. /// PassProtoArgs indicates whether `args` has args for the parameters in the
  322. /// given CXXConstructorDecl.
  323. const CGFunctionInfo &
  324. CodeGenTypes::arrangeCXXConstructorCall(const CallArgList &args,
  325. const CXXConstructorDecl *D,
  326. CXXCtorType CtorKind,
  327. unsigned ExtraPrefixArgs,
  328. unsigned ExtraSuffixArgs,
  329. bool PassProtoArgs) {
  330. // FIXME: Kill copy.
  331. SmallVector<CanQualType, 16> ArgTypes;
  332. for (const auto &Arg : args)
  333. ArgTypes.push_back(Context.getCanonicalParamType(Arg.Ty));
  334. // +1 for implicit this, which should always be args[0].
  335. unsigned TotalPrefixArgs = 1 + ExtraPrefixArgs;
  336. CanQual<FunctionProtoType> FPT = GetFormalType(D);
  337. RequiredArgs Required =
  338. RequiredArgs::forPrototypePlus(FPT, TotalPrefixArgs + ExtraSuffixArgs, D);
  339. GlobalDecl GD(D, CtorKind);
  340. CanQualType ResultType = TheCXXABI.HasThisReturn(GD)
  341. ? ArgTypes.front()
  342. : TheCXXABI.hasMostDerivedReturn(GD)
  343. ? CGM.getContext().VoidPtrTy
  344. : Context.VoidTy;
  345. FunctionType::ExtInfo Info = FPT->getExtInfo();
  346. llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> ParamInfos;
  347. // If the prototype args are elided, we should only have ABI-specific args,
  348. // which never have param info.
  349. if (PassProtoArgs && FPT->hasExtParameterInfos()) {
  350. // ABI-specific suffix arguments are treated the same as variadic arguments.
  351. addExtParameterInfosForCall(ParamInfos, FPT.getTypePtr(), TotalPrefixArgs,
  352. ArgTypes.size());
  353. }
  354. return arrangeLLVMFunctionInfo(ResultType, /*instanceMethod=*/true,
  355. /*chainCall=*/false, ArgTypes, Info,
  356. ParamInfos, Required);
  357. }
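// Sketch: the ABI-specific prefix/suffix args are implicit arguments such as
// the VTT pointer Itanium-style ABIs pass after 'this' to base-object
// constructors of classes with virtual bases, or the "most derived" flag some
// ABIs append last; they never come from the CXXConstructorDecl's prototype,
// which is why they carry no ExtParameterInfo.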
  358. /// Arrange the argument and result information for the declaration or
  359. /// definition of the given function.
  360. const CGFunctionInfo &
  361. CodeGenTypes::arrangeFunctionDeclaration(const FunctionDecl *FD) {
  362. if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
  363. if (MD->isInstance())
  364. return arrangeCXXMethodDeclaration(MD);
  365. CanQualType FTy = FD->getType()->getCanonicalTypeUnqualified();
  366. assert(isa<FunctionType>(FTy));
  367. // When declaring a function without a prototype, always use a
  368. // non-variadic type.
  369. if (CanQual<FunctionNoProtoType> noProto = FTy.getAs<FunctionNoProtoType>()) {
  370. return arrangeLLVMFunctionInfo(
  371. noProto->getReturnType(), /*instanceMethod=*/false,
  372. /*chainCall=*/false, None, noProto->getExtInfo(), {}, RequiredArgs::All);
  373. }
  374. return arrangeFreeFunctionType(FTy.castAs<FunctionProtoType>(), FD);
  375. }
  376. /// Arrange the argument and result information for the declaration or
  377. /// definition of an Objective-C method.
  378. const CGFunctionInfo &
  379. CodeGenTypes::arrangeObjCMethodDeclaration(const ObjCMethodDecl *MD) {
  380. // It happens that this is the same as a call with no optional
  381. // arguments, except also using the formal 'self' type.
  382. return arrangeObjCMessageSendSignature(MD, MD->getSelfDecl()->getType());
  383. }
  384. /// Arrange the argument and result information for the function type
  385. /// through which to perform a send to the given Objective-C method,
  386. /// using the given receiver type. The receiver type is not always
  387. /// the 'self' type of the method or even an Objective-C pointer type.
  388. /// This is *not* the right method for actually performing such a
  389. /// message send, due to the possibility of optional arguments.
  390. const CGFunctionInfo &
  391. CodeGenTypes::arrangeObjCMessageSendSignature(const ObjCMethodDecl *MD,
  392. QualType receiverType) {
  393. SmallVector<CanQualType, 16> argTys;
  394. SmallVector<FunctionProtoType::ExtParameterInfo, 4> extParamInfos(2);
  395. argTys.push_back(Context.getCanonicalParamType(receiverType));
  396. argTys.push_back(Context.getCanonicalParamType(Context.getObjCSelType()));
  397. // FIXME: Kill copy?
  398. for (const auto *I : MD->parameters()) {
  399. argTys.push_back(Context.getCanonicalParamType(I->getType()));
  400. auto extParamInfo = FunctionProtoType::ExtParameterInfo().withIsNoEscape(
  401. I->hasAttr<NoEscapeAttr>());
  402. extParamInfos.push_back(extParamInfo);
  403. }
  404. FunctionType::ExtInfo einfo;
  405. bool IsWindows = getContext().getTargetInfo().getTriple().isOSWindows();
  406. einfo = einfo.withCallingConv(getCallingConventionForDecl(MD, IsWindows));
  407. if (getContext().getLangOpts().ObjCAutoRefCount &&
  408. MD->hasAttr<NSReturnsRetainedAttr>())
  409. einfo = einfo.withProducesResult(true);
  410. RequiredArgs required =
  411. (MD->isVariadic() ? RequiredArgs(argTys.size()) : RequiredArgs::All);
  412. return arrangeLLVMFunctionInfo(
  413. GetReturnType(MD->getReturnType()), /*instanceMethod=*/false,
  414. /*chainCall=*/false, argTys, einfo, extParamInfos, required);
  415. }
  416. const CGFunctionInfo &
  417. CodeGenTypes::arrangeUnprototypedObjCMessageSend(QualType returnType,
  418. const CallArgList &args) {
  419. auto argTypes = getArgTypesForCall(Context, args);
  420. FunctionType::ExtInfo einfo;
  421. return arrangeLLVMFunctionInfo(
  422. GetReturnType(returnType), /*instanceMethod=*/false,
  423. /*chainCall=*/false, argTypes, einfo, {}, RequiredArgs::All);
  424. }
  425. const CGFunctionInfo &
  426. CodeGenTypes::arrangeGlobalDeclaration(GlobalDecl GD) {
  427. // FIXME: Do we need to handle ObjCMethodDecl?
  428. const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
  429. if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD))
  430. return arrangeCXXStructorDeclaration(CD, getFromCtorType(GD.getCtorType()));
  431. if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(FD))
  432. return arrangeCXXStructorDeclaration(DD, getFromDtorType(GD.getDtorType()));
  433. return arrangeFunctionDeclaration(FD);
  434. }
  435. /// Arrange a thunk that takes 'this' as the first parameter followed by
  436. /// varargs. Return a void pointer, regardless of the actual return type.
  437. /// The body of the thunk will end in a musttail call to a function of the
  438. /// correct type, and the caller will bitcast the function to the correct
  439. /// prototype.
  440. const CGFunctionInfo &
  441. CodeGenTypes::arrangeMSMemberPointerThunk(const CXXMethodDecl *MD) {
  442. assert(MD->isVirtual() && "only virtual memptrs have thunks");
  443. CanQual<FunctionProtoType> FTP = GetFormalType(MD);
  444. CanQualType ArgTys[] = { GetThisType(Context, MD->getParent()) };
  445. return arrangeLLVMFunctionInfo(Context.VoidTy, /*instanceMethod=*/false,
  446. /*chainCall=*/false, ArgTys,
  447. FTP->getExtInfo(), {}, RequiredArgs(1));
  448. }
  449. const CGFunctionInfo &
  450. CodeGenTypes::arrangeMSCtorClosure(const CXXConstructorDecl *CD,
  451. CXXCtorType CT) {
  452. assert(CT == Ctor_CopyingClosure || CT == Ctor_DefaultClosure);
  453. CanQual<FunctionProtoType> FTP = GetFormalType(CD);
  454. SmallVector<CanQualType, 2> ArgTys;
  455. const CXXRecordDecl *RD = CD->getParent();
  456. ArgTys.push_back(GetThisType(Context, RD));
  457. if (CT == Ctor_CopyingClosure)
  458. ArgTys.push_back(*FTP->param_type_begin());
  459. if (RD->getNumVBases() > 0)
  460. ArgTys.push_back(Context.IntTy);
  461. CallingConv CC = Context.getDefaultCallingConvention(
  462. /*IsVariadic=*/false, /*IsCXXMethod=*/true);
  463. return arrangeLLVMFunctionInfo(Context.VoidTy, /*instanceMethod=*/true,
  464. /*chainCall=*/false, ArgTys,
  465. FunctionType::ExtInfo(CC), {},
  466. RequiredArgs::All);
  467. }
  468. /// Arrange a call as unto a free function, except possibly with an
  469. /// additional number of formal parameters considered required.
  470. static const CGFunctionInfo &
  471. arrangeFreeFunctionLikeCall(CodeGenTypes &CGT,
  472. CodeGenModule &CGM,
  473. const CallArgList &args,
  474. const FunctionType *fnType,
  475. unsigned numExtraRequiredArgs,
  476. bool chainCall) {
  477. assert(args.size() >= numExtraRequiredArgs);
  478. llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
  479. // In most cases, there are no optional arguments.
  480. RequiredArgs required = RequiredArgs::All;
  481. // If we have a variadic prototype, the required arguments are the
  482. // extra prefix plus the arguments in the prototype.
  483. if (const FunctionProtoType *proto = dyn_cast<FunctionProtoType>(fnType)) {
  484. if (proto->isVariadic())
  485. required = RequiredArgs(proto->getNumParams() + numExtraRequiredArgs);
  486. if (proto->hasExtParameterInfos())
  487. addExtParameterInfosForCall(paramInfos, proto, numExtraRequiredArgs,
  488. args.size());
  489. // If we don't have a prototype at all, but we're supposed to
  490. // explicitly use the variadic convention for unprototyped calls,
  491. // treat all of the arguments as required but preserve the nominal
  492. // possibility of variadics.
  493. } else if (CGM.getTargetCodeGenInfo()
  494. .isNoProtoCallVariadic(args,
  495. cast<FunctionNoProtoType>(fnType))) {
  496. required = RequiredArgs(args.size());
  497. }
  498. // FIXME: Kill copy.
  499. SmallVector<CanQualType, 16> argTypes;
  500. for (const auto &arg : args)
  501. argTypes.push_back(CGT.getContext().getCanonicalParamType(arg.Ty));
  502. return CGT.arrangeLLVMFunctionInfo(GetReturnType(fnType->getReturnType()),
  503. /*instanceMethod=*/false, chainCall,
  504. argTypes, fnType->getExtInfo(), paramInfos,
  505. required);
  506. }
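// Sketch of the unprototyped case above: given a K&R-style callee (hypothetical)
//   int f();
//   f(1, 2.0);
// there is no FunctionProtoType, so if the target's isNoProtoCallVariadic()
// returns true both arguments become required while the signature keeps the
// variadic convention; otherwise required simply stays RequiredArgs::All.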
  507. /// Figure out the rules for calling a function with the given formal
  508. /// type using the given arguments. The arguments are necessary
  509. /// because the function might be unprototyped, in which case it's
  510. /// target-dependent in crazy ways.
  511. const CGFunctionInfo &
  512. CodeGenTypes::arrangeFreeFunctionCall(const CallArgList &args,
  513. const FunctionType *fnType,
  514. bool chainCall) {
  515. return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType,
  516. chainCall ? 1 : 0, chainCall);
  517. }
  518. /// A block function is essentially a free function with an
  519. /// extra implicit argument.
  520. const CGFunctionInfo &
  521. CodeGenTypes::arrangeBlockFunctionCall(const CallArgList &args,
  522. const FunctionType *fnType) {
  523. return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType, 1,
  524. /*chainCall=*/false);
  525. }
  526. const CGFunctionInfo &
  527. CodeGenTypes::arrangeBlockFunctionDeclaration(const FunctionProtoType *proto,
  528. const FunctionArgList &params) {
  529. auto paramInfos = getExtParameterInfosForCall(proto, 1, params.size());
  530. auto argTypes = getArgTypesForDeclaration(Context, params);
  531. return arrangeLLVMFunctionInfo(
  532. GetReturnType(proto->getReturnType()),
  533. /*instanceMethod=*/false, /*chainCall=*/false, argTypes,
  534. proto->getExtInfo(), paramInfos,
  535. RequiredArgs::forPrototypePlus(proto, 1, nullptr));
  536. }
  537. const CGFunctionInfo &
  538. CodeGenTypes::arrangeBuiltinFunctionCall(QualType resultType,
  539. const CallArgList &args) {
  540. // FIXME: Kill copy.
  541. SmallVector<CanQualType, 16> argTypes;
  542. for (const auto &Arg : args)
  543. argTypes.push_back(Context.getCanonicalParamType(Arg.Ty));
  544. return arrangeLLVMFunctionInfo(
  545. GetReturnType(resultType), /*instanceMethod=*/false,
  546. /*chainCall=*/false, argTypes, FunctionType::ExtInfo(),
  547. /*paramInfos=*/ {}, RequiredArgs::All);
  548. }
  549. const CGFunctionInfo &
  550. CodeGenTypes::arrangeBuiltinFunctionDeclaration(QualType resultType,
  551. const FunctionArgList &args) {
  552. auto argTypes = getArgTypesForDeclaration(Context, args);
  553. return arrangeLLVMFunctionInfo(
  554. GetReturnType(resultType), /*instanceMethod=*/false, /*chainCall=*/false,
  555. argTypes, FunctionType::ExtInfo(), {}, RequiredArgs::All);
  556. }
  557. const CGFunctionInfo &
  558. CodeGenTypes::arrangeBuiltinFunctionDeclaration(CanQualType resultType,
  559. ArrayRef<CanQualType> argTypes) {
  560. return arrangeLLVMFunctionInfo(
  561. resultType, /*instanceMethod=*/false, /*chainCall=*/false,
  562. argTypes, FunctionType::ExtInfo(), {}, RequiredArgs::All);
  563. }
  564. /// Arrange a call to a C++ method, passing the given arguments.
  565. ///
  566. /// numPrefixArgs is the number of ABI-specific prefix arguments we have. It
  567. /// does not count `this`.
  568. const CGFunctionInfo &
  569. CodeGenTypes::arrangeCXXMethodCall(const CallArgList &args,
  570. const FunctionProtoType *proto,
  571. RequiredArgs required,
  572. unsigned numPrefixArgs) {
  573. assert(numPrefixArgs + 1 <= args.size() &&
  574. "Emitting a call with less args than the required prefix?");
  575. // Add one to account for `this`. It's a bit awkward here, but we don't count
  576. // `this` in similar places elsewhere.
  577. auto paramInfos =
  578. getExtParameterInfosForCall(proto, numPrefixArgs + 1, args.size());
  579. // FIXME: Kill copy.
  580. auto argTypes = getArgTypesForCall(Context, args);
  581. FunctionType::ExtInfo info = proto->getExtInfo();
  582. return arrangeLLVMFunctionInfo(
  583. GetReturnType(proto->getReturnType()), /*instanceMethod=*/true,
  584. /*chainCall=*/false, argTypes, info, paramInfos, required);
  585. }
  586. const CGFunctionInfo &CodeGenTypes::arrangeNullaryFunction() {
  587. return arrangeLLVMFunctionInfo(
  588. getContext().VoidTy, /*instanceMethod=*/false, /*chainCall=*/false,
  589. None, FunctionType::ExtInfo(), {}, RequiredArgs::All);
  590. }
  591. const CGFunctionInfo &
  592. CodeGenTypes::arrangeCall(const CGFunctionInfo &signature,
  593. const CallArgList &args) {
  594. assert(signature.arg_size() <= args.size());
  595. if (signature.arg_size() == args.size())
  596. return signature;
  597. SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
  598. auto sigParamInfos = signature.getExtParameterInfos();
  599. if (!sigParamInfos.empty()) {
  600. paramInfos.append(sigParamInfos.begin(), sigParamInfos.end());
  601. paramInfos.resize(args.size());
  602. }
  603. auto argTypes = getArgTypesForCall(Context, args);
  604. assert(signature.getRequiredArgs().allowsOptionalArgs());
  605. return arrangeLLVMFunctionInfo(signature.getReturnType(),
  606. signature.isInstanceMethod(),
  607. signature.isChainCall(),
  608. argTypes,
  609. signature.getExtInfo(),
  610. paramInfos,
  611. signature.getRequiredArgs());
  612. }
  613. namespace clang {
  614. namespace CodeGen {
  615. void computeSPIRKernelABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI);
  616. }
  617. }
  618. /// Arrange the argument and result information for an abstract value
  619. /// of a given function type. This is the method which all of the
  620. /// above functions ultimately defer to.
  621. const CGFunctionInfo &
  622. CodeGenTypes::arrangeLLVMFunctionInfo(CanQualType resultType,
  623. bool instanceMethod,
  624. bool chainCall,
  625. ArrayRef<CanQualType> argTypes,
  626. FunctionType::ExtInfo info,
  627. ArrayRef<FunctionProtoType::ExtParameterInfo> paramInfos,
  628. RequiredArgs required) {
  629. assert(std::all_of(argTypes.begin(), argTypes.end(),
  630. [](CanQualType T) { return T.isCanonicalAsParam(); }));
  631. // Look up or create a unique function info.
  632. llvm::FoldingSetNodeID ID;
  633. CGFunctionInfo::Profile(ID, instanceMethod, chainCall, info, paramInfos,
  634. required, resultType, argTypes);
  635. void *insertPos = nullptr;
  636. CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, insertPos);
  637. if (FI)
  638. return *FI;
  639. unsigned CC = ClangCallConvToLLVMCallConv(info.getCC());
  640. // Construct the function info. We co-allocate the ArgInfos.
  641. FI = CGFunctionInfo::create(CC, instanceMethod, chainCall, info,
  642. paramInfos, resultType, argTypes, required);
  643. FunctionInfos.InsertNode(FI, insertPos);
  644. bool inserted = FunctionsBeingProcessed.insert(FI).second;
  645. (void)inserted;
  646. assert(inserted && "Recursively being processed?");
  647. // Compute ABI information.
  648. if (CC == llvm::CallingConv::SPIR_KERNEL) {
  649. // Force target-independent argument handling for host-visible kernel
  650. // functions.
  651. computeSPIRKernelABIInfo(CGM, *FI);
  652. } else if (info.getCC() == CC_Swift) {
  653. swiftcall::computeABIInfo(CGM, *FI);
  654. } else {
  655. getABIInfo().computeInfo(*FI);
  656. }
  657. // Loop over all of the computed argument and return value info. If any of
  658. // them are direct or extend without a specified coerce type, specify the
  659. // default now.
  660. ABIArgInfo &retInfo = FI->getReturnInfo();
  661. if (retInfo.canHaveCoerceToType() && retInfo.getCoerceToType() == nullptr)
  662. retInfo.setCoerceToType(ConvertType(FI->getReturnType()));
  663. for (auto &I : FI->arguments())
  664. if (I.info.canHaveCoerceToType() && I.info.getCoerceToType() == nullptr)
  665. I.info.setCoerceToType(ConvertType(I.type));
  666. bool erased = FunctionsBeingProcessed.erase(FI); (void)erased;
  667. assert(erased && "Not in set?");
  668. return *FI;
  669. }
  670. CGFunctionInfo *CGFunctionInfo::create(unsigned llvmCC,
  671. bool instanceMethod,
  672. bool chainCall,
  673. const FunctionType::ExtInfo &info,
  674. ArrayRef<ExtParameterInfo> paramInfos,
  675. CanQualType resultType,
  676. ArrayRef<CanQualType> argTypes,
  677. RequiredArgs required) {
  678. assert(paramInfos.empty() || paramInfos.size() == argTypes.size());
  679. void *buffer =
  680. operator new(totalSizeToAlloc<ArgInfo, ExtParameterInfo>(
  681. argTypes.size() + 1, paramInfos.size()));
  682. CGFunctionInfo *FI = new(buffer) CGFunctionInfo();
  683. FI->CallingConvention = llvmCC;
  684. FI->EffectiveCallingConvention = llvmCC;
  685. FI->ASTCallingConvention = info.getCC();
  686. FI->InstanceMethod = instanceMethod;
  687. FI->ChainCall = chainCall;
  688. FI->NoReturn = info.getNoReturn();
  689. FI->ReturnsRetained = info.getProducesResult();
  690. FI->NoCallerSavedRegs = info.getNoCallerSavedRegs();
  691. FI->Required = required;
  692. FI->HasRegParm = info.getHasRegParm();
  693. FI->RegParm = info.getRegParm();
  694. FI->ArgStruct = nullptr;
  695. FI->ArgStructAlign = 0;
  696. FI->NumArgs = argTypes.size();
  697. FI->HasExtParameterInfos = !paramInfos.empty();
  698. FI->getArgsBuffer()[0].type = resultType;
  699. for (unsigned i = 0, e = argTypes.size(); i != e; ++i)
  700. FI->getArgsBuffer()[i + 1].type = argTypes[i];
  701. for (unsigned i = 0, e = paramInfos.size(); i != e; ++i)
  702. FI->getExtParameterInfosBuffer()[i] = paramInfos[i];
  703. return FI;
  704. }
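// Layout sketch: the single allocation above places the trailing objects
// directly after the CGFunctionInfo header, roughly
//   [CGFunctionInfo][ArgInfo x (NumArgs + 1)][ExtParameterInfo x paramInfos.size()]
// with slot 0 of the ArgInfo array holding the return type/info and slots
// 1..NumArgs holding the parameters, exactly as the loops above fill them in.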
  705. /***/
  706. namespace {
  707. // ABIArgInfo::Expand implementation.
  708. // Specifies the way QualType passed as ABIArgInfo::Expand is expanded.
  709. struct TypeExpansion {
  710. enum TypeExpansionKind {
  711. // Elements of constant arrays are expanded recursively.
  712. TEK_ConstantArray,
  714. // Record fields are expanded recursively (but if the record is a union, only
  714. // the field with the largest size is expanded).
  715. TEK_Record,
  716. // For complex types, real and imaginary parts are expanded recursively.
  717. TEK_Complex,
  718. // All other types are not expandable.
  719. TEK_None
  720. };
  721. const TypeExpansionKind Kind;
  722. TypeExpansion(TypeExpansionKind K) : Kind(K) {}
  723. virtual ~TypeExpansion() {}
  724. };
  725. struct ConstantArrayExpansion : TypeExpansion {
  726. QualType EltTy;
  727. uint64_t NumElts;
  728. ConstantArrayExpansion(QualType EltTy, uint64_t NumElts)
  729. : TypeExpansion(TEK_ConstantArray), EltTy(EltTy), NumElts(NumElts) {}
  730. static bool classof(const TypeExpansion *TE) {
  731. return TE->Kind == TEK_ConstantArray;
  732. }
  733. };
  734. struct RecordExpansion : TypeExpansion {
  735. SmallVector<const CXXBaseSpecifier *, 1> Bases;
  736. SmallVector<const FieldDecl *, 1> Fields;
  737. RecordExpansion(SmallVector<const CXXBaseSpecifier *, 1> &&Bases,
  738. SmallVector<const FieldDecl *, 1> &&Fields)
  739. : TypeExpansion(TEK_Record), Bases(std::move(Bases)),
  740. Fields(std::move(Fields)) {}
  741. static bool classof(const TypeExpansion *TE) {
  742. return TE->Kind == TEK_Record;
  743. }
  744. };
  745. struct ComplexExpansion : TypeExpansion {
  746. QualType EltTy;
  747. ComplexExpansion(QualType EltTy) : TypeExpansion(TEK_Complex), EltTy(EltTy) {}
  748. static bool classof(const TypeExpansion *TE) {
  749. return TE->Kind == TEK_Complex;
  750. }
  751. };
  752. struct NoExpansion : TypeExpansion {
  753. NoExpansion() : TypeExpansion(TEK_None) {}
  754. static bool classof(const TypeExpansion *TE) {
  755. return TE->Kind == TEK_None;
  756. }
  757. };
  758. } // namespace
  759. static std::unique_ptr<TypeExpansion>
  760. getTypeExpansion(QualType Ty, const ASTContext &Context) {
  761. if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
  762. return llvm::make_unique<ConstantArrayExpansion>(
  763. AT->getElementType(), AT->getSize().getZExtValue());
  764. }
  765. if (const RecordType *RT = Ty->getAs<RecordType>()) {
  766. SmallVector<const CXXBaseSpecifier *, 1> Bases;
  767. SmallVector<const FieldDecl *, 1> Fields;
  768. const RecordDecl *RD = RT->getDecl();
  769. assert(!RD->hasFlexibleArrayMember() &&
  770. "Cannot expand structure with flexible array.");
  771. if (RD->isUnion()) {
  772. // Unions can appear here only in degenerate cases - all the fields are the
  773. // same after flattening. Thus we have to use the "largest" field.
  774. const FieldDecl *LargestFD = nullptr;
  775. CharUnits UnionSize = CharUnits::Zero();
  776. for (const auto *FD : RD->fields()) {
  777. // Skip zero length bitfields.
  778. if (FD->isBitField() && FD->getBitWidthValue(Context) == 0)
  779. continue;
  780. assert(!FD->isBitField() &&
  781. "Cannot expand structure with bit-field members.");
  782. CharUnits FieldSize = Context.getTypeSizeInChars(FD->getType());
  783. if (UnionSize < FieldSize) {
  784. UnionSize = FieldSize;
  785. LargestFD = FD;
  786. }
  787. }
  788. if (LargestFD)
  789. Fields.push_back(LargestFD);
  790. } else {
  791. if (const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
  792. assert(!CXXRD->isDynamicClass() &&
  793. "cannot expand vtable pointers in dynamic classes");
  794. for (const CXXBaseSpecifier &BS : CXXRD->bases())
  795. Bases.push_back(&BS);
  796. }
  797. for (const auto *FD : RD->fields()) {
  798. // Skip zero length bitfields.
  799. if (FD->isBitField() && FD->getBitWidthValue(Context) == 0)
  800. continue;
  801. assert(!FD->isBitField() &&
  802. "Cannot expand structure with bit-field members.");
  803. Fields.push_back(FD);
  804. }
  805. }
  806. return llvm::make_unique<RecordExpansion>(std::move(Bases),
  807. std::move(Fields));
  808. }
  809. if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
  810. return llvm::make_unique<ComplexExpansion>(CT->getElementType());
  811. }
  812. return llvm::make_unique<NoExpansion>();
  813. }
  814. static int getExpansionSize(QualType Ty, const ASTContext &Context) {
  815. auto Exp = getTypeExpansion(Ty, Context);
  816. if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
  817. return CAExp->NumElts * getExpansionSize(CAExp->EltTy, Context);
  818. }
  819. if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
  820. int Res = 0;
  821. for (auto BS : RExp->Bases)
  822. Res += getExpansionSize(BS->getType(), Context);
  823. for (auto FD : RExp->Fields)
  824. Res += getExpansionSize(FD->getType(), Context);
  825. return Res;
  826. }
  827. if (isa<ComplexExpansion>(Exp.get()))
  828. return 2;
  829. assert(isa<NoExpansion>(Exp.get()));
  830. return 1;
  831. }
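// Worked example for the rules above (hypothetical type):
//   struct S { int a; int b[2]; _Complex float c; };
// expands to 1 + 2 + 2 = 5 slots - the scalar, the two array elements, and
// the real/imaginary parts of the complex member - so getExpansionSize()
// returns 5 for S.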
  832. void
  833. CodeGenTypes::getExpandedTypes(QualType Ty,
  834. SmallVectorImpl<llvm::Type *>::iterator &TI) {
  835. auto Exp = getTypeExpansion(Ty, Context);
  836. if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
  837. for (int i = 0, n = CAExp->NumElts; i < n; i++) {
  838. getExpandedTypes(CAExp->EltTy, TI);
  839. }
  840. } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
  841. for (auto BS : RExp->Bases)
  842. getExpandedTypes(BS->getType(), TI);
  843. for (auto FD : RExp->Fields)
  844. getExpandedTypes(FD->getType(), TI);
  845. } else if (auto CExp = dyn_cast<ComplexExpansion>(Exp.get())) {
  846. llvm::Type *EltTy = ConvertType(CExp->EltTy);
  847. *TI++ = EltTy;
  848. *TI++ = EltTy;
  849. } else {
  850. assert(isa<NoExpansion>(Exp.get()));
  851. *TI++ = ConvertType(Ty);
  852. }
  853. }
  854. static void forConstantArrayExpansion(CodeGenFunction &CGF,
  855. ConstantArrayExpansion *CAE,
  856. Address BaseAddr,
  857. llvm::function_ref<void(Address)> Fn) {
  858. CharUnits EltSize = CGF.getContext().getTypeSizeInChars(CAE->EltTy);
  859. CharUnits EltAlign =
  860. BaseAddr.getAlignment().alignmentOfArrayElement(EltSize);
  861. for (int i = 0, n = CAE->NumElts; i < n; i++) {
  862. llvm::Value *EltAddr =
  863. CGF.Builder.CreateConstGEP2_32(nullptr, BaseAddr.getPointer(), 0, i);
  864. Fn(Address(EltAddr, EltAlign));
  865. }
  866. }
  867. void CodeGenFunction::ExpandTypeFromArgs(
  868. QualType Ty, LValue LV, SmallVectorImpl<llvm::Value *>::iterator &AI) {
  869. assert(LV.isSimple() &&
  870. "Unexpected non-simple lvalue during struct expansion.");
  871. auto Exp = getTypeExpansion(Ty, getContext());
  872. if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
  873. forConstantArrayExpansion(*this, CAExp, LV.getAddress(),
  874. [&](Address EltAddr) {
  875. LValue LV = MakeAddrLValue(EltAddr, CAExp->EltTy);
  876. ExpandTypeFromArgs(CAExp->EltTy, LV, AI);
  877. });
  878. } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
  879. Address This = LV.getAddress();
  880. for (const CXXBaseSpecifier *BS : RExp->Bases) {
  881. // Perform a single step derived-to-base conversion.
  882. Address Base =
  883. GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1,
  884. /*NullCheckValue=*/false, SourceLocation());
  885. LValue SubLV = MakeAddrLValue(Base, BS->getType());
  886. // Recurse onto bases.
  887. ExpandTypeFromArgs(BS->getType(), SubLV, AI);
  888. }
  889. for (auto FD : RExp->Fields) {
  890. // FIXME: What are the right qualifiers here?
  891. LValue SubLV = EmitLValueForFieldInitialization(LV, FD);
  892. ExpandTypeFromArgs(FD->getType(), SubLV, AI);
  893. }
  894. } else if (isa<ComplexExpansion>(Exp.get())) {
  895. auto realValue = *AI++;
  896. auto imagValue = *AI++;
  897. EmitStoreOfComplex(ComplexPairTy(realValue, imagValue), LV, /*init*/ true);
  898. } else {
  899. assert(isa<NoExpansion>(Exp.get()));
  900. EmitStoreThroughLValue(RValue::get(*AI++), LV);
  901. }
  902. }
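// Flatten the value RV according to the expansion of Ty, appending the
// resulting IR values to IRCallArgs starting at IRCallArgPos (base classes
// first, then fields; a complex value contributes its real and imaginary
// parts).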
  903. void CodeGenFunction::ExpandTypeToArgs(
  904. QualType Ty, RValue RV, llvm::FunctionType *IRFuncTy,
  905. SmallVectorImpl<llvm::Value *> &IRCallArgs, unsigned &IRCallArgPos) {
  906. auto Exp = getTypeExpansion(Ty, getContext());
  907. if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
  908. forConstantArrayExpansion(*this, CAExp, RV.getAggregateAddress(),
  909. [&](Address EltAddr) {
  910. RValue EltRV =
  911. convertTempToRValue(EltAddr, CAExp->EltTy, SourceLocation());
  912. ExpandTypeToArgs(CAExp->EltTy, EltRV, IRFuncTy, IRCallArgs, IRCallArgPos);
  913. });
  914. } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
  915. Address This = RV.getAggregateAddress();
  916. for (const CXXBaseSpecifier *BS : RExp->Bases) {
  917. // Perform a single step derived-to-base conversion.
  918. Address Base =
  919. GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1,
  920. /*NullCheckValue=*/false, SourceLocation());
  921. RValue BaseRV = RValue::getAggregate(Base);
  922. // Recurse onto bases.
  923. ExpandTypeToArgs(BS->getType(), BaseRV, IRFuncTy, IRCallArgs,
  924. IRCallArgPos);
  925. }
  926. LValue LV = MakeAddrLValue(This, Ty);
  927. for (auto FD : RExp->Fields) {
  928. RValue FldRV = EmitRValueForField(LV, FD, SourceLocation());
  929. ExpandTypeToArgs(FD->getType(), FldRV, IRFuncTy, IRCallArgs,
  930. IRCallArgPos);
  931. }
  932. } else if (isa<ComplexExpansion>(Exp.get())) {
  933. ComplexPairTy CV = RV.getComplexVal();
  934. IRCallArgs[IRCallArgPos++] = CV.first;
  935. IRCallArgs[IRCallArgPos++] = CV.second;
  936. } else {
  937. assert(isa<NoExpansion>(Exp.get()));
  938. assert(RV.isScalar() &&
  939. "Unexpected non-scalar rvalue during struct expansion.");
  940. // Insert a bitcast as needed.
  941. llvm::Value *V = RV.getScalarVal();
  942. if (IRCallArgPos < IRFuncTy->getNumParams() &&
  943. V->getType() != IRFuncTy->getParamType(IRCallArgPos))
  944. V = Builder.CreateBitCast(V, IRFuncTy->getParamType(IRCallArgPos));
  945. IRCallArgs[IRCallArgPos++] = V;
  946. }
  947. }
  948. /// Create a temporary allocation for the purposes of coercion.
  949. static Address CreateTempAllocaForCoercion(CodeGenFunction &CGF, llvm::Type *Ty,
  950. CharUnits MinAlign) {
  951. // Don't use an alignment that's worse than what LLVM would prefer.
  952. auto PrefAlign = CGF.CGM.getDataLayout().getPrefTypeAlignment(Ty);
  953. CharUnits Align = std::max(MinAlign, CharUnits::fromQuantity(PrefAlign));
  954. return CGF.CreateTempAlloca(Ty, Align);
  955. }
/// EnterStructPointerForCoercedAccess - Given a struct pointer from which we
/// are accessing some number of bytes, try to GEP into the struct to get at
/// its inner goodness. Dive as deep as possible without entering an element
/// with an in-memory size smaller than DstSize.
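/// For example (illustrative): with SrcSTy = { { i32 }, float } and
/// DstSize = 4, this dives through both struct levels and returns the address
/// of the leading i32, since each first element's store size is no smaller
/// than DstSize.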
  960. static Address
  961. EnterStructPointerForCoercedAccess(Address SrcPtr,
  962. llvm::StructType *SrcSTy,
  963. uint64_t DstSize, CodeGenFunction &CGF) {
  964. // We can't dive into a zero-element struct.
  965. if (SrcSTy->getNumElements() == 0) return SrcPtr;
  966. llvm::Type *FirstElt = SrcSTy->getElementType(0);
  967. // If the first elt is at least as large as what we're looking for, or if the
  968. // first element is the same size as the whole struct, we can enter it. The
  969. // comparison must be made on the store size and not the alloca size. Using
  970. // the alloca size may overstate the size of the load.
  971. uint64_t FirstEltSize =
  972. CGF.CGM.getDataLayout().getTypeStoreSize(FirstElt);
  973. if (FirstEltSize < DstSize &&
  974. FirstEltSize < CGF.CGM.getDataLayout().getTypeStoreSize(SrcSTy))
  975. return SrcPtr;
  976. // GEP into the first element.
  977. SrcPtr = CGF.Builder.CreateStructGEP(SrcPtr, 0, CharUnits(), "coerce.dive");
  978. // If the first element is a struct, recurse.
  979. llvm::Type *SrcTy = SrcPtr.getElementType();
  980. if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy))
  981. return EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);
  982. return SrcPtr;
  983. }
  984. /// CoerceIntOrPtrToIntOrPtr - Convert a value Val to the specific Ty where both
  985. /// are either integers or pointers. This does a truncation of the value if it
  986. /// is too large or a zero extension if it is too small.
  987. ///
  988. /// This behaves as if the value were coerced through memory, so on big-endian
  989. /// targets the high bits are preserved in a truncation, while little-endian
  990. /// targets preserve the low bits.
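/// For example, coercing an i64 value to i32 emits 'lshr ..., 32' followed by
/// a trunc on a big-endian target (keeping the high half, as a round-trip
/// through memory would), and a plain trunc on a little-endian target
/// (keeping the low half).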
  991. static llvm::Value *CoerceIntOrPtrToIntOrPtr(llvm::Value *Val,
  992. llvm::Type *Ty,
  993. CodeGenFunction &CGF) {
  994. if (Val->getType() == Ty)
  995. return Val;
  996. if (isa<llvm::PointerType>(Val->getType())) {
  997. // If this is Pointer->Pointer avoid conversion to and from int.
  998. if (isa<llvm::PointerType>(Ty))
  999. return CGF.Builder.CreateBitCast(Val, Ty, "coerce.val");
  1000. // Convert the pointer to an integer so we can play with its width.
  1001. Val = CGF.Builder.CreatePtrToInt(Val, CGF.IntPtrTy, "coerce.val.pi");
  1002. }
  1003. llvm::Type *DestIntTy = Ty;
  1004. if (isa<llvm::PointerType>(DestIntTy))
  1005. DestIntTy = CGF.IntPtrTy;
  1006. if (Val->getType() != DestIntTy) {
  1007. const llvm::DataLayout &DL = CGF.CGM.getDataLayout();
  1008. if (DL.isBigEndian()) {
  1009. // Preserve the high bits on big-endian targets.
  1010. // That is what memory coercion does.
  1011. uint64_t SrcSize = DL.getTypeSizeInBits(Val->getType());
  1012. uint64_t DstSize = DL.getTypeSizeInBits(DestIntTy);
  1013. if (SrcSize > DstSize) {
  1014. Val = CGF.Builder.CreateLShr(Val, SrcSize - DstSize, "coerce.highbits");
  1015. Val = CGF.Builder.CreateTrunc(Val, DestIntTy, "coerce.val.ii");
  1016. } else {
  1017. Val = CGF.Builder.CreateZExt(Val, DestIntTy, "coerce.val.ii");
  1018. Val = CGF.Builder.CreateShl(Val, DstSize - SrcSize, "coerce.highbits");
  1019. }
  1020. } else {
  1021. // Little-endian targets preserve the low bits. No shifts required.
  1022. Val = CGF.Builder.CreateIntCast(Val, DestIntTy, false, "coerce.val.ii");
  1023. }
  1024. }
  1025. if (isa<llvm::PointerType>(Ty))
  1026. Val = CGF.Builder.CreateIntToPtr(Val, Ty, "coerce.val.ip");
  1027. return Val;
  1028. }
/// CreateCoercedLoad - Create a load from Src, interpreted as a pointer to an
/// object of type Ty, using the alignment recorded in the Src address.
///
/// This safely handles the case when the src type is smaller than the
/// destination type; in this situation the values of bits which are not
/// present in the src are undefined.
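/// Illustrative examples: loading a <2 x float> source as an i64 simply
/// bitcasts the pointer and loads, since the sizes match; loading a 3-byte
/// [3 x i8] source as an i32 instead goes through a temporary alloca and a
/// 3-byte memcpy, leaving the extra destination bits undefined.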
  1036. static llvm::Value *CreateCoercedLoad(Address Src, llvm::Type *Ty,
  1037. CodeGenFunction &CGF) {
  1038. llvm::Type *SrcTy = Src.getElementType();
  1039. // If SrcTy and Ty are the same, just do a load.
  1040. if (SrcTy == Ty)
  1041. return CGF.Builder.CreateLoad(Src);
  1042. uint64_t DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(Ty);
  1043. if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) {
  1044. Src = EnterStructPointerForCoercedAccess(Src, SrcSTy, DstSize, CGF);
  1045. SrcTy = Src.getType()->getElementType();
  1046. }
  1047. uint64_t SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);
  1048. // If the source and destination are integer or pointer types, just do an
  1049. // extension or truncation to the desired type.
  1050. if ((isa<llvm::IntegerType>(Ty) || isa<llvm::PointerType>(Ty)) &&
  1051. (isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy))) {
  1052. llvm::Value *Load = CGF.Builder.CreateLoad(Src);
  1053. return CoerceIntOrPtrToIntOrPtr(Load, Ty, CGF);
  1054. }
  1055. // If load is legal, just bitcast the src pointer.
  1056. if (SrcSize >= DstSize) {
  1057. // Generally SrcSize is never greater than DstSize, since this means we are
  1058. // losing bits. However, this can happen in cases where the structure has
  1059. // additional padding, for example due to a user specified alignment.
  1060. //
// FIXME: Assert that we aren't truncating non-padding bits when we have
// access to that information.
  1063. Src = CGF.Builder.CreateBitCast(Src,
  1064. Ty->getPointerTo(Src.getAddressSpace()));
  1065. return CGF.Builder.CreateLoad(Src);
  1066. }
  1067. // Otherwise do coercion through memory. This is stupid, but simple.
  1068. Address Tmp = CreateTempAllocaForCoercion(CGF, Ty, Src.getAlignment());
  1069. Address Casted = CGF.Builder.CreateBitCast(Tmp, CGF.Int8PtrTy);
  1070. Address SrcCasted = CGF.Builder.CreateBitCast(Src, CGF.Int8PtrTy);
  1071. CGF.Builder.CreateMemCpy(Casted, SrcCasted,
  1072. llvm::ConstantInt::get(CGF.IntPtrTy, SrcSize),
  1073. false);
  1074. return CGF.Builder.CreateLoad(Tmp);
  1075. }
  1076. // Function to store a first-class aggregate into memory. We prefer to
  1077. // store the elements rather than the aggregate to be more friendly to
  1078. // fast-isel.
  1079. // FIXME: Do we need to recurse here?
  1080. static void BuildAggStore(CodeGenFunction &CGF, llvm::Value *Val,
  1081. Address Dest, bool DestIsVolatile) {
  1082. // Prefer scalar stores to first-class aggregate stores.
  1083. if (llvm::StructType *STy =
  1084. dyn_cast<llvm::StructType>(Val->getType())) {
  1085. const llvm::StructLayout *Layout =
  1086. CGF.CGM.getDataLayout().getStructLayout(STy);
  1087. for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
  1088. auto EltOffset = CharUnits::fromQuantity(Layout->getElementOffset(i));
  1089. Address EltPtr = CGF.Builder.CreateStructGEP(Dest, i, EltOffset);
  1090. llvm::Value *Elt = CGF.Builder.CreateExtractValue(Val, i);
  1091. CGF.Builder.CreateStore(Elt, EltPtr, DestIsVolatile);
  1092. }
  1093. } else {
  1094. CGF.Builder.CreateStore(Val, Dest, DestIsVolatile);
  1095. }
  1096. }
/// CreateCoercedStore - Create a store of Src to the address Dst, where the
/// source and destination may have different types; the destination's
/// alignment is taken from the Dst address.
///
/// This safely handles the case when the src type is larger than the
/// destination type; the upper bits of the src will be lost.
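/// Illustrative examples: storing an i64 into a { i32, i32 } destination
/// element-bitcasts the destination and performs a single i64 store; storing
/// an i64 into a { float } destination dives to the float, spills the i64 to
/// a temporary, and memcpys only the first four bytes, dropping the upper
/// bits.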
  1103. static void CreateCoercedStore(llvm::Value *Src,
  1104. Address Dst,
  1105. bool DstIsVolatile,
  1106. CodeGenFunction &CGF) {
  1107. llvm::Type *SrcTy = Src->getType();
  1108. llvm::Type *DstTy = Dst.getType()->getElementType();
  1109. if (SrcTy == DstTy) {
  1110. CGF.Builder.CreateStore(Src, Dst, DstIsVolatile);
  1111. return;
  1112. }
  1113. uint64_t SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);
  1114. if (llvm::StructType *DstSTy = dyn_cast<llvm::StructType>(DstTy)) {
  1115. Dst = EnterStructPointerForCoercedAccess(Dst, DstSTy, SrcSize, CGF);
  1116. DstTy = Dst.getType()->getElementType();
  1117. }
  1118. // If the source and destination are integer or pointer types, just do an
  1119. // extension or truncation to the desired type.
  1120. if ((isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy)) &&
  1121. (isa<llvm::IntegerType>(DstTy) || isa<llvm::PointerType>(DstTy))) {
  1122. Src = CoerceIntOrPtrToIntOrPtr(Src, DstTy, CGF);
  1123. CGF.Builder.CreateStore(Src, Dst, DstIsVolatile);
  1124. return;
  1125. }
  1126. uint64_t DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(DstTy);
  1127. // If store is legal, just bitcast the src pointer.
  1128. if (SrcSize <= DstSize) {
  1129. Dst = CGF.Builder.CreateElementBitCast(Dst, SrcTy);
  1130. BuildAggStore(CGF, Src, Dst, DstIsVolatile);
  1131. } else {
  1132. // Otherwise do coercion through memory. This is stupid, but
  1133. // simple.
  1134. // Generally SrcSize is never greater than DstSize, since this means we are
  1135. // losing bits. However, this can happen in cases where the structure has
  1136. // additional padding, for example due to a user specified alignment.
//
// FIXME: Assert that we aren't truncating non-padding bits when we have
// access to that information.
  1140. Address Tmp = CreateTempAllocaForCoercion(CGF, SrcTy, Dst.getAlignment());
  1141. CGF.Builder.CreateStore(Src, Tmp);
  1142. Address Casted = CGF.Builder.CreateBitCast(Tmp, CGF.Int8PtrTy);
  1143. Address DstCasted = CGF.Builder.CreateBitCast(Dst, CGF.Int8PtrTy);
  1144. CGF.Builder.CreateMemCpy(DstCasted, Casted,
  1145. llvm::ConstantInt::get(CGF.IntPtrTy, DstSize),
  1146. false);
  1147. }
  1148. }
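// If the ABI info records a nonzero direct offset, advance addr by that many
// bytes and re-cast the result to the coerced type; otherwise return addr
// unchanged.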
  1149. static Address emitAddressAtOffset(CodeGenFunction &CGF, Address addr,
  1150. const ABIArgInfo &info) {
  1151. if (unsigned offset = info.getDirectOffset()) {
  1152. addr = CGF.Builder.CreateElementBitCast(addr, CGF.Int8Ty);
  1153. addr = CGF.Builder.CreateConstInBoundsByteGEP(addr,
  1154. CharUnits::fromQuantity(offset));
  1155. addr = CGF.Builder.CreateElementBitCast(addr, info.getCoerceToType());
  1156. }
  1157. return addr;
  1158. }
  1159. namespace {
/// Encapsulates information about the way function arguments from
/// CGFunctionInfo should be passed to the actual LLVM IR function.
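/// For example (illustrative, with no padding or inalloca arguments): for a
/// function whose return value is passed indirectly (sret first) and whose
/// two Clang arguments lower to one and three IR values respectively,
/// totalIRArgs() is 5, getIRArgs(0) is {1, 1}, and getIRArgs(1) is {2, 3}.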
  1162. class ClangToLLVMArgMapping {
  1163. static const unsigned InvalidIndex = ~0U;
  1164. unsigned InallocaArgNo;
  1165. unsigned SRetArgNo;
  1166. unsigned TotalIRArgs;
/// The LLVM IR arguments corresponding to a single Clang argument.
  1168. struct IRArgs {
  1169. unsigned PaddingArgIndex;
  1170. // Argument is expanded to IR arguments at positions
  1171. // [FirstArgIndex, FirstArgIndex + NumberOfArgs).
  1172. unsigned FirstArgIndex;
  1173. unsigned NumberOfArgs;
  1174. IRArgs()
  1175. : PaddingArgIndex(InvalidIndex), FirstArgIndex(InvalidIndex),
  1176. NumberOfArgs(0) {}
  1177. };
  1178. SmallVector<IRArgs, 8> ArgInfo;
  1179. public:
  1180. ClangToLLVMArgMapping(const ASTContext &Context, const CGFunctionInfo &FI,
  1181. bool OnlyRequiredArgs = false)
  1182. : InallocaArgNo(InvalidIndex), SRetArgNo(InvalidIndex), TotalIRArgs(0),
  1183. ArgInfo(OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size()) {
  1184. construct(Context, FI, OnlyRequiredArgs);
  1185. }
  1186. bool hasInallocaArg() const { return InallocaArgNo != InvalidIndex; }
  1187. unsigned getInallocaArgNo() const {
  1188. assert(hasInallocaArg());
  1189. return InallocaArgNo;
  1190. }
  1191. bool hasSRetArg() const { return SRetArgNo != InvalidIndex; }
  1192. unsigned getSRetArgNo() const {
  1193. assert(hasSRetArg());
  1194. return SRetArgNo;
  1195. }
  1196. unsigned totalIRArgs() const { return TotalIRArgs; }
  1197. bool hasPaddingArg(unsigned ArgNo) const {
  1198. assert(ArgNo < ArgInfo.size());
  1199. return ArgInfo[ArgNo].PaddingArgIndex != InvalidIndex;
  1200. }
  1201. unsigned getPaddingArgNo(unsigned ArgNo) const {
  1202. assert(hasPaddingArg(ArgNo));
  1203. return ArgInfo[ArgNo].PaddingArgIndex;
  1204. }
/// Returns the index of the first IR argument corresponding to ArgNo and the
/// number of IR arguments it expands to.
  1207. std::pair<unsigned, unsigned> getIRArgs(unsigned ArgNo) const {
  1208. assert(ArgNo < ArgInfo.size());
  1209. return std::make_pair(ArgInfo[ArgNo].FirstArgIndex,
  1210. ArgInfo[ArgNo].NumberOfArgs);
  1211. }
  1212. private:
  1213. void construct(const ASTContext &Context, const CGFunctionInfo &FI,
  1214. bool OnlyRequiredArgs);
  1215. };
  1216. void ClangToLLVMArgMapping::construct(const ASTContext &Context,
  1217. const CGFunctionInfo &FI,
  1218. bool OnlyRequiredArgs) {
  1219. unsigned IRArgNo = 0;
  1220. bool SwapThisWithSRet = false;
  1221. const ABIArgInfo &RetAI = FI.getReturnInfo();
  1222. if (RetAI.getKind() == ABIArgInfo::Indirect) {
  1223. SwapThisWithSRet = RetAI.isSRetAfterThis();
  1224. SRetArgNo = SwapThisWithSRet ? 1 : IRArgNo++;
  1225. }
  1226. unsigned ArgNo = 0;
  1227. unsigned NumArgs = OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size();
  1228. for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(); ArgNo < NumArgs;
  1229. ++I, ++ArgNo) {
  1230. assert(I != FI.arg_end());
  1231. QualType ArgType = I->type;
  1232. const ABIArgInfo &AI = I->info;
  1233. // Collect data about IR arguments corresponding to Clang argument ArgNo.
  1234. auto &IRArgs = ArgInfo[ArgNo];
  1235. if (AI.getPaddingType())
  1236. IRArgs.PaddingArgIndex = IRArgNo++;
  1237. switch (AI.getKind()) {
  1238. case ABIArgInfo::Extend:
  1239. case ABIArgInfo::Direct: {
  1240. // FIXME: handle sseregparm someday...
  1241. llvm::StructType *STy = dyn_cast<llvm::StructType>(AI.getCoerceToType());
  1242. if (AI.isDirect() && AI.getCanBeFlattened() && STy) {
  1243. IRArgs.NumberOfArgs = STy->getNumElements();
  1244. } else {
  1245. IRArgs.NumberOfArgs = 1;
  1246. }
  1247. break;
  1248. }
  1249. case ABIArgInfo::Indirect:
  1250. IRArgs.NumberOfArgs = 1;
  1251. break;
  1252. case ABIArgInfo::Ignore:
  1253. case ABIArgInfo::InAlloca:
// Ignore and InAlloca don't have matching LLVM parameters.
  1255. IRArgs.NumberOfArgs = 0;
  1256. break;
  1257. case ABIArgInfo::CoerceAndExpand:
  1258. IRArgs.NumberOfArgs = AI.getCoerceAndExpandTypeSequence().size();
  1259. break;
  1260. case ABIArgInfo::Expand:
  1261. IRArgs.NumberOfArgs = getExpansionSize(ArgType, Context);
  1262. break;
  1263. }
  1264. if (IRArgs.NumberOfArgs > 0) {
  1265. IRArgs.FirstArgIndex = IRArgNo;
  1266. IRArgNo += IRArgs.NumberOfArgs;
  1267. }
  1268. // Skip over the sret parameter when it comes second. We already handled it
  1269. // above.
  1270. if (IRArgNo == 1 && SwapThisWithSRet)
  1271. IRArgNo++;
  1272. }
  1273. assert(ArgNo == ArgInfo.size());
  1274. if (FI.usesInAlloca())
  1275. InallocaArgNo = IRArgNo++;
  1276. TotalIRArgs = IRArgNo;
  1277. }
  1278. } // namespace
  1279. /***/
  1280. bool CodeGenModule::ReturnTypeUsesSRet(const CGFunctionInfo &FI) {
  1281. return FI.getReturnInfo().isIndirect();
  1282. }
  1283. bool CodeGenModule::ReturnSlotInterferesWithArgs(const CGFunctionInfo &FI) {
  1284. return ReturnTypeUsesSRet(FI) &&
  1285. getTargetCodeGenInfo().doesReturnSlotInterfereWithArgs();
  1286. }
  1287. bool CodeGenModule::ReturnTypeUsesFPRet(QualType ResultType) {
  1288. if (const BuiltinType *BT = ResultType->getAs<BuiltinType>()) {
  1289. switch (BT->getKind()) {
  1290. default:
  1291. return false;
  1292. case BuiltinType::Float:
  1293. return getTarget().useObjCFPRetForRealType(TargetInfo::Float);
  1294. case BuiltinType::Double:
  1295. return getTarget().useObjCFPRetForRealType(TargetInfo::Double);
  1296. case BuiltinType::LongDouble:
  1297. return getTarget().useObjCFPRetForRealType(TargetInfo::LongDouble);
  1298. }
  1299. }
  1300. return false;
  1301. }
  1302. bool CodeGenModule::ReturnTypeUsesFP2Ret(QualType ResultType) {
  1303. if (const ComplexType *CT = ResultType->getAs<ComplexType>()) {
  1304. if (const BuiltinType *BT = CT->getElementType()->getAs<BuiltinType>()) {
  1305. if (BT->getKind() == BuiltinType::LongDouble)
  1306. return getTarget().useObjCFP2RetForComplexLongDouble();
  1307. }
  1308. }
  1309. return false;
  1310. }
  1311. llvm::FunctionType *CodeGenTypes::GetFunctionType(GlobalDecl GD) {
  1312. const CGFunctionInfo &FI = arrangeGlobalDeclaration(GD);
  1313. return GetFunctionType(FI);
  1314. }
  1315. llvm::FunctionType *
  1316. CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI) {
  1317. bool Inserted = FunctionsBeingProcessed.insert(&FI).second;
  1318. (void)Inserted;
  1319. assert(Inserted && "Recursively being processed?");
  1320. llvm::Type *resultType = nullptr;
  1321. const ABIArgInfo &retAI = FI.getReturnInfo();
  1322. switch (retAI.getKind()) {
  1323. case ABIArgInfo::Expand:
  1324. llvm_unreachable("Invalid ABI kind for return argument");
  1325. case ABIArgInfo::Extend:
  1326. case ABIArgInfo::Direct:
  1327. resultType = retAI.getCoerceToType();
  1328. break;
  1329. case ABIArgInfo::InAlloca:
  1330. if (retAI.getInAllocaSRet()) {
  1331. // sret things on win32 aren't void, they return the sret pointer.
  1332. QualType ret = FI.getReturnType();
  1333. llvm::Type *ty = ConvertType(ret);
  1334. unsigned addressSpace = Context.getTargetAddressSpace(ret);
  1335. resultType = llvm::PointerType::get(ty, addressSpace);
  1336. } else {
  1337. resultType = llvm::Type::getVoidTy(getLLVMContext());
  1338. }
  1339. break;
  1340. case ABIArgInfo::Indirect:
  1341. case ABIArgInfo::Ignore:
  1342. resultType = llvm::Type::getVoidTy(getLLVMContext());
  1343. break;
  1344. case ABIArgInfo::CoerceAndExpand:
  1345. resultType = retAI.getUnpaddedCoerceAndExpandType();
  1346. break;
  1347. }
  1348. ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI, true);
  1349. SmallVector<llvm::Type*, 8> ArgTypes(IRFunctionArgs.totalIRArgs());
  1350. // Add type for sret argument.
  1351. if (IRFunctionArgs.hasSRetArg()) {
  1352. QualType Ret = FI.getReturnType();
  1353. llvm::Type *Ty = ConvertType(Ret);
  1354. unsigned AddressSpace = Context.getTargetAddressSpace(Ret);
  1355. ArgTypes[IRFunctionArgs.getSRetArgNo()] =
  1356. llvm::PointerType::get(Ty, AddressSpace);
  1357. }
  1358. // Add type for inalloca argument.
  1359. if (IRFunctionArgs.hasInallocaArg()) {
  1360. auto ArgStruct = FI.getArgStruct();
  1361. assert(ArgStruct);
  1362. ArgTypes[IRFunctionArgs.getInallocaArgNo()] = ArgStruct->getPointerTo();
  1363. }
  1364. // Add in all of the required arguments.
  1365. unsigned ArgNo = 0;
  1366. CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
  1367. ie = it + FI.getNumRequiredArgs();
  1368. for (; it != ie; ++it, ++ArgNo) {
  1369. const ABIArgInfo &ArgInfo = it->info;
  1370. // Insert a padding type to ensure proper alignment.
  1371. if (IRFunctionArgs.hasPaddingArg(ArgNo))
  1372. ArgTypes[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
  1373. ArgInfo.getPaddingType();
  1374. unsigned FirstIRArg, NumIRArgs;
  1375. std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
  1376. switch (ArgInfo.getKind()) {
  1377. case ABIArgInfo::Ignore:
  1378. case ABIArgInfo::InAlloca:
  1379. assert(NumIRArgs == 0);
  1380. break;
  1381. case ABIArgInfo::Indirect: {
  1382. assert(NumIRArgs == 1);
// Indirect arguments are always on the stack, which is the alloca address space.
  1384. llvm::Type *LTy = ConvertTypeForMem(it->type);
  1385. ArgTypes[FirstIRArg] = LTy->getPointerTo(
  1386. CGM.getDataLayout().getAllocaAddrSpace());
  1387. break;
  1388. }
  1389. case ABIArgInfo::Extend:
  1390. case ABIArgInfo::Direct: {
  1391. // Fast-isel and the optimizer generally like scalar values better than
  1392. // FCAs, so we flatten them if this is safe to do for this argument.
  1393. llvm::Type *argType = ArgInfo.getCoerceToType();
  1394. llvm::StructType *st = dyn_cast<llvm::StructType>(argType);
  1395. if (st && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) {
  1396. assert(NumIRArgs == st->getNumElements());
  1397. for (unsigned i = 0, e = st->getNumElements(); i != e; ++i)
  1398. ArgTypes[FirstIRArg + i] = st->getElementType(i);
  1399. } else {
  1400. assert(NumIRArgs == 1);
  1401. ArgTypes[FirstIRArg] = argType;
  1402. }
  1403. break;
  1404. }
  1405. case ABIArgInfo::CoerceAndExpand: {
  1406. auto ArgTypesIter = ArgTypes.begin() + FirstIRArg;
  1407. for (auto EltTy : ArgInfo.getCoerceAndExpandTypeSequence()) {
  1408. *ArgTypesIter++ = EltTy;
  1409. }
  1410. assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs);
  1411. break;
  1412. }
  1413. case ABIArgInfo::Expand:
  1414. auto ArgTypesIter = ArgTypes.begin() + FirstIRArg;
  1415. getExpandedTypes(it->type, ArgTypesIter);
  1416. assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs);
  1417. break;
  1418. }
  1419. }
  1420. bool Erased = FunctionsBeingProcessed.erase(&FI); (void)Erased;
  1421. assert(Erased && "Not in set?");
  1422. return llvm::FunctionType::get(resultType, ArgTypes, FI.isVariadic());
  1423. }
  1424. llvm::Type *CodeGenTypes::GetFunctionTypeForVTable(GlobalDecl GD) {
  1425. const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
  1426. const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
  1427. if (!isFuncTypeConvertible(FPT))
  1428. return llvm::StructType::get(getLLVMContext());
  1429. const CGFunctionInfo *Info;
  1430. if (isa<CXXDestructorDecl>(MD))
  1431. Info =
  1432. &arrangeCXXStructorDeclaration(MD, getFromDtorType(GD.getDtorType()));
  1433. else
  1434. Info = &arrangeCXXMethodDeclaration(MD);
  1435. return GetFunctionType(*Info);
  1436. }
  1437. static void AddAttributesFromFunctionProtoType(ASTContext &Ctx,
  1438. llvm::AttrBuilder &FuncAttrs,
  1439. const FunctionProtoType *FPT) {
  1440. if (!FPT)
  1441. return;
  1442. if (!isUnresolvedExceptionSpec(FPT->getExceptionSpecType()) &&
  1443. FPT->isNothrow(Ctx))
  1444. FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
  1445. }
  1446. void CodeGenModule::ConstructDefaultFnAttrList(StringRef Name, bool HasOptnone,
  1447. bool AttrOnCallSite,
  1448. llvm::AttrBuilder &FuncAttrs) {
  1449. // OptimizeNoneAttr takes precedence over -Os or -Oz. No warning needed.
  1450. if (!HasOptnone) {
  1451. if (CodeGenOpts.OptimizeSize)
  1452. FuncAttrs.addAttribute(llvm::Attribute::OptimizeForSize);
  1453. if (CodeGenOpts.OptimizeSize == 2)
  1454. FuncAttrs.addAttribute(llvm::Attribute::MinSize);
  1455. }
  1456. if (CodeGenOpts.DisableRedZone)
  1457. FuncAttrs.addAttribute(llvm::Attribute::NoRedZone);
  1458. if (CodeGenOpts.NoImplicitFloat)
  1459. FuncAttrs.addAttribute(llvm::Attribute::NoImplicitFloat);
  1460. if (AttrOnCallSite) {
  1461. // Attributes that should go on the call site only.
  1462. if (!CodeGenOpts.SimplifyLibCalls ||
  1463. CodeGenOpts.isNoBuiltinFunc(Name.data()))
  1464. FuncAttrs.addAttribute(llvm::Attribute::NoBuiltin);
  1465. if (!CodeGenOpts.TrapFuncName.empty())
  1466. FuncAttrs.addAttribute("trap-func-name", CodeGenOpts.TrapFuncName);
  1467. } else {
  1468. // Attributes that should go on the function, but not the call site.
  1469. if (!CodeGenOpts.DisableFPElim) {
  1470. FuncAttrs.addAttribute("no-frame-pointer-elim", "false");
  1471. } else if (CodeGenOpts.OmitLeafFramePointer) {
  1472. FuncAttrs.addAttribute("no-frame-pointer-elim", "false");
  1473. FuncAttrs.addAttribute("no-frame-pointer-elim-non-leaf");
  1474. } else {
  1475. FuncAttrs.addAttribute("no-frame-pointer-elim", "true");
  1476. FuncAttrs.addAttribute("no-frame-pointer-elim-non-leaf");
  1477. }
  1478. FuncAttrs.addAttribute("less-precise-fpmad",
  1479. llvm::toStringRef(CodeGenOpts.LessPreciseFPMAD));
  1480. if (!CodeGenOpts.FPDenormalMode.empty())
  1481. FuncAttrs.addAttribute("denormal-fp-math", CodeGenOpts.FPDenormalMode);
  1482. FuncAttrs.addAttribute("no-trapping-math",
  1483. llvm::toStringRef(CodeGenOpts.NoTrappingMath));
  1484. // TODO: Are these all needed?
  1485. // unsafe/inf/nan/nsz are handled by instruction-level FastMathFlags.
  1486. FuncAttrs.addAttribute("no-infs-fp-math",
  1487. llvm::toStringRef(CodeGenOpts.NoInfsFPMath));
  1488. FuncAttrs.addAttribute("no-nans-fp-math",
  1489. llvm::toStringRef(CodeGenOpts.NoNaNsFPMath));
  1490. FuncAttrs.addAttribute("unsafe-fp-math",
  1491. llvm::toStringRef(CodeGenOpts.UnsafeFPMath));
  1492. FuncAttrs.addAttribute("use-soft-float",
  1493. llvm::toStringRef(CodeGenOpts.SoftFloat));
  1494. FuncAttrs.addAttribute("stack-protector-buffer-size",
  1495. llvm::utostr(CodeGenOpts.SSPBufferSize));
  1496. FuncAttrs.addAttribute("no-signed-zeros-fp-math",
  1497. llvm::toStringRef(CodeGenOpts.NoSignedZeros));
  1498. FuncAttrs.addAttribute(
  1499. "correctly-rounded-divide-sqrt-fp-math",
  1500. llvm::toStringRef(CodeGenOpts.CorrectlyRoundedDivSqrt));
  1501. // TODO: Reciprocal estimate codegen options should apply to instructions?
  1502. std::vector<std::string> &Recips = getTarget().getTargetOpts().Reciprocals;
  1503. if (!Recips.empty())
  1504. FuncAttrs.addAttribute("reciprocal-estimates",
  1505. llvm::join(Recips, ","));
  1506. if (CodeGenOpts.StackRealignment)
  1507. FuncAttrs.addAttribute("stackrealign");
  1508. if (CodeGenOpts.Backchain)
  1509. FuncAttrs.addAttribute("backchain");
  1510. }
  1511. if (getLangOpts().assumeFunctionsAreConvergent()) {
  1512. // Conservatively, mark all functions and calls in CUDA and OpenCL as
  1513. // convergent (meaning, they may call an intrinsically convergent op, such
  1514. // as __syncthreads() / barrier(), and so can't have certain optimizations
  1515. // applied around them). LLVM will remove this attribute where it safely
  1516. // can.
  1517. FuncAttrs.addAttribute(llvm::Attribute::Convergent);
  1518. }
  1519. if (getLangOpts().CUDA && getLangOpts().CUDAIsDevice) {
  1520. // Exceptions aren't supported in CUDA device code.
  1521. FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
  1522. // Respect -fcuda-flush-denormals-to-zero.
  1523. if (getLangOpts().CUDADeviceFlushDenormalsToZero)
  1524. FuncAttrs.addAttribute("nvptx-f32ftz", "true");
  1525. }
  1526. }
  1527. void CodeGenModule::AddDefaultFnAttrs(llvm::Function &F) {
  1528. llvm::AttrBuilder FuncAttrs;
  1529. ConstructDefaultFnAttrList(F.getName(),
  1530. F.hasFnAttribute(llvm::Attribute::OptimizeNone),
  1531. /* AttrOnCallsite = */ false, FuncAttrs);
  1532. F.addAttributes(llvm::AttributeList::FunctionIndex, FuncAttrs);
  1533. }
  1534. void CodeGenModule::ConstructAttributeList(
  1535. StringRef Name, const CGFunctionInfo &FI, CGCalleeInfo CalleeInfo,
  1536. llvm::AttributeList &AttrList, unsigned &CallingConv, bool AttrOnCallSite) {
  1537. llvm::AttrBuilder FuncAttrs;
  1538. llvm::AttrBuilder RetAttrs;
  1539. CallingConv = FI.getEffectiveCallingConvention();
  1540. if (FI.isNoReturn())
  1541. FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
// If we have information about the function prototype, we can learn
// attributes from there.
  1544. AddAttributesFromFunctionProtoType(getContext(), FuncAttrs,
  1545. CalleeInfo.getCalleeFunctionProtoType());
  1546. const Decl *TargetDecl = CalleeInfo.getCalleeDecl();
  1547. bool HasOptnone = false;
  1548. // FIXME: handle sseregparm someday...
  1549. if (TargetDecl) {
  1550. if (TargetDecl->hasAttr<ReturnsTwiceAttr>())
  1551. FuncAttrs.addAttribute(llvm::Attribute::ReturnsTwice);
  1552. if (TargetDecl->hasAttr<NoThrowAttr>())
  1553. FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
  1554. if (TargetDecl->hasAttr<NoReturnAttr>())
  1555. FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
  1556. if (TargetDecl->hasAttr<ColdAttr>())
  1557. FuncAttrs.addAttribute(llvm::Attribute::Cold);
  1558. if (TargetDecl->hasAttr<NoDuplicateAttr>())
  1559. FuncAttrs.addAttribute(llvm::Attribute::NoDuplicate);
  1560. if (TargetDecl->hasAttr<ConvergentAttr>())
  1561. FuncAttrs.addAttribute(llvm::Attribute::Convergent);
  1562. if (const FunctionDecl *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
  1563. AddAttributesFromFunctionProtoType(
  1564. getContext(), FuncAttrs, Fn->getType()->getAs<FunctionProtoType>());
// Don't use [[noreturn]] or _Noreturn for a call to a virtual function.
// These attributes are not inherited by overriders.
  1567. const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(Fn);
  1568. if (Fn->isNoReturn() && !(AttrOnCallSite && MD && MD->isVirtual()))
  1569. FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
  1570. }
  1571. // 'const', 'pure' and 'noalias' attributed functions are also nounwind.
  1572. if (TargetDecl->hasAttr<ConstAttr>()) {
  1573. FuncAttrs.addAttribute(llvm::Attribute::ReadNone);
  1574. FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
  1575. } else if (TargetDecl->hasAttr<PureAttr>()) {
  1576. FuncAttrs.addAttribute(llvm::Attribute::ReadOnly);
  1577. FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
  1578. } else if (TargetDecl->hasAttr<NoAliasAttr>()) {
  1579. FuncAttrs.addAttribute(llvm::Attribute::ArgMemOnly);
  1580. FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
  1581. }
  1582. if (TargetDecl->hasAttr<RestrictAttr>())
  1583. RetAttrs.addAttribute(llvm::Attribute::NoAlias);
  1584. if (TargetDecl->hasAttr<ReturnsNonNullAttr>())
  1585. RetAttrs.addAttribute(llvm::Attribute::NonNull);
  1586. if (TargetDecl->hasAttr<AnyX86NoCallerSavedRegistersAttr>())
  1587. FuncAttrs.addAttribute("no_caller_saved_registers");
  1588. HasOptnone = TargetDecl->hasAttr<OptimizeNoneAttr>();
  1589. if (auto *AllocSize = TargetDecl->getAttr<AllocSizeAttr>()) {
  1590. Optional<unsigned> NumElemsParam;
// alloc_size arguments are 1-based; 0 means not present.
  1592. if (unsigned N = AllocSize->getNumElemsParam())
  1593. NumElemsParam = N - 1;
  1594. FuncAttrs.addAllocSizeAttr(AllocSize->getElemSizeParam() - 1,
  1595. NumElemsParam);
  1596. }
  1597. }
  1598. ConstructDefaultFnAttrList(Name, HasOptnone, AttrOnCallSite, FuncAttrs);
  1599. if (CodeGenOpts.EnableSegmentedStacks &&
  1600. !(TargetDecl && TargetDecl->hasAttr<NoSplitStackAttr>()))
  1601. FuncAttrs.addAttribute("split-stack");
  1602. // Add NonLazyBind attribute to function declarations when -fno-plt
  1603. // is used.
  1604. if (TargetDecl && CodeGenOpts.NoPLT) {
  1605. if (auto *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
  1606. if (!Fn->isDefined() && !AttrOnCallSite) {
  1607. FuncAttrs.addAttribute(llvm::Attribute::NonLazyBind);
  1608. }
  1609. }
  1610. }
  1611. if (!AttrOnCallSite) {
  1612. bool DisableTailCalls =
  1613. CodeGenOpts.DisableTailCalls ||
  1614. (TargetDecl && (TargetDecl->hasAttr<DisableTailCallsAttr>() ||
  1615. TargetDecl->hasAttr<AnyX86InterruptAttr>()));
  1616. FuncAttrs.addAttribute("disable-tail-calls",
  1617. llvm::toStringRef(DisableTailCalls));
  1618. // Add target-cpu and target-features attributes to functions. If
  1619. // we have a decl for the function and it has a target attribute then
  1620. // parse that and add it to the feature set.
  1621. StringRef TargetCPU = getTarget().getTargetOpts().CPU;
  1622. const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl);
  1623. if (FD && FD->hasAttr<TargetAttr>()) {
  1624. llvm::StringMap<bool> FeatureMap;
  1625. getFunctionFeatureMap(FeatureMap, FD);
  1626. // Produce the canonical string for this set of features.
  1627. std::vector<std::string> Features;
  1628. for (llvm::StringMap<bool>::const_iterator it = FeatureMap.begin(),
  1629. ie = FeatureMap.end();
  1630. it != ie; ++it)
  1631. Features.push_back((it->second ? "+" : "-") + it->first().str());
  1632. // Now add the target-cpu and target-features to the function.
  1633. // While we populated the feature map above, we still need to
  1634. // get and parse the target attribute so we can get the cpu for
  1635. // the function.
  1636. const auto *TD = FD->getAttr<TargetAttr>();
  1637. TargetAttr::ParsedTargetAttr ParsedAttr = TD->parse();
  1638. if (ParsedAttr.Architecture != "" &&
  1639. getTarget().isValidCPUName(ParsedAttr.Architecture))
  1640. TargetCPU = ParsedAttr.Architecture;
  1641. if (TargetCPU != "")
  1642. FuncAttrs.addAttribute("target-cpu", TargetCPU);
  1643. if (!Features.empty()) {
  1644. std::sort(Features.begin(), Features.end());
  1645. FuncAttrs.addAttribute(
  1646. "target-features",
  1647. llvm::join(Features, ","));
  1648. }
  1649. } else {
  1650. // Otherwise just add the existing target cpu and target features to the
  1651. // function.
  1652. std::vector<std::string> &Features = getTarget().getTargetOpts().Features;
  1653. if (TargetCPU != "")
  1654. FuncAttrs.addAttribute("target-cpu", TargetCPU);
  1655. if (!Features.empty()) {
  1656. std::sort(Features.begin(), Features.end());
  1657. FuncAttrs.addAttribute(
  1658. "target-features",
  1659. llvm::join(Features, ","));
  1660. }
  1661. }
  1662. }
  1663. ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI);
  1664. QualType RetTy = FI.getReturnType();
  1665. const ABIArgInfo &RetAI = FI.getReturnInfo();
  1666. switch (RetAI.getKind()) {
  1667. case ABIArgInfo::Extend:
  1668. if (RetTy->hasSignedIntegerRepresentation())
  1669. RetAttrs.addAttribute(llvm::Attribute::SExt);
  1670. else if (RetTy->hasUnsignedIntegerRepresentation())
  1671. RetAttrs.addAttribute(llvm::Attribute::ZExt);
  1672. // FALL THROUGH
  1673. case ABIArgInfo::Direct:
  1674. if (RetAI.getInReg())
  1675. RetAttrs.addAttribute(llvm::Attribute::InReg);
  1676. break;
  1677. case ABIArgInfo::Ignore:
  1678. break;
  1679. case ABIArgInfo::InAlloca:
  1680. case ABIArgInfo::Indirect: {
  1681. // inalloca and sret disable readnone and readonly
  1682. FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
  1683. .removeAttribute(llvm::Attribute::ReadNone);
  1684. break;
  1685. }
  1686. case ABIArgInfo::CoerceAndExpand:
  1687. break;
  1688. case ABIArgInfo::Expand:
  1689. llvm_unreachable("Invalid ABI kind for return argument");
  1690. }
  1691. if (const auto *RefTy = RetTy->getAs<ReferenceType>()) {
  1692. QualType PTy = RefTy->getPointeeType();
  1693. if (!PTy->isIncompleteType() && PTy->isConstantSizeType())
  1694. RetAttrs.addDereferenceableAttr(getContext().getTypeSizeInChars(PTy)
  1695. .getQuantity());
  1696. else if (getContext().getTargetAddressSpace(PTy) == 0)
  1697. RetAttrs.addAttribute(llvm::Attribute::NonNull);
  1698. }
  1699. bool hasUsedSRet = false;
  1700. SmallVector<llvm::AttributeSet, 4> ArgAttrs(IRFunctionArgs.totalIRArgs());
  1701. // Attach attributes to sret.
  1702. if (IRFunctionArgs.hasSRetArg()) {
  1703. llvm::AttrBuilder SRETAttrs;
  1704. SRETAttrs.addAttribute(llvm::Attribute::StructRet);
  1705. hasUsedSRet = true;
  1706. if (RetAI.getInReg())
  1707. SRETAttrs.addAttribute(llvm::Attribute::InReg);
  1708. ArgAttrs[IRFunctionArgs.getSRetArgNo()] =
  1709. llvm::AttributeSet::get(getLLVMContext(), SRETAttrs);
  1710. }
  1711. // Attach attributes to inalloca argument.
  1712. if (IRFunctionArgs.hasInallocaArg()) {
  1713. llvm::AttrBuilder Attrs;
  1714. Attrs.addAttribute(llvm::Attribute::InAlloca);
  1715. ArgAttrs[IRFunctionArgs.getInallocaArgNo()] =
  1716. llvm::AttributeSet::get(getLLVMContext(), Attrs);
  1717. }
  1718. unsigned ArgNo = 0;
  1719. for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(),
  1720. E = FI.arg_end();
  1721. I != E; ++I, ++ArgNo) {
  1722. QualType ParamType = I->type;
  1723. const ABIArgInfo &AI = I->info;
  1724. llvm::AttrBuilder Attrs;
  1725. // Add attribute for padding argument, if necessary.
  1726. if (IRFunctionArgs.hasPaddingArg(ArgNo)) {
  1727. if (AI.getPaddingInReg()) {
  1728. ArgAttrs[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
  1729. llvm::AttributeSet::get(
  1730. getLLVMContext(),
  1731. llvm::AttrBuilder().addAttribute(llvm::Attribute::InReg));
  1732. }
  1733. }
  1734. // 'restrict' -> 'noalias' is done in EmitFunctionProlog when we
  1735. // have the corresponding parameter variable. It doesn't make
  1736. // sense to do it here because parameters are so messed up.
  1737. switch (AI.getKind()) {
  1738. case ABIArgInfo::Extend:
  1739. if (ParamType->isSignedIntegerOrEnumerationType())
  1740. Attrs.addAttribute(llvm::Attribute::SExt);
  1741. else if (ParamType->isUnsignedIntegerOrEnumerationType()) {
  1742. if (getTypes().getABIInfo().shouldSignExtUnsignedType(ParamType))
  1743. Attrs.addAttribute(llvm::Attribute::SExt);
  1744. else
  1745. Attrs.addAttribute(llvm::Attribute::ZExt);
  1746. }
  1747. // FALL THROUGH
  1748. case ABIArgInfo::Direct:
  1749. if (ArgNo == 0 && FI.isChainCall())
  1750. Attrs.addAttribute(llvm::Attribute::Nest);
  1751. else if (AI.getInReg())
  1752. Attrs.addAttribute(llvm::Attribute::InReg);
  1753. break;
  1754. case ABIArgInfo::Indirect: {
  1755. if (AI.getInReg())
  1756. Attrs.addAttribute(llvm::Attribute::InReg);
  1757. if (AI.getIndirectByVal())
  1758. Attrs.addAttribute(llvm::Attribute::ByVal);
  1759. CharUnits Align = AI.getIndirectAlign();
  1760. // In a byval argument, it is important that the required
  1761. // alignment of the type is honored, as LLVM might be creating a
  1762. // *new* stack object, and needs to know what alignment to give
  1763. // it. (Sometimes it can deduce a sensible alignment on its own,
  1764. // but not if clang decides it must emit a packed struct, or the
  1765. // user specifies increased alignment requirements.)
  1766. //
  1767. // This is different from indirect *not* byval, where the object
  1768. // exists already, and the align attribute is purely
  1769. // informative.
  1770. assert(!Align.isZero());
  1771. // For now, only add this when we have a byval argument.
  1772. // TODO: be less lazy about updating test cases.
  1773. if (AI.getIndirectByVal())
  1774. Attrs.addAlignmentAttr(Align.getQuantity());
  1775. // byval disables readnone and readonly.
  1776. FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
  1777. .removeAttribute(llvm::Attribute::ReadNone);
  1778. break;
  1779. }
  1780. case ABIArgInfo::Ignore:
  1781. case ABIArgInfo::Expand:
  1782. case ABIArgInfo::CoerceAndExpand:
  1783. break;
  1784. case ABIArgInfo::InAlloca:
  1785. // inalloca disables readnone and readonly.
  1786. FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
  1787. .removeAttribute(llvm::Attribute::ReadNone);
  1788. continue;
  1789. }
  1790. if (const auto *RefTy = ParamType->getAs<ReferenceType>()) {
  1791. QualType PTy = RefTy->getPointeeType();
  1792. if (!PTy->isIncompleteType() && PTy->isConstantSizeType())
  1793. Attrs.addDereferenceableAttr(getContext().getTypeSizeInChars(PTy)
  1794. .getQuantity());
  1795. else if (getContext().getTargetAddressSpace(PTy) == 0)
  1796. Attrs.addAttribute(llvm::Attribute::NonNull);
  1797. }
  1798. switch (FI.getExtParameterInfo(ArgNo).getABI()) {
  1799. case ParameterABI::Ordinary:
  1800. break;
  1801. case ParameterABI::SwiftIndirectResult: {
  1802. // Add 'sret' if we haven't already used it for something, but
  1803. // only if the result is void.
  1804. if (!hasUsedSRet && RetTy->isVoidType()) {
  1805. Attrs.addAttribute(llvm::Attribute::StructRet);
  1806. hasUsedSRet = true;
  1807. }
  1808. // Add 'noalias' in either case.
  1809. Attrs.addAttribute(llvm::Attribute::NoAlias);
  1810. // Add 'dereferenceable' and 'alignment'.
  1811. auto PTy = ParamType->getPointeeType();
  1812. if (!PTy->isIncompleteType() && PTy->isConstantSizeType()) {
  1813. auto info = getContext().getTypeInfoInChars(PTy);
  1814. Attrs.addDereferenceableAttr(info.first.getQuantity());
  1815. Attrs.addAttribute(llvm::Attribute::getWithAlignment(getLLVMContext(),
  1816. info.second.getQuantity()));
  1817. }
  1818. break;
  1819. }
  1820. case ParameterABI::SwiftErrorResult:
  1821. Attrs.addAttribute(llvm::Attribute::SwiftError);
  1822. break;
  1823. case ParameterABI::SwiftContext:
  1824. Attrs.addAttribute(llvm::Attribute::SwiftSelf);
  1825. break;
  1826. }
  1827. if (FI.getExtParameterInfo(ArgNo).isNoEscape())
  1828. Attrs.addAttribute(llvm::Attribute::NoCapture);
  1829. if (Attrs.hasAttributes()) {
  1830. unsigned FirstIRArg, NumIRArgs;
  1831. std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
  1832. for (unsigned i = 0; i < NumIRArgs; i++)
  1833. ArgAttrs[FirstIRArg + i] =
  1834. llvm::AttributeSet::get(getLLVMContext(), Attrs);
  1835. }
  1836. }
  1837. assert(ArgNo == FI.arg_size());
  1838. AttrList = llvm::AttributeList::get(
  1839. getLLVMContext(), llvm::AttributeSet::get(getLLVMContext(), FuncAttrs),
  1840. llvm::AttributeSet::get(getLLVMContext(), RetAttrs), ArgAttrs);
  1841. }
  1842. /// An argument came in as a promoted argument; demote it back to its
  1843. /// declared type.
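/// For example, a K&R-promoted 'float' parameter arrives as a double and is
/// demoted with an FP cast, while a promoted 'short' arrives as an int and is
/// truncated.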
  1844. static llvm::Value *emitArgumentDemotion(CodeGenFunction &CGF,
  1845. const VarDecl *var,
  1846. llvm::Value *value) {
  1847. llvm::Type *varType = CGF.ConvertType(var->getType());
  1848. // This can happen with promotions that actually don't change the
  1849. // underlying type, like the enum promotions.
  1850. if (value->getType() == varType) return value;
  1851. assert((varType->isIntegerTy() || varType->isFloatingPointTy())
  1852. && "unexpected promotion type");
  1853. if (isa<llvm::IntegerType>(varType))
  1854. return CGF.Builder.CreateTrunc(value, varType, "arg.unpromote");
  1855. return CGF.Builder.CreateFPCast(value, varType, "arg.unpromote");
  1856. }
  1857. /// Returns the attribute (either parameter attribute, or function
  1858. /// attribute), which declares argument ArgNo to be non-null.
  1859. static const NonNullAttr *getNonNullAttr(const Decl *FD, const ParmVarDecl *PVD,
  1860. QualType ArgType, unsigned ArgNo) {
  1861. // FIXME: __attribute__((nonnull)) can also be applied to:
  1862. // - references to pointers, where the pointee is known to be
  1863. // nonnull (apparently a Clang extension)
  1864. // - transparent unions containing pointers
  1865. // In the former case, LLVM IR cannot represent the constraint. In
  1866. // the latter case, we have no guarantee that the transparent union
  1867. // is in fact passed as a pointer.
  1868. if (!ArgType->isAnyPointerType() && !ArgType->isBlockPointerType())
  1869. return nullptr;
  1870. // First, check attribute on parameter itself.
  1871. if (PVD) {
  1872. if (auto ParmNNAttr = PVD->getAttr<NonNullAttr>())
  1873. return ParmNNAttr;
  1874. }
  1875. // Check function attributes.
  1876. if (!FD)
  1877. return nullptr;
  1878. for (const auto *NNAttr : FD->specific_attrs<NonNullAttr>()) {
  1879. if (NNAttr->isNonNull(ArgNo))
  1880. return NNAttr;
  1881. }
  1882. return nullptr;
  1883. }
  1884. namespace {
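// Cleanup that copies the swifterror value from the function-local temporary
// back into the caller-provided argument slot when the function exits.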
  1885. struct CopyBackSwiftError final : EHScopeStack::Cleanup {
  1886. Address Temp;
  1887. Address Arg;
  1888. CopyBackSwiftError(Address temp, Address arg) : Temp(temp), Arg(arg) {}
  1889. void Emit(CodeGenFunction &CGF, Flags flags) override {
  1890. llvm::Value *errorValue = CGF.Builder.CreateLoad(Temp);
  1891. CGF.Builder.CreateStore(errorValue, Arg);
  1892. }
  1893. };
  1894. }
  1895. void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
  1896. llvm::Function *Fn,
  1897. const FunctionArgList &Args) {
  1898. if (CurCodeDecl && CurCodeDecl->hasAttr<NakedAttr>())
  1899. // Naked functions don't have prologues.
  1900. return;
  1901. // If this is an implicit-return-zero function, go ahead and
  1902. // initialize the return value. TODO: it might be nice to have
  1903. // a more general mechanism for this that didn't require synthesized
  1904. // return statements.
  1905. if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurCodeDecl)) {
  1906. if (FD->hasImplicitReturnZero()) {
  1907. QualType RetTy = FD->getReturnType().getUnqualifiedType();
  1908. llvm::Type* LLVMTy = CGM.getTypes().ConvertType(RetTy);
  1909. llvm::Constant* Zero = llvm::Constant::getNullValue(LLVMTy);
  1910. Builder.CreateStore(Zero, ReturnValue);
  1911. }
  1912. }
  1913. // FIXME: We no longer need the types from FunctionArgList; lift up and
  1914. // simplify.
  1915. ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), FI);
  1916. // Flattened function arguments.
  1917. SmallVector<llvm::Value *, 16> FnArgs;
  1918. FnArgs.reserve(IRFunctionArgs.totalIRArgs());
  1919. for (auto &Arg : Fn->args()) {
  1920. FnArgs.push_back(&Arg);
  1921. }
  1922. assert(FnArgs.size() == IRFunctionArgs.totalIRArgs());
  1923. // If we're using inalloca, all the memory arguments are GEPs off of the last
  1924. // parameter, which is a pointer to the complete memory area.
  1925. Address ArgStruct = Address::invalid();
  1926. const llvm::StructLayout *ArgStructLayout = nullptr;
  1927. if (IRFunctionArgs.hasInallocaArg()) {
  1928. ArgStructLayout = CGM.getDataLayout().getStructLayout(FI.getArgStruct());
  1929. ArgStruct = Address(FnArgs[IRFunctionArgs.getInallocaArgNo()],
  1930. FI.getArgStructAlignment());
  1931. assert(ArgStruct.getType() == FI.getArgStruct()->getPointerTo());
  1932. }
  1933. // Name the struct return parameter.
  1934. if (IRFunctionArgs.hasSRetArg()) {
  1935. auto AI = cast<llvm::Argument>(FnArgs[IRFunctionArgs.getSRetArgNo()]);
  1936. AI->setName("agg.result");
  1937. AI->addAttr(llvm::Attribute::NoAlias);
  1938. }
// Track if we received the parameter as a pointer (indirect, byval, or
// inalloca). If we already have a pointer, EmitParmDecl doesn't need to copy
// it into a local alloca for us.
  1942. SmallVector<ParamValue, 16> ArgVals;
  1943. ArgVals.reserve(Args.size());
  1944. // Create a pointer value for every parameter declaration. This usually
  1945. // entails copying one or more LLVM IR arguments into an alloca. Don't push
  1946. // any cleanups or do anything that might unwind. We do that separately, so
  1947. // we can push the cleanups in the correct order for the ABI.
  1948. assert(FI.arg_size() == Args.size() &&
  1949. "Mismatch between function signature & arguments.");
  1950. unsigned ArgNo = 0;
  1951. CGFunctionInfo::const_arg_iterator info_it = FI.arg_begin();
  1952. for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
  1953. i != e; ++i, ++info_it, ++ArgNo) {
  1954. const VarDecl *Arg = *i;
  1955. QualType Ty = info_it->type;
  1956. const ABIArgInfo &ArgI = info_it->info;
  1957. bool isPromoted =
  1958. isa<ParmVarDecl>(Arg) && cast<ParmVarDecl>(Arg)->isKNRPromoted();
  1959. unsigned FirstIRArg, NumIRArgs;
  1960. std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
  1961. switch (ArgI.getKind()) {
  1962. case ABIArgInfo::InAlloca: {
  1963. assert(NumIRArgs == 0);
  1964. auto FieldIndex = ArgI.getInAllocaFieldIndex();
  1965. CharUnits FieldOffset =
  1966. CharUnits::fromQuantity(ArgStructLayout->getElementOffset(FieldIndex));
  1967. Address V = Builder.CreateStructGEP(ArgStruct, FieldIndex, FieldOffset,
  1968. Arg->getName());
  1969. ArgVals.push_back(ParamValue::forIndirect(V));
  1970. break;
  1971. }
  1972. case ABIArgInfo::Indirect: {
  1973. assert(NumIRArgs == 1);
  1974. Address ParamAddr = Address(FnArgs[FirstIRArg], ArgI.getIndirectAlign());
  1975. if (!hasScalarEvaluationKind(Ty)) {
  1976. // Aggregates and complex variables are accessed by reference. All we
  1977. // need to do is realign the value, if requested.
  1978. Address V = ParamAddr;
  1979. if (ArgI.getIndirectRealign()) {
  1980. Address AlignedTemp = CreateMemTemp(Ty, "coerce");
  1981. // Copy from the incoming argument pointer to the temporary with the
  1982. // appropriate alignment.
  1983. //
  1984. // FIXME: We should have a common utility for generating an aggregate
  1985. // copy.
  1986. CharUnits Size = getContext().getTypeSizeInChars(Ty);
  1987. auto SizeVal = llvm::ConstantInt::get(IntPtrTy, Size.getQuantity());
  1988. Address Dst = Builder.CreateBitCast(AlignedTemp, Int8PtrTy);
  1989. Address Src = Builder.CreateBitCast(ParamAddr, Int8PtrTy);
  1990. Builder.CreateMemCpy(Dst, Src, SizeVal, false);
  1991. V = AlignedTemp;
  1992. }
  1993. ArgVals.push_back(ParamValue::forIndirect(V));
  1994. } else {
  1995. // Load scalar value from indirect argument.
  1996. llvm::Value *V =
  1997. EmitLoadOfScalar(ParamAddr, false, Ty, Arg->getLocStart());
  1998. if (isPromoted)
  1999. V = emitArgumentDemotion(*this, Arg, V);
  2000. ArgVals.push_back(ParamValue::forDirect(V));
  2001. }
  2002. break;
  2003. }
  2004. case ABIArgInfo::Extend:
  2005. case ABIArgInfo::Direct: {
  2006. // If we have the trivial case, handle it with no muss and fuss.
  2007. if (!isa<llvm::StructType>(ArgI.getCoerceToType()) &&
  2008. ArgI.getCoerceToType() == ConvertType(Ty) &&
  2009. ArgI.getDirectOffset() == 0) {
  2010. assert(NumIRArgs == 1);
  2011. llvm::Value *V = FnArgs[FirstIRArg];
  2012. auto AI = cast<llvm::Argument>(V);
  2013. if (const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(Arg)) {
  2014. if (getNonNullAttr(CurCodeDecl, PVD, PVD->getType(),
  2015. PVD->getFunctionScopeIndex()))
  2016. AI->addAttr(llvm::Attribute::NonNull);
  2017. QualType OTy = PVD->getOriginalType();
  2018. if (const auto *ArrTy =
  2019. getContext().getAsConstantArrayType(OTy)) {
  2020. // A C99 array parameter declaration with the static keyword also
  2021. // indicates dereferenceability, and if the size is constant we can
  2022. // use the dereferenceable attribute (which requires the size in
  2023. // bytes).
  2024. if (ArrTy->getSizeModifier() == ArrayType::Static) {
  2025. QualType ETy = ArrTy->getElementType();
  2026. uint64_t ArrSize = ArrTy->getSize().getZExtValue();
  2027. if (!ETy->isIncompleteType() && ETy->isConstantSizeType() &&
  2028. ArrSize) {
  2029. llvm::AttrBuilder Attrs;
  2030. Attrs.addDereferenceableAttr(
  2031. getContext().getTypeSizeInChars(ETy).getQuantity()*ArrSize);
  2032. AI->addAttrs(Attrs);
  2033. } else if (getContext().getTargetAddressSpace(ETy) == 0) {
  2034. AI->addAttr(llvm::Attribute::NonNull);
  2035. }
  2036. }
  2037. } else if (const auto *ArrTy =
  2038. getContext().getAsVariableArrayType(OTy)) {
  2039. // For C99 VLAs with the static keyword, we don't know the size so
  2040. // we can't use the dereferenceable attribute, but in addrspace(0)
  2041. // we know that it must be nonnull.
  2042. if (ArrTy->getSizeModifier() == VariableArrayType::Static &&
  2043. !getContext().getTargetAddressSpace(ArrTy->getElementType()))
  2044. AI->addAttr(llvm::Attribute::NonNull);
  2045. }
  2046. const auto *AVAttr = PVD->getAttr<AlignValueAttr>();
  2047. if (!AVAttr)
  2048. if (const auto *TOTy = dyn_cast<TypedefType>(OTy))
  2049. AVAttr = TOTy->getDecl()->getAttr<AlignValueAttr>();
  2050. if (AVAttr) {
  2051. llvm::Value *AlignmentValue =
  2052. EmitScalarExpr(AVAttr->getAlignment());
  2053. llvm::ConstantInt *AlignmentCI =
  2054. cast<llvm::ConstantInt>(AlignmentValue);
  2055. unsigned Alignment = std::min((unsigned)AlignmentCI->getZExtValue(),
  2056. +llvm::Value::MaximumAlignment);
  2057. AI->addAttrs(llvm::AttrBuilder().addAlignmentAttr(Alignment));
  2058. }
  2059. }
  2060. if (Arg->getType().isRestrictQualified())
  2061. AI->addAttr(llvm::Attribute::NoAlias);
  2062. // LLVM expects swifterror parameters to be used in very restricted
  2063. // ways. Copy the value into a less-restricted temporary.
  2064. if (FI.getExtParameterInfo(ArgNo).getABI()
  2065. == ParameterABI::SwiftErrorResult) {
  2066. QualType pointeeTy = Ty->getPointeeType();
  2067. assert(pointeeTy->isPointerType());
  2068. Address temp =
  2069. CreateMemTemp(pointeeTy, getPointerAlign(), "swifterror.temp");
  2070. Address arg = Address(V, getContext().getTypeAlignInChars(pointeeTy));
  2071. llvm::Value *incomingErrorValue = Builder.CreateLoad(arg);
  2072. Builder.CreateStore(incomingErrorValue, temp);
  2073. V = temp.getPointer();
  2074. // Push a cleanup to copy the value back at the end of the function.
  2075. // The convention does not guarantee that the value will be written
  2076. // back if the function exits with an unwind exception.
  2077. EHStack.pushCleanup<CopyBackSwiftError>(NormalCleanup, temp, arg);
  2078. }
  2079. // Ensure the argument is the correct type.
  2080. if (V->getType() != ArgI.getCoerceToType())
  2081. V = Builder.CreateBitCast(V, ArgI.getCoerceToType());
  2082. if (isPromoted)
  2083. V = emitArgumentDemotion(*this, Arg, V);
  2084. // Because of merging of function types from multiple decls it is
  2085. // possible for the type of an argument to not match the corresponding
  2086. // type in the function type. Since we are codegening the callee
  2087. // in here, add a cast to the argument type.
  2088. llvm::Type *LTy = ConvertType(Arg->getType());
  2089. if (V->getType() != LTy)
  2090. V = Builder.CreateBitCast(V, LTy);
  2091. ArgVals.push_back(ParamValue::forDirect(V));
  2092. break;
  2093. }
  2094. Address Alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg),
  2095. Arg->getName());
  2096. // Pointer to store into.
  2097. Address Ptr = emitAddressAtOffset(*this, Alloca, ArgI);
  2098. // Fast-isel and the optimizer generally like scalar values better than
2099. // FCAs, so we flatten them if it is safe to do so for this argument.
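// Editor's illustration (hedged; the exact coercion is target-dependent): a
// parameter of type
//   struct Pt { double x, y; };
// coerced to { double, double } arrives as two scalar IR arguments, renamed
// "pt.coerce0" and "pt.coerce1" below, and is reassembled field by field into
// the alloca rather than built up as a first-class aggregate value.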
  2100. llvm::StructType *STy = dyn_cast<llvm::StructType>(ArgI.getCoerceToType());
  2101. if (ArgI.isDirect() && ArgI.getCanBeFlattened() && STy &&
  2102. STy->getNumElements() > 1) {
  2103. auto SrcLayout = CGM.getDataLayout().getStructLayout(STy);
  2104. uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(STy);
  2105. llvm::Type *DstTy = Ptr.getElementType();
  2106. uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(DstTy);
  2107. Address AddrToStoreInto = Address::invalid();
  2108. if (SrcSize <= DstSize) {
  2109. AddrToStoreInto = Builder.CreateElementBitCast(Ptr, STy);
  2110. } else {
  2111. AddrToStoreInto =
  2112. CreateTempAlloca(STy, Alloca.getAlignment(), "coerce");
  2113. }
  2114. assert(STy->getNumElements() == NumIRArgs);
  2115. for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
  2116. auto AI = FnArgs[FirstIRArg + i];
  2117. AI->setName(Arg->getName() + ".coerce" + Twine(i));
  2118. auto Offset = CharUnits::fromQuantity(SrcLayout->getElementOffset(i));
  2119. Address EltPtr =
  2120. Builder.CreateStructGEP(AddrToStoreInto, i, Offset);
  2121. Builder.CreateStore(AI, EltPtr);
  2122. }
  2123. if (SrcSize > DstSize) {
  2124. Builder.CreateMemCpy(Ptr, AddrToStoreInto, DstSize);
  2125. }
  2126. } else {
  2127. // Simple case, just do a coerced store of the argument into the alloca.
  2128. assert(NumIRArgs == 1);
  2129. auto AI = FnArgs[FirstIRArg];
  2130. AI->setName(Arg->getName() + ".coerce");
  2131. CreateCoercedStore(AI, Ptr, /*DestIsVolatile=*/false, *this);
  2132. }
  2133. // Match to what EmitParmDecl is expecting for this type.
  2134. if (CodeGenFunction::hasScalarEvaluationKind(Ty)) {
  2135. llvm::Value *V =
  2136. EmitLoadOfScalar(Alloca, false, Ty, Arg->getLocStart());
  2137. if (isPromoted)
  2138. V = emitArgumentDemotion(*this, Arg, V);
  2139. ArgVals.push_back(ParamValue::forDirect(V));
  2140. } else {
  2141. ArgVals.push_back(ParamValue::forIndirect(Alloca));
  2142. }
  2143. break;
  2144. }
  2145. case ABIArgInfo::CoerceAndExpand: {
  2146. // Reconstruct into a temporary.
  2147. Address alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg));
  2148. ArgVals.push_back(ParamValue::forIndirect(alloca));
  2149. auto coercionType = ArgI.getCoerceAndExpandType();
  2150. alloca = Builder.CreateElementBitCast(alloca, coercionType);
  2151. auto layout = CGM.getDataLayout().getStructLayout(coercionType);
  2152. unsigned argIndex = FirstIRArg;
  2153. for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
  2154. llvm::Type *eltType = coercionType->getElementType(i);
  2155. if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType))
  2156. continue;
  2157. auto eltAddr = Builder.CreateStructGEP(alloca, i, layout);
  2158. auto elt = FnArgs[argIndex++];
  2159. Builder.CreateStore(elt, eltAddr);
  2160. }
  2161. assert(argIndex == FirstIRArg + NumIRArgs);
  2162. break;
  2163. }
  2164. case ABIArgInfo::Expand: {
  2165. // If this structure was expanded into multiple arguments then
  2166. // we need to create a temporary and reconstruct it from the
  2167. // arguments.
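// Editor's example (hedged; whether a type is expanded is ABI-specific): a
// parameter of type
//   struct S { int i; float f; };
// may be expanded into separate i32 and float IR arguments, which are stored
// back into one temporary so the body can use 'S' normally. The pieces are
// renamed "s.0", "s.1", ... below.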
  2168. Address Alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg));
  2169. LValue LV = MakeAddrLValue(Alloca, Ty);
  2170. ArgVals.push_back(ParamValue::forIndirect(Alloca));
  2171. auto FnArgIter = FnArgs.begin() + FirstIRArg;
  2172. ExpandTypeFromArgs(Ty, LV, FnArgIter);
  2173. assert(FnArgIter == FnArgs.begin() + FirstIRArg + NumIRArgs);
  2174. for (unsigned i = 0, e = NumIRArgs; i != e; ++i) {
  2175. auto AI = FnArgs[FirstIRArg + i];
  2176. AI->setName(Arg->getName() + "." + Twine(i));
  2177. }
  2178. break;
  2179. }
  2180. case ABIArgInfo::Ignore:
  2181. assert(NumIRArgs == 0);
  2182. // Initialize the local variable appropriately.
  2183. if (!hasScalarEvaluationKind(Ty)) {
  2184. ArgVals.push_back(ParamValue::forIndirect(CreateMemTemp(Ty)));
  2185. } else {
  2186. llvm::Value *U = llvm::UndefValue::get(ConvertType(Arg->getType()));
  2187. ArgVals.push_back(ParamValue::forDirect(U));
  2188. }
  2189. break;
  2190. }
  2191. }
  2192. if (getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) {
  2193. for (int I = Args.size() - 1; I >= 0; --I)
  2194. EmitParmDecl(*Args[I], ArgVals[I], I + 1);
  2195. } else {
  2196. for (unsigned I = 0, E = Args.size(); I != E; ++I)
  2197. EmitParmDecl(*Args[I], ArgVals[I], I + 1);
  2198. }
  2199. }
  2200. static void eraseUnusedBitCasts(llvm::Instruction *insn) {
  2201. while (insn->use_empty()) {
  2202. llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(insn);
  2203. if (!bitcast) return;
  2204. // This is "safe" because we would have used a ConstantExpr otherwise.
  2205. insn = cast<llvm::Instruction>(bitcast->getOperand(0));
  2206. bitcast->eraseFromParent();
  2207. }
  2208. }
  2209. /// Try to emit a fused autorelease of a return result.
  2210. static llvm::Value *tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF,
  2211. llvm::Value *result) {
2212. // We must be immediately following the result.
  2213. llvm::BasicBlock *BB = CGF.Builder.GetInsertBlock();
  2214. if (BB->empty()) return nullptr;
  2215. if (&BB->back() != result) return nullptr;
  2216. llvm::Type *resultType = result->getType();
  2217. // result is in a BasicBlock and is therefore an Instruction.
  2218. llvm::Instruction *generator = cast<llvm::Instruction>(result);
  2219. SmallVector<llvm::Instruction *, 4> InstsToKill;
  2220. // Look for:
  2221. // %generator = bitcast %type1* %generator2 to %type2*
  2222. while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(generator)) {
  2223. // We would have emitted this as a constant if the operand weren't
  2224. // an Instruction.
  2225. generator = cast<llvm::Instruction>(bitcast->getOperand(0));
  2226. // Require the generator to be immediately followed by the cast.
  2227. if (generator->getNextNode() != bitcast)
  2228. return nullptr;
  2229. InstsToKill.push_back(bitcast);
  2230. }
  2231. // Look for:
  2232. // %generator = call i8* @objc_retain(i8* %originalResult)
  2233. // or
  2234. // %generator = call i8* @objc_retainAutoreleasedReturnValue(i8* %originalResult)
  2235. llvm::CallInst *call = dyn_cast<llvm::CallInst>(generator);
  2236. if (!call) return nullptr;
  2237. bool doRetainAutorelease;
  2238. if (call->getCalledValue() == CGF.CGM.getObjCEntrypoints().objc_retain) {
  2239. doRetainAutorelease = true;
  2240. } else if (call->getCalledValue() == CGF.CGM.getObjCEntrypoints()
  2241. .objc_retainAutoreleasedReturnValue) {
  2242. doRetainAutorelease = false;
  2243. // If we emitted an assembly marker for this call (and the
  2244. // ARCEntrypoints field should have been set if so), go looking
  2245. // for that call. If we can't find it, we can't do this
  2246. // optimization. But it should always be the immediately previous
  2247. // instruction, unless we needed bitcasts around the call.
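// Editor's sketch (hedged; the marker string is target-specific):
//   %call = call i8* @objc_msgSend(...)
//   call void asm sideeffect "<marker>", ""()
//   %retained = call i8* @objc_retainAutoreleasedReturnValue(i8* %call)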
  2248. if (CGF.CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker) {
  2249. llvm::Instruction *prev = call->getPrevNode();
  2250. assert(prev);
  2251. if (isa<llvm::BitCastInst>(prev)) {
  2252. prev = prev->getPrevNode();
  2253. assert(prev);
  2254. }
  2255. assert(isa<llvm::CallInst>(prev));
  2256. assert(cast<llvm::CallInst>(prev)->getCalledValue() ==
  2257. CGF.CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker);
  2258. InstsToKill.push_back(prev);
  2259. }
  2260. } else {
  2261. return nullptr;
  2262. }
  2263. result = call->getArgOperand(0);
  2264. InstsToKill.push_back(call);
  2265. // Keep killing bitcasts, for sanity. Note that we no longer care
  2266. // about precise ordering as long as there's exactly one use.
  2267. while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(result)) {
  2268. if (!bitcast->hasOneUse()) break;
  2269. InstsToKill.push_back(bitcast);
  2270. result = bitcast->getOperand(0);
  2271. }
  2272. // Delete all the unnecessary instructions, from latest to earliest.
  2273. for (auto *I : InstsToKill)
  2274. I->eraseFromParent();
  2275. // Do the fused retain/autorelease if we were asked to.
  2276. if (doRetainAutorelease)
  2277. result = CGF.EmitARCRetainAutoreleaseReturnValue(result);
  2278. // Cast back to the result type.
  2279. return CGF.Builder.CreateBitCast(result, resultType);
  2280. }
  2281. /// If this is a +1 of the value of an immutable 'self', remove it.
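/// For instance (editor's illustration, hedged), in an ARC method such as
///   - (id)owner { return self; }
/// the return would otherwise retain (and later autorelease) 'self'; when
/// 'self' is immutable we can drop that retain and return the loaded value
/// directly.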
  2282. static llvm::Value *tryRemoveRetainOfSelf(CodeGenFunction &CGF,
  2283. llvm::Value *result) {
  2284. // This is only applicable to a method with an immutable 'self'.
  2285. const ObjCMethodDecl *method =
  2286. dyn_cast_or_null<ObjCMethodDecl>(CGF.CurCodeDecl);
  2287. if (!method) return nullptr;
  2288. const VarDecl *self = method->getSelfDecl();
  2289. if (!self->getType().isConstQualified()) return nullptr;
  2290. // Look for a retain call.
  2291. llvm::CallInst *retainCall =
  2292. dyn_cast<llvm::CallInst>(result->stripPointerCasts());
  2293. if (!retainCall ||
  2294. retainCall->getCalledValue() != CGF.CGM.getObjCEntrypoints().objc_retain)
  2295. return nullptr;
  2296. // Look for an ordinary load of 'self'.
  2297. llvm::Value *retainedValue = retainCall->getArgOperand(0);
  2298. llvm::LoadInst *load =
  2299. dyn_cast<llvm::LoadInst>(retainedValue->stripPointerCasts());
  2300. if (!load || load->isAtomic() || load->isVolatile() ||
  2301. load->getPointerOperand() != CGF.GetAddrOfLocalVar(self).getPointer())
  2302. return nullptr;
  2303. // Okay! Burn it all down. This relies for correctness on the
  2304. // assumption that the retain is emitted as part of the return and
  2305. // that thereafter everything is used "linearly".
  2306. llvm::Type *resultType = result->getType();
  2307. eraseUnusedBitCasts(cast<llvm::Instruction>(result));
  2308. assert(retainCall->use_empty());
  2309. retainCall->eraseFromParent();
  2310. eraseUnusedBitCasts(cast<llvm::Instruction>(retainedValue));
  2311. return CGF.Builder.CreateBitCast(load, resultType);
  2312. }
  2313. /// Emit an ARC autorelease of the result of a function.
  2314. ///
  2315. /// \return the value to actually return from the function
  2316. static llvm::Value *emitAutoreleaseOfResult(CodeGenFunction &CGF,
  2317. llvm::Value *result) {
  2318. // If we're returning 'self', kill the initial retain. This is a
  2319. // heuristic attempt to "encourage correctness" in the really unfortunate
  2320. // case where we have a return of self during a dealloc and we desperately
  2321. // need to avoid the possible autorelease.
  2322. if (llvm::Value *self = tryRemoveRetainOfSelf(CGF, result))
  2323. return self;
  2324. // At -O0, try to emit a fused retain/autorelease.
  2325. if (CGF.shouldUseFusedARCCalls())
  2326. if (llvm::Value *fused = tryEmitFusedAutoreleaseOfResult(CGF, result))
  2327. return fused;
  2328. return CGF.EmitARCAutoreleaseReturnValue(result);
  2329. }
  2330. /// Heuristically search for a dominating store to the return-value slot.
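/// For a simple function (editor's sketch, hedged) such as
///   int f(void) { return g(); }
/// the only store to the return-value slot is the one emitted for the return
/// statement itself; finding it lets the caller forward the stored value and
/// delete both the store and, usually, the alloca.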
  2331. static llvm::StoreInst *findDominatingStoreToReturnValue(CodeGenFunction &CGF) {
2332. // Check if a User is a store whose pointer operand is the ReturnValue.
  2333. // We are looking for stores to the ReturnValue, not for stores of the
  2334. // ReturnValue to some other location.
  2335. auto GetStoreIfValid = [&CGF](llvm::User *U) -> llvm::StoreInst * {
  2336. auto *SI = dyn_cast<llvm::StoreInst>(U);
  2337. if (!SI || SI->getPointerOperand() != CGF.ReturnValue.getPointer())
  2338. return nullptr;
  2339. // These aren't actually possible for non-coerced returns, and we
  2340. // only care about non-coerced returns on this code path.
  2341. assert(!SI->isAtomic() && !SI->isVolatile());
  2342. return SI;
  2343. };
  2344. // If there are multiple uses of the return-value slot, just check
  2345. // for something immediately preceding the IP. Sometimes this can
  2346. // happen with how we generate implicit-returns; it can also happen
  2347. // with noreturn cleanups.
  2348. if (!CGF.ReturnValue.getPointer()->hasOneUse()) {
  2349. llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
  2350. if (IP->empty()) return nullptr;
  2351. llvm::Instruction *I = &IP->back();
  2352. // Skip lifetime markers
  2353. for (llvm::BasicBlock::reverse_iterator II = IP->rbegin(),
  2354. IE = IP->rend();
  2355. II != IE; ++II) {
  2356. if (llvm::IntrinsicInst *Intrinsic =
  2357. dyn_cast<llvm::IntrinsicInst>(&*II)) {
  2358. if (Intrinsic->getIntrinsicID() == llvm::Intrinsic::lifetime_end) {
  2359. const llvm::Value *CastAddr = Intrinsic->getArgOperand(1);
  2360. ++II;
  2361. if (II == IE)
  2362. break;
  2363. if (isa<llvm::BitCastInst>(&*II) && (CastAddr == &*II))
  2364. continue;
  2365. }
  2366. }
  2367. I = &*II;
  2368. break;
  2369. }
  2370. return GetStoreIfValid(I);
  2371. }
  2372. llvm::StoreInst *store =
  2373. GetStoreIfValid(CGF.ReturnValue.getPointer()->user_back());
  2374. if (!store) return nullptr;
2375. // Now do a quick-and-dirty dominance check: just walk up the
2376. // single-predecessor chain from the current insertion point.
  2377. llvm::BasicBlock *StoreBB = store->getParent();
  2378. llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
  2379. while (IP != StoreBB) {
  2380. if (!(IP = IP->getSinglePredecessor()))
  2381. return nullptr;
  2382. }
  2383. // Okay, the store's basic block dominates the insertion point; we
  2384. // can do our thing.
  2385. return store;
  2386. }
  2387. void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
  2388. bool EmitRetDbgLoc,
  2389. SourceLocation EndLoc) {
  2390. if (CurCodeDecl && CurCodeDecl->hasAttr<NakedAttr>()) {
  2391. // Naked functions don't have epilogues.
  2392. Builder.CreateUnreachable();
  2393. return;
  2394. }
  2395. // Functions with no result always return void.
  2396. if (!ReturnValue.isValid()) {
  2397. Builder.CreateRetVoid();
  2398. return;
  2399. }
  2400. llvm::DebugLoc RetDbgLoc;
  2401. llvm::Value *RV = nullptr;
  2402. QualType RetTy = FI.getReturnType();
  2403. const ABIArgInfo &RetAI = FI.getReturnInfo();
  2404. switch (RetAI.getKind()) {
  2405. case ABIArgInfo::InAlloca:
2406. // Aggregates get evaluated directly into the destination. Sometimes we
  2407. // need to return the sret value in a register, though.
  2408. assert(hasAggregateEvaluationKind(RetTy));
  2409. if (RetAI.getInAllocaSRet()) {
  2410. llvm::Function::arg_iterator EI = CurFn->arg_end();
  2411. --EI;
  2412. llvm::Value *ArgStruct = &*EI;
  2413. llvm::Value *SRet = Builder.CreateStructGEP(
  2414. nullptr, ArgStruct, RetAI.getInAllocaFieldIndex());
  2415. RV = Builder.CreateAlignedLoad(SRet, getPointerAlign(), "sret");
  2416. }
  2417. break;
  2418. case ABIArgInfo::Indirect: {
  2419. auto AI = CurFn->arg_begin();
  2420. if (RetAI.isSRetAfterThis())
  2421. ++AI;
  2422. switch (getEvaluationKind(RetTy)) {
  2423. case TEK_Complex: {
  2424. ComplexPairTy RT =
  2425. EmitLoadOfComplex(MakeAddrLValue(ReturnValue, RetTy), EndLoc);
  2426. EmitStoreOfComplex(RT, MakeNaturalAlignAddrLValue(&*AI, RetTy),
  2427. /*isInit*/ true);
  2428. break;
  2429. }
  2430. case TEK_Aggregate:
2431. // Do nothing; aggregates get evaluated directly into the destination.
  2432. break;
  2433. case TEK_Scalar:
  2434. EmitStoreOfScalar(Builder.CreateLoad(ReturnValue),
  2435. MakeNaturalAlignAddrLValue(&*AI, RetTy),
  2436. /*isInit*/ true);
  2437. break;
  2438. }
  2439. break;
  2440. }
  2441. case ABIArgInfo::Extend:
  2442. case ABIArgInfo::Direct:
  2443. if (RetAI.getCoerceToType() == ConvertType(RetTy) &&
  2444. RetAI.getDirectOffset() == 0) {
2445. // The internal return value temp will always have pointer-to-return-type
2446. // type; just do a load.
  2447. // If there is a dominating store to ReturnValue, we can elide
  2448. // the load, zap the store, and usually zap the alloca.
  2449. if (llvm::StoreInst *SI =
  2450. findDominatingStoreToReturnValue(*this)) {
  2451. // Reuse the debug location from the store unless there is
  2452. // cleanup code to be emitted between the store and return
  2453. // instruction.
  2454. if (EmitRetDbgLoc && !AutoreleaseResult)
  2455. RetDbgLoc = SI->getDebugLoc();
  2456. // Get the stored value and nuke the now-dead store.
  2457. RV = SI->getValueOperand();
  2458. SI->eraseFromParent();
  2459. // If that was the only use of the return value, nuke it as well now.
  2460. auto returnValueInst = ReturnValue.getPointer();
  2461. if (returnValueInst->use_empty()) {
  2462. if (auto alloca = dyn_cast<llvm::AllocaInst>(returnValueInst)) {
  2463. alloca->eraseFromParent();
  2464. ReturnValue = Address::invalid();
  2465. }
  2466. }
  2467. // Otherwise, we have to do a simple load.
  2468. } else {
  2469. RV = Builder.CreateLoad(ReturnValue);
  2470. }
  2471. } else {
  2472. // If the value is offset in memory, apply the offset now.
  2473. Address V = emitAddressAtOffset(*this, ReturnValue, RetAI);
  2474. RV = CreateCoercedLoad(V, RetAI.getCoerceToType(), *this);
  2475. }
  2476. // In ARC, end functions that return a retainable type with a call
  2477. // to objc_autoreleaseReturnValue.
  2478. if (AutoreleaseResult) {
  2479. #ifndef NDEBUG
2480. // Type::isObjCRetainableType has to be called on a QualType that hasn't
2481. // been stripped of the typedefs, so we cannot use RetTy here. Get the
2482. // original return type of the FunctionDecl, ObjCMethodDecl, or BlockDecl
2483. // from CurCodeDecl or BlockInfo.
  2484. QualType RT;
  2485. if (auto *FD = dyn_cast<FunctionDecl>(CurCodeDecl))
  2486. RT = FD->getReturnType();
  2487. else if (auto *MD = dyn_cast<ObjCMethodDecl>(CurCodeDecl))
  2488. RT = MD->getReturnType();
  2489. else if (isa<BlockDecl>(CurCodeDecl))
  2490. RT = BlockInfo->BlockExpression->getFunctionType()->getReturnType();
  2491. else
  2492. llvm_unreachable("Unexpected function/method type");
  2493. assert(getLangOpts().ObjCAutoRefCount &&
  2494. !FI.isReturnsRetained() &&
  2495. RT->isObjCRetainableType());
  2496. #endif
  2497. RV = emitAutoreleaseOfResult(*this, RV);
  2498. }
  2499. break;
  2500. case ABIArgInfo::Ignore:
  2501. break;
  2502. case ABIArgInfo::CoerceAndExpand: {
  2503. auto coercionType = RetAI.getCoerceAndExpandType();
  2504. auto layout = CGM.getDataLayout().getStructLayout(coercionType);
  2505. // Load all of the coerced elements out into results.
  2506. llvm::SmallVector<llvm::Value*, 4> results;
  2507. Address addr = Builder.CreateElementBitCast(ReturnValue, coercionType);
  2508. for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
  2509. auto coercedEltType = coercionType->getElementType(i);
  2510. if (ABIArgInfo::isPaddingForCoerceAndExpand(coercedEltType))
  2511. continue;
  2512. auto eltAddr = Builder.CreateStructGEP(addr, i, layout);
  2513. auto elt = Builder.CreateLoad(eltAddr);
  2514. results.push_back(elt);
  2515. }
  2516. // If we have one result, it's the single direct result type.
  2517. if (results.size() == 1) {
  2518. RV = results[0];
  2519. // Otherwise, we need to make a first-class aggregate.
  2520. } else {
  2521. // Construct a return type that lacks padding elements.
  2522. llvm::Type *returnType = RetAI.getUnpaddedCoerceAndExpandType();
  2523. RV = llvm::UndefValue::get(returnType);
  2524. for (unsigned i = 0, e = results.size(); i != e; ++i) {
  2525. RV = Builder.CreateInsertValue(RV, results[i], i);
  2526. }
  2527. }
  2528. break;
  2529. }
  2530. case ABIArgInfo::Expand:
  2531. llvm_unreachable("Invalid ABI kind for return argument");
  2532. }
  2533. llvm::Instruction *Ret;
  2534. if (RV) {
  2535. EmitReturnValueCheck(RV);
  2536. Ret = Builder.CreateRet(RV);
  2537. } else {
  2538. Ret = Builder.CreateRetVoid();
  2539. }
  2540. if (RetDbgLoc)
  2541. Ret->setDebugLoc(std::move(RetDbgLoc));
  2542. }
  2543. void CodeGenFunction::EmitReturnValueCheck(llvm::Value *RV) {
  2544. // A current decl may not be available when emitting vtable thunks.
  2545. if (!CurCodeDecl)
  2546. return;
  2547. ReturnsNonNullAttr *RetNNAttr = nullptr;
  2548. if (SanOpts.has(SanitizerKind::ReturnsNonnullAttribute))
  2549. RetNNAttr = CurCodeDecl->getAttr<ReturnsNonNullAttr>();
  2550. if (!RetNNAttr && !requiresReturnValueNullabilityCheck())
  2551. return;
  2552. // Prefer the returns_nonnull attribute if it's present.
  2553. SourceLocation AttrLoc;
  2554. SanitizerMask CheckKind;
  2555. SanitizerHandler Handler;
  2556. if (RetNNAttr) {
  2557. assert(!requiresReturnValueNullabilityCheck() &&
  2558. "Cannot check nullability and the nonnull attribute");
  2559. AttrLoc = RetNNAttr->getLocation();
  2560. CheckKind = SanitizerKind::ReturnsNonnullAttribute;
  2561. Handler = SanitizerHandler::NonnullReturn;
  2562. } else {
  2563. if (auto *DD = dyn_cast<DeclaratorDecl>(CurCodeDecl))
  2564. if (auto *TSI = DD->getTypeSourceInfo())
  2565. if (auto FTL = TSI->getTypeLoc().castAs<FunctionTypeLoc>())
  2566. AttrLoc = FTL.getReturnLoc().findNullabilityLoc();
  2567. CheckKind = SanitizerKind::NullabilityReturn;
  2568. Handler = SanitizerHandler::NullabilityReturn;
  2569. }
  2570. SanitizerScope SanScope(this);
  2571. // Make sure the "return" source location is valid. If we're checking a
  2572. // nullability annotation, make sure the preconditions for the check are met.
  2573. llvm::BasicBlock *Check = createBasicBlock("nullcheck");
  2574. llvm::BasicBlock *NoCheck = createBasicBlock("no.nullcheck");
  2575. llvm::Value *SLocPtr = Builder.CreateLoad(ReturnLocation, "return.sloc.load");
  2576. llvm::Value *CanNullCheck = Builder.CreateIsNotNull(SLocPtr);
  2577. if (requiresReturnValueNullabilityCheck())
  2578. CanNullCheck =
  2579. Builder.CreateAnd(CanNullCheck, RetValNullabilityPrecondition);
  2580. Builder.CreateCondBr(CanNullCheck, Check, NoCheck);
  2581. EmitBlock(Check);
  2582. // Now do the null check.
  2583. llvm::Value *Cond = Builder.CreateIsNotNull(RV);
  2584. llvm::Constant *StaticData[] = {EmitCheckSourceLocation(AttrLoc)};
  2585. llvm::Value *DynamicData[] = {SLocPtr};
  2586. EmitCheck(std::make_pair(Cond, CheckKind), Handler, StaticData, DynamicData);
  2587. EmitBlock(NoCheck);
  2588. #ifndef NDEBUG
  2589. // The return location should not be used after the check has been emitted.
  2590. ReturnLocation = Address::invalid();
  2591. #endif
  2592. }
  2593. static bool isInAllocaArgument(CGCXXABI &ABI, QualType type) {
  2594. const CXXRecordDecl *RD = type->getAsCXXRecordDecl();
  2595. return RD && ABI.getRecordArgABI(RD) == CGCXXABI::RAA_DirectInMemory;
  2596. }
  2597. static AggValueSlot createPlaceholderSlot(CodeGenFunction &CGF,
  2598. QualType Ty) {
  2599. // FIXME: Generate IR in one pass, rather than going back and fixing up these
  2600. // placeholders.
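// Editor's note (hedged): the placeholder built here is a load of an undef
// pointer; once the inalloca argument block has been allocated, EmitCall
// replaces it (via deferPlaceholderReplacement) with a GEP into that block.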
  2601. llvm::Type *IRTy = CGF.ConvertTypeForMem(Ty);
  2602. llvm::Type *IRPtrTy = IRTy->getPointerTo();
  2603. llvm::Value *Placeholder = llvm::UndefValue::get(IRPtrTy->getPointerTo());
  2604. // FIXME: When we generate this IR in one pass, we shouldn't need
  2605. // this win32-specific alignment hack.
  2606. CharUnits Align = CharUnits::fromQuantity(4);
  2607. Placeholder = CGF.Builder.CreateAlignedLoad(IRPtrTy, Placeholder, Align);
  2608. return AggValueSlot::forAddr(Address(Placeholder, Align),
  2609. Ty.getQualifiers(),
  2610. AggValueSlot::IsNotDestructed,
  2611. AggValueSlot::DoesNotNeedGCBarriers,
  2612. AggValueSlot::IsNotAliased);
  2613. }
  2614. void CodeGenFunction::EmitDelegateCallArg(CallArgList &args,
  2615. const VarDecl *param,
  2616. SourceLocation loc) {
  2617. // StartFunction converted the ABI-lowered parameter(s) into a
  2618. // local alloca. We need to turn that into an r-value suitable
  2619. // for EmitCall.
  2620. Address local = GetAddrOfLocalVar(param);
  2621. QualType type = param->getType();
  2622. assert(!isInAllocaArgument(CGM.getCXXABI(), type) &&
  2623. "cannot emit delegate call arguments for inalloca arguments!");
  2624. // GetAddrOfLocalVar returns a pointer-to-pointer for references,
  2625. // but the argument needs to be the original pointer.
  2626. if (type->isReferenceType()) {
  2627. args.add(RValue::get(Builder.CreateLoad(local)), type);
  2628. // In ARC, move out of consumed arguments so that the release cleanup
  2629. // entered by StartFunction doesn't cause an over-release. This isn't
  2630. // optimal -O0 code generation, but it should get cleaned up when
  2631. // optimization is enabled. This also assumes that delegate calls are
  2632. // performed exactly once for a set of arguments, but that should be safe.
  2633. } else if (getLangOpts().ObjCAutoRefCount &&
  2634. param->hasAttr<NSConsumedAttr>() &&
  2635. type->isObjCRetainableType()) {
  2636. llvm::Value *ptr = Builder.CreateLoad(local);
  2637. auto null =
  2638. llvm::ConstantPointerNull::get(cast<llvm::PointerType>(ptr->getType()));
  2639. Builder.CreateStore(null, local);
  2640. args.add(RValue::get(ptr), type);
  2641. // For the most part, we just need to load the alloca, except that
  2642. // aggregate r-values are actually pointers to temporaries.
  2643. } else {
  2644. args.add(convertTempToRValue(local, type, loc), type);
  2645. }
  2646. }
  2647. static bool isProvablyNull(llvm::Value *addr) {
  2648. return isa<llvm::ConstantPointerNull>(addr);
  2649. }
  2650. /// Emit the actual writing-back of a writeback.
  2651. static void emitWriteback(CodeGenFunction &CGF,
  2652. const CallArgList::Writeback &writeback) {
  2653. const LValue &srcLV = writeback.Source;
  2654. Address srcAddr = srcLV.getAddress();
  2655. assert(!isProvablyNull(srcAddr.getPointer()) &&
  2656. "shouldn't have writeback for provably null argument");
  2657. llvm::BasicBlock *contBB = nullptr;
  2658. // If the argument wasn't provably non-null, we need to null check
  2659. // before doing the store.
  2660. bool provablyNonNull = llvm::isKnownNonZero(srcAddr.getPointer(),
  2661. CGF.CGM.getDataLayout());
  2662. if (!provablyNonNull) {
  2663. llvm::BasicBlock *writebackBB = CGF.createBasicBlock("icr.writeback");
  2664. contBB = CGF.createBasicBlock("icr.done");
  2665. llvm::Value *isNull =
  2666. CGF.Builder.CreateIsNull(srcAddr.getPointer(), "icr.isnull");
  2667. CGF.Builder.CreateCondBr(isNull, contBB, writebackBB);
  2668. CGF.EmitBlock(writebackBB);
  2669. }
  2670. // Load the value to writeback.
  2671. llvm::Value *value = CGF.Builder.CreateLoad(writeback.Temporary);
  2672. // Cast it back, in case we're writing an id to a Foo* or something.
  2673. value = CGF.Builder.CreateBitCast(value, srcAddr.getElementType(),
  2674. "icr.writeback-cast");
  2675. // Perform the writeback.
  2676. // If we have a "to use" value, it's something we need to emit a use
  2677. // of. This has to be carefully threaded in: if it's done after the
  2678. // release it's potentially undefined behavior (and the optimizer
  2679. // will ignore it), and if it happens before the retain then the
  2680. // optimizer could move the release there.
  2681. if (writeback.ToUse) {
  2682. assert(srcLV.getObjCLifetime() == Qualifiers::OCL_Strong);
  2683. // Retain the new value. No need to block-copy here: the block's
  2684. // being passed up the stack.
  2685. value = CGF.EmitARCRetainNonBlock(value);
  2686. // Emit the intrinsic use here.
  2687. CGF.EmitARCIntrinsicUse(writeback.ToUse);
  2688. // Load the old value (primitively).
  2689. llvm::Value *oldValue = CGF.EmitLoadOfScalar(srcLV, SourceLocation());
  2690. // Put the new value in place (primitively).
  2691. CGF.EmitStoreOfScalar(value, srcLV, /*init*/ false);
  2692. // Release the old value.
  2693. CGF.EmitARCRelease(oldValue, srcLV.isARCPreciseLifetime());
  2694. // Otherwise, we can just do a normal lvalue store.
  2695. } else {
  2696. CGF.EmitStoreThroughLValue(RValue::get(value), srcLV);
  2697. }
  2698. // Jump to the continuation block.
  2699. if (!provablyNonNull)
  2700. CGF.EmitBlock(contBB);
  2701. }
  2702. static void emitWritebacks(CodeGenFunction &CGF,
  2703. const CallArgList &args) {
  2704. for (const auto &I : args.writebacks())
  2705. emitWriteback(CGF, I);
  2706. }
  2707. static void deactivateArgCleanupsBeforeCall(CodeGenFunction &CGF,
  2708. const CallArgList &CallArgs) {
  2709. assert(CGF.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee());
  2710. ArrayRef<CallArgList::CallArgCleanup> Cleanups =
  2711. CallArgs.getCleanupsToDeactivate();
  2712. // Iterate in reverse to increase the likelihood of popping the cleanup.
  2713. for (const auto &I : llvm::reverse(Cleanups)) {
  2714. CGF.DeactivateCleanupBlock(I.Cleanup, I.IsActiveIP);
  2715. I.IsActiveIP->eraseFromParent();
  2716. }
  2717. }
  2718. static const Expr *maybeGetUnaryAddrOfOperand(const Expr *E) {
  2719. if (const UnaryOperator *uop = dyn_cast<UnaryOperator>(E->IgnoreParens()))
  2720. if (uop->getOpcode() == UO_AddrOf)
  2721. return uop->getSubExpr();
  2722. return nullptr;
  2723. }
  2724. /// Emit an argument that's being passed call-by-writeback. That is,
2725. /// we are passing the address of an __autoreleasing temporary; it
  2726. /// might be copy-initialized with the current value of the given
  2727. /// address, but it will definitely be copied out of after the call.
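/// A typical source pattern (editor's illustration, hedged):
///   void getError(NSError **outError);
///   NSError *__strong err = nil;
///   getError(&err);            // parameter is implicitly __autoreleasing
/// Here the callee receives the address of a fresh temporary, and the
/// temporary's value is written back into 'err' after the call returns.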
  2728. static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args,
  2729. const ObjCIndirectCopyRestoreExpr *CRE) {
  2730. LValue srcLV;
  2731. // Make an optimistic effort to emit the address as an l-value.
  2732. // This can fail if the argument expression is more complicated.
  2733. if (const Expr *lvExpr = maybeGetUnaryAddrOfOperand(CRE->getSubExpr())) {
  2734. srcLV = CGF.EmitLValue(lvExpr);
  2735. // Otherwise, just emit it as a scalar.
  2736. } else {
  2737. Address srcAddr = CGF.EmitPointerWithAlignment(CRE->getSubExpr());
  2738. QualType srcAddrType =
  2739. CRE->getSubExpr()->getType()->castAs<PointerType>()->getPointeeType();
  2740. srcLV = CGF.MakeAddrLValue(srcAddr, srcAddrType);
  2741. }
  2742. Address srcAddr = srcLV.getAddress();
  2743. // The dest and src types don't necessarily match in LLVM terms
  2744. // because of the crazy ObjC compatibility rules.
  2745. llvm::PointerType *destType =
  2746. cast<llvm::PointerType>(CGF.ConvertType(CRE->getType()));
  2747. // If the address is a constant null, just pass the appropriate null.
  2748. if (isProvablyNull(srcAddr.getPointer())) {
  2749. args.add(RValue::get(llvm::ConstantPointerNull::get(destType)),
  2750. CRE->getType());
  2751. return;
  2752. }
  2753. // Create the temporary.
  2754. Address temp = CGF.CreateTempAlloca(destType->getElementType(),
  2755. CGF.getPointerAlign(),
  2756. "icr.temp");
  2757. // Loading an l-value can introduce a cleanup if the l-value is __weak,
  2758. // and that cleanup will be conditional if we can't prove that the l-value
  2759. // isn't null, so we need to register a dominating point so that the cleanups
  2760. // system will make valid IR.
  2761. CodeGenFunction::ConditionalEvaluation condEval(CGF);
  2762. // Zero-initialize it if we're not doing a copy-initialization.
  2763. bool shouldCopy = CRE->shouldCopy();
  2764. if (!shouldCopy) {
  2765. llvm::Value *null =
  2766. llvm::ConstantPointerNull::get(
  2767. cast<llvm::PointerType>(destType->getElementType()));
  2768. CGF.Builder.CreateStore(null, temp);
  2769. }
  2770. llvm::BasicBlock *contBB = nullptr;
  2771. llvm::BasicBlock *originBB = nullptr;
  2772. // If the address is *not* known to be non-null, we need to switch.
  2773. llvm::Value *finalArgument;
  2774. bool provablyNonNull = llvm::isKnownNonZero(srcAddr.getPointer(),
  2775. CGF.CGM.getDataLayout());
  2776. if (provablyNonNull) {
  2777. finalArgument = temp.getPointer();
  2778. } else {
  2779. llvm::Value *isNull =
  2780. CGF.Builder.CreateIsNull(srcAddr.getPointer(), "icr.isnull");
  2781. finalArgument = CGF.Builder.CreateSelect(isNull,
  2782. llvm::ConstantPointerNull::get(destType),
  2783. temp.getPointer(), "icr.argument");
  2784. // If we need to copy, then the load has to be conditional, which
  2785. // means we need control flow.
  2786. if (shouldCopy) {
  2787. originBB = CGF.Builder.GetInsertBlock();
  2788. contBB = CGF.createBasicBlock("icr.cont");
  2789. llvm::BasicBlock *copyBB = CGF.createBasicBlock("icr.copy");
  2790. CGF.Builder.CreateCondBr(isNull, contBB, copyBB);
  2791. CGF.EmitBlock(copyBB);
  2792. condEval.begin(CGF);
  2793. }
  2794. }
  2795. llvm::Value *valueToUse = nullptr;
  2796. // Perform a copy if necessary.
  2797. if (shouldCopy) {
  2798. RValue srcRV = CGF.EmitLoadOfLValue(srcLV, SourceLocation());
  2799. assert(srcRV.isScalar());
  2800. llvm::Value *src = srcRV.getScalarVal();
  2801. src = CGF.Builder.CreateBitCast(src, destType->getElementType(),
  2802. "icr.cast");
  2803. // Use an ordinary store, not a store-to-lvalue.
  2804. CGF.Builder.CreateStore(src, temp);
  2805. // If optimization is enabled, and the value was held in a
  2806. // __strong variable, we need to tell the optimizer that this
  2807. // value has to stay alive until we're doing the store back.
  2808. // This is because the temporary is effectively unretained,
  2809. // and so otherwise we can violate the high-level semantics.
  2810. if (CGF.CGM.getCodeGenOpts().OptimizationLevel != 0 &&
  2811. srcLV.getObjCLifetime() == Qualifiers::OCL_Strong) {
  2812. valueToUse = src;
  2813. }
  2814. }
  2815. // Finish the control flow if we needed it.
  2816. if (shouldCopy && !provablyNonNull) {
  2817. llvm::BasicBlock *copyBB = CGF.Builder.GetInsertBlock();
  2818. CGF.EmitBlock(contBB);
  2819. // Make a phi for the value to intrinsically use.
  2820. if (valueToUse) {
  2821. llvm::PHINode *phiToUse = CGF.Builder.CreatePHI(valueToUse->getType(), 2,
  2822. "icr.to-use");
  2823. phiToUse->addIncoming(valueToUse, copyBB);
  2824. phiToUse->addIncoming(llvm::UndefValue::get(valueToUse->getType()),
  2825. originBB);
  2826. valueToUse = phiToUse;
  2827. }
  2828. condEval.end(CGF);
  2829. }
  2830. args.addWriteback(srcLV, temp, valueToUse);
  2831. args.add(RValue::get(finalArgument), CRE->getType());
  2832. }
  2833. void CallArgList::allocateArgumentMemory(CodeGenFunction &CGF) {
  2834. assert(!StackBase);
  2835. // Save the stack.
  2836. llvm::Function *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stacksave);
  2837. StackBase = CGF.Builder.CreateCall(F, {}, "inalloca.save");
  2838. }
  2839. void CallArgList::freeArgumentMemory(CodeGenFunction &CGF) const {
  2840. if (StackBase) {
  2841. // Restore the stack after the call.
  2842. llvm::Value *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stackrestore);
  2843. CGF.Builder.CreateCall(F, StackBase);
  2844. }
  2845. }
  2846. void CodeGenFunction::EmitNonNullArgCheck(RValue RV, QualType ArgType,
  2847. SourceLocation ArgLoc,
  2848. AbstractCallee AC,
  2849. unsigned ParmNum) {
  2850. if (!AC.getDecl() || !(SanOpts.has(SanitizerKind::NonnullAttribute) ||
  2851. SanOpts.has(SanitizerKind::NullabilityArg)))
  2852. return;
  2853. // The param decl may be missing in a variadic function.
  2854. auto PVD = ParmNum < AC.getNumParams() ? AC.getParamDecl(ParmNum) : nullptr;
  2855. unsigned ArgNo = PVD ? PVD->getFunctionScopeIndex() : ParmNum;
  2856. // Prefer the nonnull attribute if it's present.
  2857. const NonNullAttr *NNAttr = nullptr;
  2858. if (SanOpts.has(SanitizerKind::NonnullAttribute))
  2859. NNAttr = getNonNullAttr(AC.getDecl(), PVD, ArgType, ArgNo);
  2860. bool CanCheckNullability = false;
  2861. if (SanOpts.has(SanitizerKind::NullabilityArg) && !NNAttr && PVD) {
  2862. auto Nullability = PVD->getType()->getNullability(getContext());
  2863. CanCheckNullability = Nullability &&
  2864. *Nullability == NullabilityKind::NonNull &&
  2865. PVD->getTypeSourceInfo();
  2866. }
  2867. if (!NNAttr && !CanCheckNullability)
  2868. return;
  2869. SourceLocation AttrLoc;
  2870. SanitizerMask CheckKind;
  2871. SanitizerHandler Handler;
  2872. if (NNAttr) {
  2873. AttrLoc = NNAttr->getLocation();
  2874. CheckKind = SanitizerKind::NonnullAttribute;
  2875. Handler = SanitizerHandler::NonnullArg;
  2876. } else {
  2877. AttrLoc = PVD->getTypeSourceInfo()->getTypeLoc().findNullabilityLoc();
  2878. CheckKind = SanitizerKind::NullabilityArg;
  2879. Handler = SanitizerHandler::NullabilityArg;
  2880. }
  2881. SanitizerScope SanScope(this);
  2882. assert(RV.isScalar());
  2883. llvm::Value *V = RV.getScalarVal();
  2884. llvm::Value *Cond =
  2885. Builder.CreateICmpNE(V, llvm::Constant::getNullValue(V->getType()));
  2886. llvm::Constant *StaticData[] = {
  2887. EmitCheckSourceLocation(ArgLoc), EmitCheckSourceLocation(AttrLoc),
  2888. llvm::ConstantInt::get(Int32Ty, ArgNo + 1),
  2889. };
  2890. EmitCheck(std::make_pair(Cond, CheckKind), Handler, StaticData, None);
  2891. }
  2892. void CodeGenFunction::EmitCallArgs(
  2893. CallArgList &Args, ArrayRef<QualType> ArgTypes,
  2894. llvm::iterator_range<CallExpr::const_arg_iterator> ArgRange,
  2895. AbstractCallee AC, unsigned ParamsToSkip, EvaluationOrder Order) {
  2896. assert((int)ArgTypes.size() == (ArgRange.end() - ArgRange.begin()));
  2897. // We *have* to evaluate arguments from right to left in the MS C++ ABI,
  2898. // because arguments are destroyed left to right in the callee. As a special
  2899. // case, there are certain language constructs that require left-to-right
  2900. // evaluation, and in those cases we consider the evaluation order requirement
  2901. // to trump the "destruction order is reverse construction order" guarantee.
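// Editor's example (hedged): for a call f(g(), h()) under the Microsoft C++
// ABI the arguments are evaluated h() first, then g(); constructs that the
// standard requires to evaluate left-to-right (e.g. braced initializer
// lists) force EvaluationOrder::ForceLeftToRight instead.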
  2902. bool LeftToRight =
  2903. CGM.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()
  2904. ? Order == EvaluationOrder::ForceLeftToRight
  2905. : Order != EvaluationOrder::ForceRightToLeft;
  2906. auto MaybeEmitImplicitObjectSize = [&](unsigned I, const Expr *Arg,
  2907. RValue EmittedArg) {
  2908. if (!AC.hasFunctionDecl() || I >= AC.getNumParams())
  2909. return;
  2910. auto *PS = AC.getParamDecl(I)->getAttr<PassObjectSizeAttr>();
  2911. if (PS == nullptr)
  2912. return;
  2913. const auto &Context = getContext();
  2914. auto SizeTy = Context.getSizeType();
  2915. auto T = Builder.getIntNTy(Context.getTypeSize(SizeTy));
  2916. assert(EmittedArg.getScalarVal() && "We emitted nothing for the arg?");
  2917. llvm::Value *V = evaluateOrEmitBuiltinObjectSize(Arg, PS->getType(), T,
  2918. EmittedArg.getScalarVal());
  2919. Args.add(RValue::get(V), SizeTy);
  2920. // If we're emitting args in reverse, be sure to do so with
  2921. // pass_object_size, as well.
  2922. if (!LeftToRight)
  2923. std::swap(Args.back(), *(&Args.back() - 1));
  2924. };
  2925. // Insert a stack save if we're going to need any inalloca args.
  2926. bool HasInAllocaArgs = false;
  2927. if (CGM.getTarget().getCXXABI().isMicrosoft()) {
  2928. for (ArrayRef<QualType>::iterator I = ArgTypes.begin(), E = ArgTypes.end();
  2929. I != E && !HasInAllocaArgs; ++I)
  2930. HasInAllocaArgs = isInAllocaArgument(CGM.getCXXABI(), *I);
  2931. if (HasInAllocaArgs) {
  2932. assert(getTarget().getTriple().getArch() == llvm::Triple::x86);
  2933. Args.allocateArgumentMemory(*this);
  2934. }
  2935. }
  2936. // Evaluate each argument in the appropriate order.
  2937. size_t CallArgsStart = Args.size();
  2938. for (unsigned I = 0, E = ArgTypes.size(); I != E; ++I) {
  2939. unsigned Idx = LeftToRight ? I : E - I - 1;
  2940. CallExpr::const_arg_iterator Arg = ArgRange.begin() + Idx;
  2941. unsigned InitialArgSize = Args.size();
  2942. // If *Arg is an ObjCIndirectCopyRestoreExpr, check that either the types of
  2943. // the argument and parameter match or the objc method is parameterized.
  2944. assert((!isa<ObjCIndirectCopyRestoreExpr>(*Arg) ||
  2945. getContext().hasSameUnqualifiedType((*Arg)->getType(),
  2946. ArgTypes[Idx]) ||
  2947. (isa<ObjCMethodDecl>(AC.getDecl()) &&
  2948. isObjCMethodWithTypeParams(cast<ObjCMethodDecl>(AC.getDecl())))) &&
  2949. "Argument and parameter types don't match");
  2950. EmitCallArg(Args, *Arg, ArgTypes[Idx]);
  2951. // In particular, we depend on it being the last arg in Args, and the
  2952. // objectsize bits depend on there only being one arg if !LeftToRight.
  2953. assert(InitialArgSize + 1 == Args.size() &&
  2954. "The code below depends on only adding one arg per EmitCallArg");
  2955. (void)InitialArgSize;
  2956. RValue RVArg = Args.back().RV;
  2957. EmitNonNullArgCheck(RVArg, ArgTypes[Idx], (*Arg)->getExprLoc(), AC,
  2958. ParamsToSkip + Idx);
  2959. // @llvm.objectsize should never have side-effects and shouldn't need
  2960. // destruction/cleanups, so we can safely "emit" it after its arg,
2961. // regardless of right-to-leftness.
  2962. MaybeEmitImplicitObjectSize(Idx, *Arg, RVArg);
  2963. }
  2964. if (!LeftToRight) {
  2965. // Un-reverse the arguments we just evaluated so they match up with the LLVM
  2966. // IR function.
  2967. std::reverse(Args.begin() + CallArgsStart, Args.end());
  2968. }
  2969. }
  2970. namespace {
  2971. struct DestroyUnpassedArg final : EHScopeStack::Cleanup {
  2972. DestroyUnpassedArg(Address Addr, QualType Ty)
  2973. : Addr(Addr), Ty(Ty) {}
  2974. Address Addr;
  2975. QualType Ty;
  2976. void Emit(CodeGenFunction &CGF, Flags flags) override {
  2977. const CXXDestructorDecl *Dtor = Ty->getAsCXXRecordDecl()->getDestructor();
  2978. assert(!Dtor->isTrivial());
  2979. CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete, /*for vbase*/ false,
  2980. /*Delegating=*/false, Addr);
  2981. }
  2982. };
  2983. struct DisableDebugLocationUpdates {
  2984. CodeGenFunction &CGF;
  2985. bool disabledDebugInfo;
  2986. DisableDebugLocationUpdates(CodeGenFunction &CGF, const Expr *E) : CGF(CGF) {
  2987. if ((disabledDebugInfo = isa<CXXDefaultArgExpr>(E) && CGF.getDebugInfo()))
  2988. CGF.disableDebugInfo();
  2989. }
  2990. ~DisableDebugLocationUpdates() {
  2991. if (disabledDebugInfo)
  2992. CGF.enableDebugInfo();
  2993. }
  2994. };
  2995. } // end anonymous namespace
  2996. void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E,
  2997. QualType type) {
  2998. DisableDebugLocationUpdates Dis(*this, E);
  2999. if (const ObjCIndirectCopyRestoreExpr *CRE
  3000. = dyn_cast<ObjCIndirectCopyRestoreExpr>(E)) {
  3001. assert(getLangOpts().ObjCAutoRefCount);
  3002. return emitWritebackArg(*this, args, CRE);
  3003. }
  3004. assert(type->isReferenceType() == E->isGLValue() &&
  3005. "reference binding to unmaterialized r-value!");
  3006. if (E->isGLValue()) {
  3007. assert(E->getObjectKind() == OK_Ordinary);
  3008. return args.add(EmitReferenceBindingToExpr(E), type);
  3009. }
  3010. bool HasAggregateEvalKind = hasAggregateEvaluationKind(type);
  3011. // In the Microsoft C++ ABI, aggregate arguments are destructed by the callee.
  3012. // However, we still have to push an EH-only cleanup in case we unwind before
  3013. // we make it to the call.
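// Editor's illustration (hedged): for
//   void take(Widget w);   // Widget has a non-trivial destructor
//   take(Widget());
// the callee destroys 'w' under the MS ABI, but if anything else emitted for
// the call throws before the call instruction is reached, the EH-only
// cleanup pushed below destroys the already-constructed temporary here in
// the caller.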
  3014. if (HasAggregateEvalKind &&
  3015. CGM.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) {
  3016. // If we're using inalloca, use the argument memory. Otherwise, use a
  3017. // temporary.
  3018. AggValueSlot Slot;
  3019. if (args.isUsingInAlloca())
  3020. Slot = createPlaceholderSlot(*this, type);
  3021. else
  3022. Slot = CreateAggTemp(type, "agg.tmp");
  3023. const CXXRecordDecl *RD = type->getAsCXXRecordDecl();
  3024. bool DestroyedInCallee =
  3025. RD && RD->hasNonTrivialDestructor() &&
  3026. CGM.getCXXABI().getRecordArgABI(RD) != CGCXXABI::RAA_Default;
  3027. if (DestroyedInCallee)
  3028. Slot.setExternallyDestructed();
  3029. EmitAggExpr(E, Slot);
  3030. RValue RV = Slot.asRValue();
  3031. args.add(RV, type);
  3032. if (DestroyedInCallee) {
  3033. // Create a no-op GEP between the placeholder and the cleanup so we can
  3034. // RAUW it successfully. It also serves as a marker of the first
  3035. // instruction where the cleanup is active.
  3036. pushFullExprCleanup<DestroyUnpassedArg>(EHCleanup, Slot.getAddress(),
  3037. type);
  3038. // This unreachable is a temporary marker which will be removed later.
  3039. llvm::Instruction *IsActive = Builder.CreateUnreachable();
  3040. args.addArgCleanupDeactivation(EHStack.getInnermostEHScope(), IsActive);
  3041. }
  3042. return;
  3043. }
  3044. if (HasAggregateEvalKind && isa<ImplicitCastExpr>(E) &&
  3045. cast<CastExpr>(E)->getCastKind() == CK_LValueToRValue) {
  3046. LValue L = EmitLValue(cast<CastExpr>(E)->getSubExpr());
  3047. assert(L.isSimple());
  3048. if (L.getAlignment() >= getContext().getTypeAlignInChars(type)) {
  3049. args.add(L.asAggregateRValue(), type, /*NeedsCopy*/true);
  3050. } else {
  3051. // We can't represent a misaligned lvalue in the CallArgList, so copy
  3052. // to an aligned temporary now.
  3053. Address tmp = CreateMemTemp(type);
  3054. EmitAggregateCopy(tmp, L.getAddress(), type, L.isVolatile());
  3055. args.add(RValue::getAggregate(tmp), type);
  3056. }
  3057. return;
  3058. }
  3059. args.add(EmitAnyExprToTemp(E), type);
  3060. }
  3061. QualType CodeGenFunction::getVarArgType(const Expr *Arg) {
  3062. // System headers on Windows define NULL to 0 instead of 0LL on Win64. MSVC
  3063. // implicitly widens null pointer constants that are arguments to varargs
  3064. // functions to pointer-sized ints.
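// Editor's example (hedged): on Win64, a call such as printf("%p", NULL)
// passes NULL as a 32-bit 0; widening it to the pointer-sized integer type
// keeps such (technically non-portable) varargs uses behaving as they do
// under MSVC.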
  3065. if (!getTarget().getTriple().isOSWindows())
  3066. return Arg->getType();
  3067. if (Arg->getType()->isIntegerType() &&
  3068. getContext().getTypeSize(Arg->getType()) <
  3069. getContext().getTargetInfo().getPointerWidth(0) &&
  3070. Arg->isNullPointerConstant(getContext(),
  3071. Expr::NPC_ValueDependentIsNotNull)) {
  3072. return getContext().getIntPtrType();
  3073. }
  3074. return Arg->getType();
  3075. }
  3076. // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
  3077. // optimizer it can aggressively ignore unwind edges.
  3078. void
  3079. CodeGenFunction::AddObjCARCExceptionMetadata(llvm::Instruction *Inst) {
  3080. if (CGM.getCodeGenOpts().OptimizationLevel != 0 &&
  3081. !CGM.getCodeGenOpts().ObjCAutoRefCountExceptions)
  3082. Inst->setMetadata("clang.arc.no_objc_arc_exceptions",
  3083. CGM.getNoObjCARCExceptionsMetadata());
  3084. }
  3085. /// Emits a call to the given no-arguments nounwind runtime function.
  3086. llvm::CallInst *
  3087. CodeGenFunction::EmitNounwindRuntimeCall(llvm::Value *callee,
  3088. const llvm::Twine &name) {
  3089. return EmitNounwindRuntimeCall(callee, None, name);
  3090. }
  3091. /// Emits a call to the given nounwind runtime function.
  3092. llvm::CallInst *
  3093. CodeGenFunction::EmitNounwindRuntimeCall(llvm::Value *callee,
  3094. ArrayRef<llvm::Value*> args,
  3095. const llvm::Twine &name) {
  3096. llvm::CallInst *call = EmitRuntimeCall(callee, args, name);
  3097. call->setDoesNotThrow();
  3098. return call;
  3099. }
  3100. /// Emits a simple call (never an invoke) to the given no-arguments
  3101. /// runtime function.
  3102. llvm::CallInst *
  3103. CodeGenFunction::EmitRuntimeCall(llvm::Value *callee,
  3104. const llvm::Twine &name) {
  3105. return EmitRuntimeCall(callee, None, name);
  3106. }
  3107. // Calls which may throw must have operand bundles indicating which funclet
  3108. // they are nested within.
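// Editor's sketch (hedged) of the resulting IR inside, say, a catch funclet:
//   %cp = catchpad within %cs [...]
//   call void @some_runtime_helper() [ "funclet"(token %cp) ]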
  3109. static void
  3110. getBundlesForFunclet(llvm::Value *Callee, llvm::Instruction *CurrentFuncletPad,
  3111. SmallVectorImpl<llvm::OperandBundleDef> &BundleList) {
  3112. // There is no need for a funclet operand bundle if we aren't inside a
  3113. // funclet.
  3114. if (!CurrentFuncletPad)
  3115. return;
  3116. // Skip intrinsics which cannot throw.
  3117. auto *CalleeFn = dyn_cast<llvm::Function>(Callee->stripPointerCasts());
  3118. if (CalleeFn && CalleeFn->isIntrinsic() && CalleeFn->doesNotThrow())
  3119. return;
  3120. BundleList.emplace_back("funclet", CurrentFuncletPad);
  3121. }
  3122. /// Emits a simple call (never an invoke) to the given runtime function.
  3123. llvm::CallInst *
  3124. CodeGenFunction::EmitRuntimeCall(llvm::Value *callee,
  3125. ArrayRef<llvm::Value*> args,
  3126. const llvm::Twine &name) {
  3127. SmallVector<llvm::OperandBundleDef, 1> BundleList;
  3128. getBundlesForFunclet(callee, CurrentFuncletPad, BundleList);
  3129. llvm::CallInst *call = Builder.CreateCall(callee, args, BundleList, name);
  3130. call->setCallingConv(getRuntimeCC());
  3131. return call;
  3132. }
  3133. /// Emits a call or invoke to the given noreturn runtime function.
  3134. void CodeGenFunction::EmitNoreturnRuntimeCallOrInvoke(llvm::Value *callee,
  3135. ArrayRef<llvm::Value*> args) {
  3136. SmallVector<llvm::OperandBundleDef, 1> BundleList;
  3137. getBundlesForFunclet(callee, CurrentFuncletPad, BundleList);
  3138. if (getInvokeDest()) {
  3139. llvm::InvokeInst *invoke =
  3140. Builder.CreateInvoke(callee,
  3141. getUnreachableBlock(),
  3142. getInvokeDest(),
  3143. args,
  3144. BundleList);
  3145. invoke->setDoesNotReturn();
  3146. invoke->setCallingConv(getRuntimeCC());
  3147. } else {
  3148. llvm::CallInst *call = Builder.CreateCall(callee, args, BundleList);
  3149. call->setDoesNotReturn();
  3150. call->setCallingConv(getRuntimeCC());
  3151. Builder.CreateUnreachable();
  3152. }
  3153. }
  3154. /// Emits a call or invoke instruction to the given nullary runtime function.
  3155. llvm::CallSite
  3156. CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::Value *callee,
  3157. const Twine &name) {
  3158. return EmitRuntimeCallOrInvoke(callee, None, name);
  3159. }
  3160. /// Emits a call or invoke instruction to the given runtime function.
  3161. llvm::CallSite
  3162. CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::Value *callee,
  3163. ArrayRef<llvm::Value*> args,
  3164. const Twine &name) {
  3165. llvm::CallSite callSite = EmitCallOrInvoke(callee, args, name);
  3166. callSite.setCallingConv(getRuntimeCC());
  3167. return callSite;
  3168. }
  3169. /// Emits a call or invoke instruction to the given function, depending
  3170. /// on the current state of the EH stack.
  3171. llvm::CallSite
  3172. CodeGenFunction::EmitCallOrInvoke(llvm::Value *Callee,
  3173. ArrayRef<llvm::Value *> Args,
  3174. const Twine &Name) {
  3175. llvm::BasicBlock *InvokeDest = getInvokeDest();
  3176. SmallVector<llvm::OperandBundleDef, 1> BundleList;
  3177. getBundlesForFunclet(Callee, CurrentFuncletPad, BundleList);
  3178. llvm::Instruction *Inst;
  3179. if (!InvokeDest)
  3180. Inst = Builder.CreateCall(Callee, Args, BundleList, Name);
  3181. else {
  3182. llvm::BasicBlock *ContBB = createBasicBlock("invoke.cont");
  3183. Inst = Builder.CreateInvoke(Callee, ContBB, InvokeDest, Args, BundleList,
  3184. Name);
  3185. EmitBlock(ContBB);
  3186. }
  3187. // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
  3188. // optimizer it can aggressively ignore unwind edges.
  3189. if (CGM.getLangOpts().ObjCAutoRefCount)
  3190. AddObjCARCExceptionMetadata(Inst);
  3191. return llvm::CallSite(Inst);
  3192. }
  3193. /// \brief Store a non-aggregate value to an address to initialize it. For
  3194. /// initialization, a non-atomic store will be used.
  3195. static void EmitInitStoreOfNonAggregate(CodeGenFunction &CGF, RValue Src,
  3196. LValue Dst) {
  3197. if (Src.isScalar())
  3198. CGF.EmitStoreOfScalar(Src.getScalarVal(), Dst, /*init=*/true);
  3199. else
  3200. CGF.EmitStoreOfComplex(Src.getComplexVal(), Dst, /*init=*/true);
  3201. }
  3202. void CodeGenFunction::deferPlaceholderReplacement(llvm::Instruction *Old,
  3203. llvm::Value *New) {
  3204. DeferredReplacements.push_back(std::make_pair(Old, New));
  3205. }
  3206. RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
  3207. const CGCallee &Callee,
  3208. ReturnValueSlot ReturnValue,
  3209. const CallArgList &CallArgs,
  3210. llvm::Instruction **callOrInvoke) {
  3211. // FIXME: We no longer need the types from CallArgs; lift up and simplify.
  3212. assert(Callee.isOrdinary());
  3213. // Handle struct-return functions by passing a pointer to the
  3214. // location that we would like to return into.
  3215. QualType RetTy = CallInfo.getReturnType();
  3216. const ABIArgInfo &RetAI = CallInfo.getReturnInfo();
  3217. llvm::FunctionType *IRFuncTy = Callee.getFunctionType();
  3218. // 1. Set up the arguments.
  3219. // If we're using inalloca, insert the allocation after the stack save.
  3220. // FIXME: Do this earlier rather than hacking it in here!
  3221. Address ArgMemory = Address::invalid();
  3222. const llvm::StructLayout *ArgMemoryLayout = nullptr;
  3223. if (llvm::StructType *ArgStruct = CallInfo.getArgStruct()) {
  3224. const llvm::DataLayout &DL = CGM.getDataLayout();
  3225. ArgMemoryLayout = DL.getStructLayout(ArgStruct);
  3226. llvm::Instruction *IP = CallArgs.getStackBase();
  3227. llvm::AllocaInst *AI;
  3228. if (IP) {
  3229. IP = IP->getNextNode();
  3230. AI = new llvm::AllocaInst(ArgStruct, DL.getAllocaAddrSpace(),
  3231. "argmem", IP);
  3232. } else {
  3233. AI = CreateTempAlloca(ArgStruct, "argmem");
  3234. }
  3235. auto Align = CallInfo.getArgStructAlignment();
  3236. AI->setAlignment(Align.getQuantity());
  3237. AI->setUsedWithInAlloca(true);
  3238. assert(AI->isUsedWithInAlloca() && !AI->isStaticAlloca());
  3239. ArgMemory = Address(AI, Align);
  3240. }
  3241. // Helper function to drill into the inalloca allocation.
  3242. auto createInAllocaStructGEP = [&](unsigned FieldIndex) -> Address {
  3243. auto FieldOffset =
  3244. CharUnits::fromQuantity(ArgMemoryLayout->getElementOffset(FieldIndex));
  3245. return Builder.CreateStructGEP(ArgMemory, FieldIndex, FieldOffset);
  3246. };
  3247. ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), CallInfo);
  3248. SmallVector<llvm::Value *, 16> IRCallArgs(IRFunctionArgs.totalIRArgs());
  3249. // If the call returns a temporary with struct return, create a temporary
  3250. // alloca to hold the result, unless one is given to us.
  3251. Address SRetPtr = Address::invalid();
  3252. size_t UnusedReturnSize = 0;
  3253. if (RetAI.isIndirect() || RetAI.isInAlloca() || RetAI.isCoerceAndExpand()) {
  3254. if (!ReturnValue.isNull()) {
  3255. SRetPtr = ReturnValue.getValue();
  3256. } else {
  3257. SRetPtr = CreateMemTemp(RetTy);
  3258. if (HaveInsertPoint() && ReturnValue.isUnused()) {
  3259. uint64_t size =
  3260. CGM.getDataLayout().getTypeAllocSize(ConvertTypeForMem(RetTy));
  3261. if (EmitLifetimeStart(size, SRetPtr.getPointer()))
  3262. UnusedReturnSize = size;
  3263. }
  3264. }
  3265. if (IRFunctionArgs.hasSRetArg()) {
  3266. IRCallArgs[IRFunctionArgs.getSRetArgNo()] = SRetPtr.getPointer();
  3267. } else if (RetAI.isInAlloca()) {
  3268. Address Addr = createInAllocaStructGEP(RetAI.getInAllocaFieldIndex());
  3269. Builder.CreateStore(SRetPtr.getPointer(), Addr);
  3270. }
  3271. }
  Address swiftErrorTemp = Address::invalid();
  Address swiftErrorArg = Address::invalid();

  // Translate all of the arguments as necessary to match the IR lowering.
  assert(CallInfo.arg_size() == CallArgs.size() &&
         "Mismatch between function signature & arguments.");
  unsigned ArgNo = 0;
  CGFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin();
  for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
       I != E; ++I, ++info_it, ++ArgNo) {
    const ABIArgInfo &ArgInfo = info_it->info;
    RValue RV = I->RV;

    // Insert a padding argument to ensure proper alignment.
    if (IRFunctionArgs.hasPaddingArg(ArgNo))
      IRCallArgs[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
          llvm::UndefValue::get(ArgInfo.getPaddingType());

    unsigned FirstIRArg, NumIRArgs;
    std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);

    switch (ArgInfo.getKind()) {
    case ABIArgInfo::InAlloca: {
      assert(NumIRArgs == 0);
      assert(getTarget().getTriple().getArch() == llvm::Triple::x86);
      if (RV.isAggregate()) {
        // Replace the placeholder with the appropriate argument slot GEP.
        llvm::Instruction *Placeholder =
            cast<llvm::Instruction>(RV.getAggregatePointer());
        CGBuilderTy::InsertPoint IP = Builder.saveIP();
        Builder.SetInsertPoint(Placeholder);
        Address Addr = createInAllocaStructGEP(ArgInfo.getInAllocaFieldIndex());
        Builder.restoreIP(IP);
        deferPlaceholderReplacement(Placeholder, Addr.getPointer());
      } else {
        // Store the RValue into the argument struct.
        Address Addr = createInAllocaStructGEP(ArgInfo.getInAllocaFieldIndex());
        unsigned AS = Addr.getType()->getPointerAddressSpace();
        llvm::Type *MemType = ConvertTypeForMem(I->Ty)->getPointerTo(AS);
        // There are some cases where a trivial bitcast is not avoidable. The
        // definition of a type later in a translation unit may change its type
        // from {}* to (%struct.foo*)*.
        if (Addr.getType() != MemType)
          Addr = Builder.CreateBitCast(Addr, MemType);
        LValue argLV = MakeAddrLValue(Addr, I->Ty);
        EmitInitStoreOfNonAggregate(*this, RV, argLV);
      }
      break;
    }
    case ABIArgInfo::Indirect: {
      assert(NumIRArgs == 1);
      if (RV.isScalar() || RV.isComplex()) {
        // Make a temporary alloca to pass the argument.
        Address Addr = CreateMemTemp(I->Ty, ArgInfo.getIndirectAlign(),
                                     "indirect-arg-temp", false);
        IRCallArgs[FirstIRArg] = Addr.getPointer();

        LValue argLV = MakeAddrLValue(Addr, I->Ty);
        EmitInitStoreOfNonAggregate(*this, RV, argLV);
      } else {
        // We want to avoid creating an unnecessary temporary+copy here;
        // however, we need one in three cases:
        // 1. If the argument is not byval, and we are required to copy the
        //    source. (This case doesn't occur on any common architecture.)
        // 2. If the argument is byval, RV is not sufficiently aligned, and
        //    we cannot force it to be sufficiently aligned.
        // 3. If the argument is byval, but RV is located in an address space
        //    different than that of the argument (0).
        Address Addr = RV.getAggregateAddress();
        CharUnits Align = ArgInfo.getIndirectAlign();
        const llvm::DataLayout *TD = &CGM.getDataLayout();
        const unsigned RVAddrSpace = Addr.getType()->getAddressSpace();
        const unsigned ArgAddrSpace =
            (FirstIRArg < IRFuncTy->getNumParams()
                 ? IRFuncTy->getParamType(FirstIRArg)->getPointerAddressSpace()
                 : 0);
        if ((!ArgInfo.getIndirectByVal() && I->NeedsCopy) ||
            (ArgInfo.getIndirectByVal() && Addr.getAlignment() < Align &&
             llvm::getOrEnforceKnownAlignment(Addr.getPointer(),
                                              Align.getQuantity(), *TD)
                 < Align.getQuantity()) ||
            (ArgInfo.getIndirectByVal() && (RVAddrSpace != ArgAddrSpace))) {
          // Create an aligned temporary, and copy to it.
          Address AI = CreateMemTemp(I->Ty, ArgInfo.getIndirectAlign(),
                                     "byval-temp", false);
          IRCallArgs[FirstIRArg] = AI.getPointer();
          EmitAggregateCopy(AI, Addr, I->Ty, RV.isVolatileQualified());
        } else {
          // Skip the extra memcpy call.
          IRCallArgs[FirstIRArg] = Addr.getPointer();
        }
      }
      break;
    }
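    // Illustrative result when the copy above is needed (assumed example; the
    // struct name and alignment are made up):
    //   %byval-temp = alloca %struct.S, align 16
    //   call void @llvm.memcpy...(...)            ; copy from the source lvalue
    //   call void @f(%struct.S* byval align 16 %byval-temp)
    // Otherwise the original aggregate address is passed indirectly as-is.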
    case ABIArgInfo::Ignore:
      assert(NumIRArgs == 0);
      break;

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      if (!isa<llvm::StructType>(ArgInfo.getCoerceToType()) &&
          ArgInfo.getCoerceToType() == ConvertType(info_it->type) &&
          ArgInfo.getDirectOffset() == 0) {
        assert(NumIRArgs == 1);
        llvm::Value *V;
        if (RV.isScalar())
          V = RV.getScalarVal();
        else
          V = Builder.CreateLoad(RV.getAggregateAddress());

        // Implement swifterror by copying into a new swifterror argument.
        // We'll write back in the normal path out of the call.
        if (CallInfo.getExtParameterInfo(ArgNo).getABI()
              == ParameterABI::SwiftErrorResult) {
          assert(!swiftErrorTemp.isValid() && "multiple swifterror args");

          QualType pointeeTy = I->Ty->getPointeeType();
          swiftErrorArg =
            Address(V, getContext().getTypeAlignInChars(pointeeTy));

          swiftErrorTemp =
            CreateMemTemp(pointeeTy, getPointerAlign(), "swifterror.temp");
          V = swiftErrorTemp.getPointer();
          cast<llvm::AllocaInst>(V)->setSwiftError(true);

          llvm::Value *errorValue = Builder.CreateLoad(swiftErrorArg);
          Builder.CreateStore(errorValue, swiftErrorTemp);
        }
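        // Net effect (descriptive note): the call receives a fresh alloca
        // marked swiftError, seeded from the caller's error slot; its value is
        // copied back to swiftErrorArg after the call (see the writeback in
        // the "Finish the call" section below).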
        // We might have to widen integers, but we should never truncate.
        if (ArgInfo.getCoerceToType() != V->getType() &&
            V->getType()->isIntegerTy())
          V = Builder.CreateZExt(V, ArgInfo.getCoerceToType());

        // If the argument doesn't match, perform a bitcast to coerce it. This
        // can happen due to trivial type mismatches.
        if (FirstIRArg < IRFuncTy->getNumParams() &&
            V->getType() != IRFuncTy->getParamType(FirstIRArg))
          V = Builder.CreateBitCast(V, IRFuncTy->getParamType(FirstIRArg));

        IRCallArgs[FirstIRArg] = V;
        break;
      }

      // FIXME: Avoid the conversion through memory if possible.
      Address Src = Address::invalid();
      if (RV.isScalar() || RV.isComplex()) {
        Src = CreateMemTemp(I->Ty, "coerce");
        LValue SrcLV = MakeAddrLValue(Src, I->Ty);
        EmitInitStoreOfNonAggregate(*this, RV, SrcLV);
      } else {
        Src = RV.getAggregateAddress();
      }

      // If the value is offset in memory, apply the offset now.
      Src = emitAddressAtOffset(*this, Src, ArgInfo);

      // Fast-isel and the optimizer generally like scalar values better than
      // FCAs, so we flatten them if this is safe to do for this argument.
      llvm::StructType *STy =
          dyn_cast<llvm::StructType>(ArgInfo.getCoerceToType());
      if (STy && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) {
        llvm::Type *SrcTy = Src.getType()->getElementType();
        uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(SrcTy);
        uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(STy);

        // If the source type is smaller than the destination type of the
        // coerce-to logic, copy the source value into a temp alloca the size
        // of the destination type to allow loading all of it. The bits past
        // the source value are left undef.
        if (SrcSize < DstSize) {
          Address TempAlloca
            = CreateTempAlloca(STy, Src.getAlignment(),
                               Src.getName() + ".coerce");
          Builder.CreateMemCpy(TempAlloca, Src, SrcSize);
          Src = TempAlloca;
        } else {
          Src = Builder.CreateBitCast(Src,
                                      STy->getPointerTo(Src.getAddressSpace()));
        }

        auto SrcLayout = CGM.getDataLayout().getStructLayout(STy);
        assert(NumIRArgs == STy->getNumElements());
        for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
          auto Offset = CharUnits::fromQuantity(SrcLayout->getElementOffset(i));
          Address EltPtr = Builder.CreateStructGEP(Src, i, Offset);
          llvm::Value *LI = Builder.CreateLoad(EltPtr);
          IRCallArgs[FirstIRArg + i] = LI;
        }
      } else {
        // In the simple case, just pass the coerced loaded value.
        assert(NumIRArgs == 1);
        IRCallArgs[FirstIRArg] =
          CreateCoercedLoad(Src, ArgInfo.getCoerceToType(), *this);
      }

      break;
    }
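    // Illustrative flattening (assumed example, target-dependent): a 16-byte
    // POD aggregate coerced to { i64, i64 } is passed as two scalar i64 IR
    // arguments rather than as a single first-class aggregate, which the
    // optimizer and fast-isel handle better.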
    case ABIArgInfo::CoerceAndExpand: {
      auto coercionType = ArgInfo.getCoerceAndExpandType();
      auto layout = CGM.getDataLayout().getStructLayout(coercionType);

      llvm::Value *tempSize = nullptr;
      Address addr = Address::invalid();

      if (RV.isAggregate()) {
        addr = RV.getAggregateAddress();
      } else {
        assert(RV.isScalar()); // complex should always just be direct

        llvm::Type *scalarType = RV.getScalarVal()->getType();
        auto scalarSize = CGM.getDataLayout().getTypeAllocSize(scalarType);
        auto scalarAlign = CGM.getDataLayout().getPrefTypeAlignment(scalarType);

        tempSize = llvm::ConstantInt::get(CGM.Int64Ty, scalarSize);

        // Materialize to a temporary.
        addr = CreateTempAlloca(RV.getScalarVal()->getType(),
                 CharUnits::fromQuantity(std::max(layout->getAlignment(),
                                                  scalarAlign)));
        EmitLifetimeStart(scalarSize, addr.getPointer());

        Builder.CreateStore(RV.getScalarVal(), addr);
      }

      addr = Builder.CreateElementBitCast(addr, coercionType);

      unsigned IRArgPos = FirstIRArg;
      for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
        llvm::Type *eltType = coercionType->getElementType(i);
        if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType)) continue;
        Address eltAddr = Builder.CreateStructGEP(addr, i, layout);
        llvm::Value *elt = Builder.CreateLoad(eltAddr);
        IRCallArgs[IRArgPos++] = elt;
      }
      assert(IRArgPos == FirstIRArg + NumIRArgs);

      if (tempSize) {
        EmitLifetimeEnd(tempSize, addr.getPointer());
      }

      break;
    }

    case ABIArgInfo::Expand:
      unsigned IRArgPos = FirstIRArg;
      ExpandTypeToArgs(I->Ty, RV, IRFuncTy, IRCallArgs, IRArgPos);
      assert(IRArgPos == FirstIRArg + NumIRArgs);
      break;
    }
  }
  llvm::Value *CalleePtr = Callee.getFunctionPointer();

  // If we're using inalloca, set up that argument.
  if (ArgMemory.isValid()) {
    llvm::Value *Arg = ArgMemory.getPointer();
    if (CallInfo.isVariadic()) {
      // When passing non-POD arguments by value to variadic functions, we will
      // end up with a variadic prototype and an inalloca call site. In such
      // cases, we can't do any parameter mismatch checks. Give up and bitcast
      // the callee.
      unsigned CalleeAS = CalleePtr->getType()->getPointerAddressSpace();
      auto FnTy = getTypes().GetFunctionType(CallInfo)->getPointerTo(CalleeAS);
      CalleePtr = Builder.CreateBitCast(CalleePtr, FnTy);
    } else {
      llvm::Type *LastParamTy =
          IRFuncTy->getParamType(IRFuncTy->getNumParams() - 1);
      if (Arg->getType() != LastParamTy) {
#ifndef NDEBUG
        // Assert that these structs have equivalent element types.
        llvm::StructType *FullTy = CallInfo.getArgStruct();
        llvm::StructType *DeclaredTy = cast<llvm::StructType>(
            cast<llvm::PointerType>(LastParamTy)->getElementType());
        assert(DeclaredTy->getNumElements() == FullTy->getNumElements());
        for (llvm::StructType::element_iterator DI = DeclaredTy->element_begin(),
                                                 DE = DeclaredTy->element_end(),
                                                 FI = FullTy->element_begin();
             DI != DE; ++DI, ++FI)
          assert(*DI == *FI);
#endif
        Arg = Builder.CreateBitCast(Arg, LastParamTy);
      }
    }
    assert(IRFunctionArgs.hasInallocaArg());
    IRCallArgs[IRFunctionArgs.getInallocaArgNo()] = Arg;
  }
  // 2. Prepare the function pointer.

  // If the callee is a bitcast of a non-variadic function to have a
  // variadic function pointer type, check to see if we can remove the
  // bitcast. This comes up with unprototyped functions.
  //
  // This makes the IR nicer, but more importantly it ensures that we
  // can inline the function at -O0 if it is marked always_inline.
  auto simplifyVariadicCallee = [](llvm::Value *Ptr) -> llvm::Value* {
    llvm::FunctionType *CalleeFT =
      cast<llvm::FunctionType>(Ptr->getType()->getPointerElementType());
    if (!CalleeFT->isVarArg())
      return Ptr;

    llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Ptr);
    if (!CE || CE->getOpcode() != llvm::Instruction::BitCast)
      return Ptr;

    llvm::Function *OrigFn = dyn_cast<llvm::Function>(CE->getOperand(0));
    if (!OrigFn)
      return Ptr;

    llvm::FunctionType *OrigFT = OrigFn->getFunctionType();

    // If the original type is variadic, or if any of the component types
    // disagree, we cannot remove the cast.
    if (OrigFT->isVarArg() ||
        OrigFT->getNumParams() != CalleeFT->getNumParams() ||
        OrigFT->getReturnType() != CalleeFT->getReturnType())
      return Ptr;

    for (unsigned i = 0, e = OrigFT->getNumParams(); i != e; ++i)
      if (OrigFT->getParamType(i) != CalleeFT->getParamType(i))
        return Ptr;

    return OrigFn;
  };
  CalleePtr = simplifyVariadicCallee(CalleePtr);
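  // Illustrative case (assumed source, not from this file): calling through a
  // K&R-style, unprototyped declaration
  //   void f();
  //   ... f(1, 2); ...
  // can leave the callee as a bitcast of @f to a variadic function pointer
  // type; when the parameter and return types line up, the lambda above strips
  // the bitcast so @f can still be inlined at -O0 if marked always_inline.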
  // 3. Perform the actual call.

  // Deactivate any cleanups that we're supposed to do immediately before
  // the call.
  if (!CallArgs.getCleanupsToDeactivate().empty())
    deactivateArgCleanupsBeforeCall(*this, CallArgs);

  // Assert that the arguments we computed match up. The IR verifier
  // will catch this, but this is a common enough source of problems
  // during IRGen changes that it's way better for debugging to catch
  // it ourselves here.
#ifndef NDEBUG
  assert(IRCallArgs.size() == IRFuncTy->getNumParams() || IRFuncTy->isVarArg());
  for (unsigned i = 0; i < IRCallArgs.size(); ++i) {
    // Inalloca argument can have different type.
    if (IRFunctionArgs.hasInallocaArg() &&
        i == IRFunctionArgs.getInallocaArgNo())
      continue;
    if (i < IRFuncTy->getNumParams())
      assert(IRCallArgs[i]->getType() == IRFuncTy->getParamType(i));
  }
#endif

  // Compute the calling convention and attributes.
  unsigned CallingConv;
  llvm::AttributeList Attrs;
  CGM.ConstructAttributeList(CalleePtr->getName(), CallInfo,
                             Callee.getAbstractInfo(), Attrs, CallingConv,
                             /*AttrOnCallSite=*/true);

  // Apply some call-site-specific attributes.
  // TODO: work this into building the attribute set.

  // Apply always_inline to all calls within flatten functions.
  // FIXME: should this really take priority over __try, below?
  if (CurCodeDecl && CurCodeDecl->hasAttr<FlattenAttr>() &&
      !(Callee.getAbstractInfo().getCalleeDecl() &&
        Callee.getAbstractInfo().getCalleeDecl()->hasAttr<NoInlineAttr>())) {
    Attrs =
        Attrs.addAttribute(getLLVMContext(), llvm::AttributeList::FunctionIndex,
                           llvm::Attribute::AlwaysInline);
  }

  // Disable inlining inside SEH __try blocks.
  if (isSEHTryScope()) {
    Attrs =
        Attrs.addAttribute(getLLVMContext(), llvm::AttributeList::FunctionIndex,
                           llvm::Attribute::NoInline);
  }
  // Decide whether to use a call or an invoke.
  bool CannotThrow;
  if (currentFunctionUsesSEHTry()) {
    // SEH cares about asynchronous exceptions, so everything can "throw."
    CannotThrow = false;
  } else if (isCleanupPadScope() &&
             EHPersonality::get(*this).isMSVCXXPersonality()) {
    // The MSVC++ personality will implicitly terminate the program if an
    // exception is thrown during a cleanup outside of a try/catch.
    // We don't need to model anything in IR to get this behavior.
    CannotThrow = true;
  } else {
    // Otherwise, nounwind call sites will never throw.
    CannotThrow = Attrs.hasAttribute(llvm::AttributeList::FunctionIndex,
                                     llvm::Attribute::NoUnwind);
  }
  llvm::BasicBlock *InvokeDest = CannotThrow ? nullptr : getInvokeDest();

  SmallVector<llvm::OperandBundleDef, 1> BundleList;
  getBundlesForFunclet(CalleePtr, CurrentFuncletPad, BundleList);

  // Emit the actual call/invoke instruction.
  llvm::CallSite CS;
  if (!InvokeDest) {
    CS = Builder.CreateCall(CalleePtr, IRCallArgs, BundleList);
  } else {
    llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
    CS = Builder.CreateInvoke(CalleePtr, Cont, InvokeDest, IRCallArgs,
                              BundleList);
    EmitBlock(Cont);
  }
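  // The two shapes emitted above are, roughly (illustrative only):
  //   call void @callee(...)
  // versus, when the callee may throw into an active EH scope,
  //   invoke void @callee(...) to label %invoke.cont unwind label %lpad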
  llvm::Instruction *CI = CS.getInstruction();
  if (callOrInvoke)
    *callOrInvoke = CI;

  // Apply the attributes and calling convention.
  CS.setAttributes(Attrs);
  CS.setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));

  // Apply various metadata.

  if (!CI->getType()->isVoidTy())
    CI->setName("call");

  // Insert instrumentation or attach profile metadata at indirect call sites.
  // For more details, see the comment before the definition of
  // IPVK_IndirectCallTarget in InstrProfData.inc.
  if (!CS.getCalledFunction())
    PGO.valueProfile(Builder, llvm::IPVK_IndirectCallTarget,
                     CI, CalleePtr);

  // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
  // optimizer it can aggressively ignore unwind edges.
  if (CGM.getLangOpts().ObjCAutoRefCount)
    AddObjCARCExceptionMetadata(CI);

  // Suppress tail calls if requested.
  if (llvm::CallInst *Call = dyn_cast<llvm::CallInst>(CI)) {
    const Decl *TargetDecl = Callee.getAbstractInfo().getCalleeDecl();
    if (TargetDecl && TargetDecl->hasAttr<NotTailCalledAttr>())
      Call->setTailCallKind(llvm::CallInst::TCK_NoTail);
  }
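  // E.g. (illustrative declaration, not from this file):
  //   __attribute__((not_tail_called)) void log_frame(void);
  // makes calls to log_frame carry the `notail` marker set above.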
  // 4. Finish the call.

  // If the call doesn't return, finish the basic block and clear the
  // insertion point; this allows the rest of IRGen to discard
  // unreachable code.
  if (CS.doesNotReturn()) {
    if (UnusedReturnSize)
      EmitLifetimeEnd(llvm::ConstantInt::get(Int64Ty, UnusedReturnSize),
                      SRetPtr.getPointer());

    Builder.CreateUnreachable();
    Builder.ClearInsertionPoint();

    // FIXME: For now, emit a dummy basic block because expr emitters
    // generally are not ready to handle emitting expressions at unreachable
    // points.
    EnsureInsertPoint();

    // Return a reasonable RValue.
    return GetUndefRValue(RetTy);
  }

  // Perform the swifterror writeback.
  if (swiftErrorTemp.isValid()) {
    llvm::Value *errorResult = Builder.CreateLoad(swiftErrorTemp);
    Builder.CreateStore(errorResult, swiftErrorArg);
  }

  // Emit any call-associated writebacks immediately. Arguably this
  // should happen after any return-value munging.
  if (CallArgs.hasWritebacks())
    emitWritebacks(*this, CallArgs);

  // The stack cleanup for inalloca arguments has to run out of the normal
  // lexical order, so deactivate it and run it manually here.
  CallArgs.freeArgumentMemory(*this);
  // Extract the return value.
  RValue Ret = [&] {
    switch (RetAI.getKind()) {
    case ABIArgInfo::CoerceAndExpand: {
      auto coercionType = RetAI.getCoerceAndExpandType();
      auto layout = CGM.getDataLayout().getStructLayout(coercionType);

      Address addr = SRetPtr;
      addr = Builder.CreateElementBitCast(addr, coercionType);

      assert(CI->getType() == RetAI.getUnpaddedCoerceAndExpandType());
      bool requiresExtract = isa<llvm::StructType>(CI->getType());

      unsigned unpaddedIndex = 0;
      for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
        llvm::Type *eltType = coercionType->getElementType(i);
        if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType)) continue;
        Address eltAddr = Builder.CreateStructGEP(addr, i, layout);
        llvm::Value *elt = CI;
        if (requiresExtract)
          elt = Builder.CreateExtractValue(elt, unpaddedIndex++);
        else
          assert(unpaddedIndex == 0);
        Builder.CreateStore(elt, eltAddr);
      }
      // FALLTHROUGH
      LLVM_FALLTHROUGH;
    }

    case ABIArgInfo::InAlloca:
    case ABIArgInfo::Indirect: {
      RValue ret = convertTempToRValue(SRetPtr, RetTy, SourceLocation());
      if (UnusedReturnSize)
        EmitLifetimeEnd(llvm::ConstantInt::get(Int64Ty, UnusedReturnSize),
                        SRetPtr.getPointer());
      return ret;
    }

    case ABIArgInfo::Ignore:
      // If we are ignoring an argument that had a result, make sure to
      // construct the appropriate return value for our caller.
      return GetUndefRValue(RetTy);

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      llvm::Type *RetIRTy = ConvertType(RetTy);
      if (RetAI.getCoerceToType() == RetIRTy && RetAI.getDirectOffset() == 0) {
        switch (getEvaluationKind(RetTy)) {
        case TEK_Complex: {
          llvm::Value *Real = Builder.CreateExtractValue(CI, 0);
          llvm::Value *Imag = Builder.CreateExtractValue(CI, 1);
          return RValue::getComplex(std::make_pair(Real, Imag));
        }
        case TEK_Aggregate: {
          Address DestPtr = ReturnValue.getValue();
          bool DestIsVolatile = ReturnValue.isVolatile();

          if (!DestPtr.isValid()) {
            DestPtr = CreateMemTemp(RetTy, "agg.tmp");
            DestIsVolatile = false;
          }
          BuildAggStore(*this, CI, DestPtr, DestIsVolatile);
          return RValue::getAggregate(DestPtr);
        }
        case TEK_Scalar: {
          // If the return value doesn't match, perform a bitcast to coerce it.
          // This can happen due to trivial type mismatches.
          llvm::Value *V = CI;
          if (V->getType() != RetIRTy)
            V = Builder.CreateBitCast(V, RetIRTy);
          return RValue::get(V);
        }
        }
        llvm_unreachable("bad evaluation kind");
      }

      Address DestPtr = ReturnValue.getValue();
      bool DestIsVolatile = ReturnValue.isVolatile();

      if (!DestPtr.isValid()) {
        DestPtr = CreateMemTemp(RetTy, "coerce");
        DestIsVolatile = false;
      }

      // If the value is offset in memory, apply the offset now.
      Address StorePtr = emitAddressAtOffset(*this, DestPtr, RetAI);
      CreateCoercedStore(CI, StorePtr, DestIsVolatile, *this);

      return convertTempToRValue(DestPtr, RetTy, SourceLocation());
    }

    case ABIArgInfo::Expand:
      llvm_unreachable("Invalid ABI kind for return argument");
    }

    llvm_unreachable("Unhandled ABIArgInfo::Kind");
  } ();
  // Emit the assume_aligned check on the return value.
  const Decl *TargetDecl = Callee.getAbstractInfo().getCalleeDecl();
  if (Ret.isScalar() && TargetDecl) {
    if (const auto *AA = TargetDecl->getAttr<AssumeAlignedAttr>()) {
      llvm::Value *OffsetValue = nullptr;
      if (const auto *Offset = AA->getOffset())
        OffsetValue = EmitScalarExpr(Offset);

      llvm::Value *Alignment = EmitScalarExpr(AA->getAlignment());
      llvm::ConstantInt *AlignmentCI = cast<llvm::ConstantInt>(Alignment);
      EmitAlignmentAssumption(Ret.getScalarVal(), AlignmentCI->getZExtValue(),
                              OffsetValue);
    } else if (const auto *AA = TargetDecl->getAttr<AllocAlignAttr>()) {
      llvm::Value *ParamVal =
          CallArgs[AA->getParamIndex() - 1].RV.getScalarVal();
      EmitAlignmentAssumption(Ret.getScalarVal(), ParamVal);
    }
  }
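  // Illustrative declarations that reach the two branches above (assumed
  // examples, not from this file):
  //   void *pool_base(void) __attribute__((assume_aligned(64)));
  //   void *my_aligned_alloc(size_t n, size_t align)
  //       __attribute__((alloc_align(2)));   // alignment comes from arg #2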
  return Ret;
}

/* VarArg handling */

Address CodeGenFunction::EmitVAArg(VAArgExpr *VE, Address &VAListAddr) {
  VAListAddr = VE->isMicrosoftABI()
                 ? EmitMSVAListRef(VE->getSubExpr())
                 : EmitVAListRef(VE->getSubExpr());
  QualType Ty = VE->getType();
  if (VE->isMicrosoftABI())
    return CGM.getTypes().getABIInfo().EmitMSVAArg(*this, VAListAddr, Ty);
  return CGM.getTypes().getABIInfo().EmitVAArg(*this, VAListAddr, Ty);
}
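// Illustrative source that lands in EmitVAArg (assumed example, not from this
// file):
//   int first_int(int n, ...) {
//     va_list ap; va_start(ap, n);
//     int x = va_arg(ap, int);   // this expression is lowered here
//     va_end(ap); return x;
//   }
// The target's ABIInfo decides how the va_list is read and advanced; the
// Microsoft ABI uses its own va_list representation, hence the split above.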