//===--- CGCall.cpp - Encapsulate calling convention details ----*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// These classes wrap the information about a call or function
// definition used to handle ABI compliance.
//
//===----------------------------------------------------------------------===//

#include "CGCall.h"
#include "ABIInfo.h"
#include "CGCXXABI.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/MC/SubtargetFeature.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace clang;
using namespace CodeGen;

/***/

static unsigned ClangCallConvToLLVMCallConv(CallingConv CC) {
  switch (CC) {
  default: return llvm::CallingConv::C;
  case CC_X86StdCall: return llvm::CallingConv::X86_StdCall;
  case CC_X86FastCall: return llvm::CallingConv::X86_FastCall;
  case CC_X86ThisCall: return llvm::CallingConv::X86_ThisCall;
  case CC_AAPCS: return llvm::CallingConv::ARM_AAPCS;
  case CC_AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
  case CC_IntelOclBicc: return llvm::CallingConv::Intel_OCL_BI;
  // TODO: add support for CC_X86Pascal to llvm
  }
}
/// Derives the 'this' type for codegen purposes, i.e. ignoring method
/// qualification.
/// FIXME: address space qualification?
static CanQualType GetThisType(ASTContext &Context, const CXXRecordDecl *RD) {
  QualType RecTy = Context.getTagDeclType(RD)->getCanonicalTypeInternal();
  return Context.getPointerType(CanQualType::CreateUnsafe(RecTy));
}

/// Returns the canonical formal type of the given C++ method.
static CanQual<FunctionProtoType> GetFormalType(const CXXMethodDecl *MD) {
  return MD->getType()->getCanonicalTypeUnqualified()
           .getAs<FunctionProtoType>();
}

/// Returns the "extra-canonicalized" return type, which discards
/// qualifiers on the return type. Codegen doesn't care about them,
/// and it makes ABI code a little easier to be able to assume that
/// all parameter and return types are top-level unqualified.
static CanQualType GetReturnType(QualType RetTy) {
  return RetTy->getCanonicalTypeUnqualified().getUnqualifiedType();
}

/// Arrange the argument and result information for a value of the given
/// unprototyped freestanding function type.
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionNoProtoType> FTNP) {
  // When translating an unprototyped function type, always use a
  // variadic type.
  return arrangeLLVMFunctionInfo(FTNP->getResultType().getUnqualifiedType(),
                                 None, FTNP->getExtInfo(), RequiredArgs(0));
}

/// Arrange the LLVM function layout for a value of the given function
/// type, on top of any implicit parameters already stored. Use the
/// given ExtInfo instead of the ExtInfo from the function type.
static const CGFunctionInfo &arrangeLLVMFunctionInfo(CodeGenTypes &CGT,
                                       SmallVectorImpl<CanQualType> &prefix,
                                       CanQual<FunctionProtoType> FTP,
                                       FunctionType::ExtInfo extInfo) {
  RequiredArgs required = RequiredArgs::forPrototypePlus(FTP, prefix.size());
  // FIXME: Kill copy.
  for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
    prefix.push_back(FTP->getArgType(i));
  CanQualType resultType = FTP->getResultType().getUnqualifiedType();
  return CGT.arrangeLLVMFunctionInfo(resultType, prefix, extInfo, required);
}

/// Arrange the argument and result information for a free function (i.e.
/// not a C++ or ObjC instance method) of the given type.
static const CGFunctionInfo &arrangeFreeFunctionType(CodeGenTypes &CGT,
                                      SmallVectorImpl<CanQualType> &prefix,
                                      CanQual<FunctionProtoType> FTP) {
  return arrangeLLVMFunctionInfo(CGT, prefix, FTP, FTP->getExtInfo());
}

/// Given the formal ext-info of a C++ instance method, adjust it
/// according to the C++ ABI in effect.
static void adjustCXXMethodInfo(CodeGenTypes &CGT,
                                FunctionType::ExtInfo &extInfo,
                                bool isVariadic) {
  if (extInfo.getCC() == CC_Default) {
    CallingConv CC = CGT.getContext().getDefaultCXXMethodCallConv(isVariadic);
    extInfo = extInfo.withCallingConv(CC);
  }
}
/// Arrange the argument and result information for a C++ instance method
/// of the given type, on top of the prefix already stored, adjusting the
/// calling convention as required by the C++ ABI in effect.
static const CGFunctionInfo &arrangeCXXMethodType(CodeGenTypes &CGT,
                                      SmallVectorImpl<CanQualType> &prefix,
                                      CanQual<FunctionProtoType> FTP) {
  FunctionType::ExtInfo extInfo = FTP->getExtInfo();
  adjustCXXMethodInfo(CGT, extInfo, FTP->isVariadic());
  return arrangeLLVMFunctionInfo(CGT, prefix, FTP, extInfo);
}

/// Arrange the argument and result information for a value of the
/// given freestanding function type.
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionProtoType> FTP) {
  SmallVector<CanQualType, 16> argTypes;
  return ::arrangeFreeFunctionType(*this, argTypes, FTP);
}

static CallingConv getCallingConventionForDecl(const Decl *D) {
  // Set the appropriate calling convention for the Function.
  if (D->hasAttr<StdCallAttr>())
    return CC_X86StdCall;

  if (D->hasAttr<FastCallAttr>())
    return CC_X86FastCall;

  if (D->hasAttr<ThisCallAttr>())
    return CC_X86ThisCall;

  if (D->hasAttr<PascalAttr>())
    return CC_X86Pascal;

  if (PcsAttr *PCS = D->getAttr<PcsAttr>())
    return (PCS->getPCS() == PcsAttr::AAPCS ? CC_AAPCS : CC_AAPCS_VFP);

  if (D->hasAttr<PnaclCallAttr>())
    return CC_PnaclCall;

  if (D->hasAttr<IntelOclBiccAttr>())
    return CC_IntelOclBicc;

  return CC_C;
}

/// Arrange the argument and result information for a call to an
/// unknown C++ non-static member function of the given abstract type.
/// The member function must be an ordinary function, i.e. not a
/// constructor or destructor.
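/// For example (illustrative): for `struct A { int f(int); }`, the arranged
/// signature is conceptually (A*, int) -> int, because the implicit 'this'
/// pointer is prepended before the formal parameters.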
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodType(const CXXRecordDecl *RD,
                                   const FunctionProtoType *FTP) {
  SmallVector<CanQualType, 16> argTypes;

  // Add the 'this' pointer.
  argTypes.push_back(GetThisType(Context, RD));

  return ::arrangeCXXMethodType(*this, argTypes,
              FTP->getCanonicalTypeUnqualified().getAs<FunctionProtoType>());
}

/// Arrange the argument and result information for a declaration or
/// definition of the given C++ non-static member function. The
/// member function must be an ordinary function, i.e. not a
/// constructor or destructor.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodDeclaration(const CXXMethodDecl *MD) {
  assert(!isa<CXXConstructorDecl>(MD) && "wrong method for constructors!");
  assert(!isa<CXXDestructorDecl>(MD) && "wrong method for destructors!");

  CanQual<FunctionProtoType> prototype = GetFormalType(MD);

  if (MD->isInstance()) {
    // The abstract case is perfectly fine.
    return arrangeCXXMethodType(MD->getParent(), prototype.getTypePtr());
  }

  return arrangeFreeFunctionType(prototype);
}
/// Arrange the argument and result information for a declaration
/// or definition of the given constructor variant.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXConstructorDeclaration(const CXXConstructorDecl *D,
                                               CXXCtorType ctorKind) {
  SmallVector<CanQualType, 16> argTypes;
  argTypes.push_back(GetThisType(Context, D->getParent()));

  GlobalDecl GD(D, ctorKind);
  CanQualType resultType =
    TheCXXABI.HasThisReturn(GD) ? argTypes.front() : Context.VoidTy;

  TheCXXABI.BuildConstructorSignature(D, ctorKind, resultType, argTypes);

  CanQual<FunctionProtoType> FTP = GetFormalType(D);

  RequiredArgs required = RequiredArgs::forPrototypePlus(FTP, argTypes.size());

  // Add the formal parameters.
  for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
    argTypes.push_back(FTP->getArgType(i));

  FunctionType::ExtInfo extInfo = FTP->getExtInfo();
  adjustCXXMethodInfo(*this, extInfo, FTP->isVariadic());
  return arrangeLLVMFunctionInfo(resultType, argTypes, extInfo, required);
}

/// Arrange the argument and result information for a declaration,
/// definition, or call to the given destructor variant. It so
/// happens that all three cases produce the same information.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXDestructor(const CXXDestructorDecl *D,
                                   CXXDtorType dtorKind) {
  SmallVector<CanQualType, 2> argTypes;
  argTypes.push_back(GetThisType(Context, D->getParent()));

  GlobalDecl GD(D, dtorKind);
  CanQualType resultType =
    TheCXXABI.HasThisReturn(GD) ? argTypes.front() : Context.VoidTy;

  TheCXXABI.BuildDestructorSignature(D, dtorKind, resultType, argTypes);

  CanQual<FunctionProtoType> FTP = GetFormalType(D);
  assert(FTP->getNumArgs() == 0 && "dtor with formal parameters");
  assert(!FTP->isVariadic() && "dtor with variadic signature");

  FunctionType::ExtInfo extInfo = FTP->getExtInfo();
  adjustCXXMethodInfo(*this, extInfo, false);
  return arrangeLLVMFunctionInfo(resultType, argTypes, extInfo,
                                 RequiredArgs::All);
}
/// Arrange the argument and result information for the declaration or
/// definition of the given function.
const CGFunctionInfo &
CodeGenTypes::arrangeFunctionDeclaration(const FunctionDecl *FD) {
  if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
    if (MD->isInstance())
      return arrangeCXXMethodDeclaration(MD);

  CanQualType FTy = FD->getType()->getCanonicalTypeUnqualified();

  assert(isa<FunctionType>(FTy));

  // When declaring a function without a prototype, always use a
  // non-variadic type.
  if (isa<FunctionNoProtoType>(FTy)) {
    CanQual<FunctionNoProtoType> noProto = FTy.getAs<FunctionNoProtoType>();
    return arrangeLLVMFunctionInfo(noProto->getResultType(), None,
                                   noProto->getExtInfo(), RequiredArgs::All);
  }

  assert(isa<FunctionProtoType>(FTy));
  return arrangeFreeFunctionType(FTy.getAs<FunctionProtoType>());
}

/// Arrange the argument and result information for the declaration or
/// definition of an Objective-C method.
const CGFunctionInfo &
CodeGenTypes::arrangeObjCMethodDeclaration(const ObjCMethodDecl *MD) {
  // It happens that this is the same as a call with no optional
  // arguments, except also using the formal 'self' type.
  return arrangeObjCMessageSendSignature(MD, MD->getSelfDecl()->getType());
}

/// Arrange the argument and result information for the function type
/// through which to perform a send to the given Objective-C method,
/// using the given receiver type. The receiver type is not always
/// the 'self' type of the method or even an Objective-C pointer type.
/// This is *not* the right method for actually performing such a
/// message send, due to the possibility of optional arguments.
const CGFunctionInfo &
CodeGenTypes::arrangeObjCMessageSendSignature(const ObjCMethodDecl *MD,
                                              QualType receiverType) {
  SmallVector<CanQualType, 16> argTys;
  argTys.push_back(Context.getCanonicalParamType(receiverType));
  argTys.push_back(Context.getCanonicalParamType(Context.getObjCSelType()));
  // FIXME: Kill copy?
  for (ObjCMethodDecl::param_const_iterator i = MD->param_begin(),
         e = MD->param_end(); i != e; ++i) {
    argTys.push_back(Context.getCanonicalParamType((*i)->getType()));
  }

  FunctionType::ExtInfo einfo;
  einfo = einfo.withCallingConv(getCallingConventionForDecl(MD));

  if (getContext().getLangOpts().ObjCAutoRefCount &&
      MD->hasAttr<NSReturnsRetainedAttr>())
    einfo = einfo.withProducesResult(true);

  RequiredArgs required =
    (MD->isVariadic() ? RequiredArgs(argTys.size()) : RequiredArgs::All);

  return arrangeLLVMFunctionInfo(GetReturnType(MD->getResultType()), argTys,
                                 einfo, required);
}

const CGFunctionInfo &
CodeGenTypes::arrangeGlobalDeclaration(GlobalDecl GD) {
  // FIXME: Do we need to handle ObjCMethodDecl?
  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());

  if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD))
    return arrangeCXXConstructorDeclaration(CD, GD.getCtorType());

  if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(FD))
    return arrangeCXXDestructor(DD, GD.getDtorType());

  return arrangeFunctionDeclaration(FD);
}

/// Arrange a call as unto a free function, except possibly with an
/// additional number of formal parameters considered required.
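/// For example (illustrative): a block call contributes one extra required
/// argument (the block pointer), so a call through a variadic prototype with
/// two fixed parameters yields RequiredArgs(3). An unprototyped callee on a
/// target whose isNoProtoCallVariadic() hook returns true treats every actual
/// argument as required while still being marked variadic.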
static const CGFunctionInfo &
arrangeFreeFunctionLikeCall(CodeGenTypes &CGT,
                            const CallArgList &args,
                            const FunctionType *fnType,
                            unsigned numExtraRequiredArgs) {
  assert(args.size() >= numExtraRequiredArgs);

  // In most cases, there are no optional arguments.
  RequiredArgs required = RequiredArgs::All;

  // If we have a variadic prototype, the required arguments are the
  // extra prefix plus the arguments in the prototype.
  if (const FunctionProtoType *proto = dyn_cast<FunctionProtoType>(fnType)) {
    if (proto->isVariadic())
      required = RequiredArgs(proto->getNumArgs() + numExtraRequiredArgs);

  // If we don't have a prototype at all, but we're supposed to
  // explicitly use the variadic convention for unprototyped calls,
  // treat all of the arguments as required but preserve the nominal
  // possibility of variadics.
  } else if (CGT.CGM.getTargetCodeGenInfo()
               .isNoProtoCallVariadic(args,
                                      cast<FunctionNoProtoType>(fnType))) {
    required = RequiredArgs(args.size());
  }

  return CGT.arrangeFreeFunctionCall(fnType->getResultType(), args,
                                     fnType->getExtInfo(), required);
}
/// Figure out the rules for calling a function with the given formal
/// type using the given arguments. The arguments are necessary
/// because the function might be unprototyped, in which case it's
/// target-dependent in crazy ways.
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionCall(const CallArgList &args,
                                      const FunctionType *fnType) {
  return arrangeFreeFunctionLikeCall(*this, args, fnType, 0);
}

/// A block function call is essentially a free-function call with an
/// extra implicit argument.
const CGFunctionInfo &
CodeGenTypes::arrangeBlockFunctionCall(const CallArgList &args,
                                       const FunctionType *fnType) {
  return arrangeFreeFunctionLikeCall(*this, args, fnType, 1);
}

const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionCall(QualType resultType,
                                      const CallArgList &args,
                                      FunctionType::ExtInfo info,
                                      RequiredArgs required) {
  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> argTypes;
  for (CallArgList::const_iterator i = args.begin(), e = args.end();
       i != e; ++i)
    argTypes.push_back(Context.getCanonicalParamType(i->Ty));
  return arrangeLLVMFunctionInfo(GetReturnType(resultType), argTypes, info,
                                 required);
}

/// Arrange a call to a C++ method, passing the given arguments.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodCall(const CallArgList &args,
                                   const FunctionProtoType *FPT,
                                   RequiredArgs required) {
  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> argTypes;
  for (CallArgList::const_iterator i = args.begin(), e = args.end();
       i != e; ++i)
    argTypes.push_back(Context.getCanonicalParamType(i->Ty));

  FunctionType::ExtInfo info = FPT->getExtInfo();
  adjustCXXMethodInfo(*this, info, FPT->isVariadic());
  return arrangeLLVMFunctionInfo(GetReturnType(FPT->getResultType()),
                                 argTypes, info, required);
}

const CGFunctionInfo &
CodeGenTypes::arrangeFunctionDeclaration(QualType resultType,
                                         const FunctionArgList &args,
                                         const FunctionType::ExtInfo &info,
                                         bool isVariadic) {
  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> argTypes;
  for (FunctionArgList::const_iterator i = args.begin(), e = args.end();
       i != e; ++i)
    argTypes.push_back(Context.getCanonicalParamType((*i)->getType()));

  RequiredArgs required =
    (isVariadic ? RequiredArgs(args.size()) : RequiredArgs::All);
  return arrangeLLVMFunctionInfo(GetReturnType(resultType), argTypes, info,
                                 required);
}

const CGFunctionInfo &CodeGenTypes::arrangeNullaryFunction() {
  return arrangeLLVMFunctionInfo(getContext().VoidTy, None,
                                 FunctionType::ExtInfo(), RequiredArgs::All);
}

/// Arrange the argument and result information for an abstract value
/// of a given function type. This is the method which all of the
/// above functions ultimately defer to.
const CGFunctionInfo &
CodeGenTypes::arrangeLLVMFunctionInfo(CanQualType resultType,
                                      ArrayRef<CanQualType> argTypes,
                                      FunctionType::ExtInfo info,
                                      RequiredArgs required) {
#ifndef NDEBUG
  for (ArrayRef<CanQualType>::const_iterator
         I = argTypes.begin(), E = argTypes.end(); I != E; ++I)
    assert(I->isCanonicalAsParam());
#endif

  unsigned CC = ClangCallConvToLLVMCallConv(info.getCC());

  // Lookup or create unique function info.
  llvm::FoldingSetNodeID ID;
  CGFunctionInfo::Profile(ID, info, required, resultType, argTypes);

  void *insertPos = 0;
  CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, insertPos);
  if (FI)
    return *FI;

  // Construct the function info. We co-allocate the ArgInfos.
  FI = CGFunctionInfo::create(CC, info, resultType, argTypes, required);
  FunctionInfos.InsertNode(FI, insertPos);

  bool inserted = FunctionsBeingProcessed.insert(FI); (void)inserted;
  assert(inserted && "Recursively being processed?");

  // Compute ABI information.
  getABIInfo().computeInfo(*FI);

  // Loop over all of the computed argument and return value info. If any of
  // them are direct or extend without a specified coerce type, specify the
  // default now.
  ABIArgInfo &retInfo = FI->getReturnInfo();
  if (retInfo.canHaveCoerceToType() && retInfo.getCoerceToType() == 0)
    retInfo.setCoerceToType(ConvertType(FI->getReturnType()));

  for (CGFunctionInfo::arg_iterator I = FI->arg_begin(), E = FI->arg_end();
       I != E; ++I)
    if (I->info.canHaveCoerceToType() && I->info.getCoerceToType() == 0)
      I->info.setCoerceToType(ConvertType(I->type));

  bool erased = FunctionsBeingProcessed.erase(FI); (void)erased;
  assert(erased && "Not in set?");

  return *FI;
}
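// The CGFunctionInfo and its ArgInfo array are co-allocated in one buffer:
// the CGFunctionInfo object is followed by argTypes.size() + 1 ArgInfo slots,
// where slot 0 holds the return type and slots 1..N hold the parameter types
// (accessed through getArgsBuffer() below).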
CGFunctionInfo *CGFunctionInfo::create(unsigned llvmCC,
                                       const FunctionType::ExtInfo &info,
                                       CanQualType resultType,
                                       ArrayRef<CanQualType> argTypes,
                                       RequiredArgs required) {
  void *buffer = operator new(sizeof(CGFunctionInfo) +
                              sizeof(ArgInfo) * (argTypes.size() + 1));
  CGFunctionInfo *FI = new(buffer) CGFunctionInfo();
  FI->CallingConvention = llvmCC;
  FI->EffectiveCallingConvention = llvmCC;
  FI->ASTCallingConvention = info.getCC();
  FI->NoReturn = info.getNoReturn();
  FI->ReturnsRetained = info.getProducesResult();
  FI->Required = required;
  FI->HasRegParm = info.getHasRegParm();
  FI->RegParm = info.getRegParm();
  FI->NumArgs = argTypes.size();
  FI->getArgsBuffer()[0].type = resultType;
  for (unsigned i = 0, e = argTypes.size(); i != e; ++i)
    FI->getArgsBuffer()[i + 1].type = argTypes[i];
  return FI;
}

/***/
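// GetExpandedTypes flattens an aggregate into the IR types of its scalar
// leaves. For example (illustrative, assuming 'int' lowers to i32):
//   struct { int a; float b[2]; }  ->  i32, float, float
//   _Complex double                ->  double, double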
void CodeGenTypes::GetExpandedTypes(QualType type,
                     SmallVectorImpl<llvm::Type*> &expandedTypes) {
  if (const ConstantArrayType *AT = Context.getAsConstantArrayType(type)) {
    uint64_t NumElts = AT->getSize().getZExtValue();
    for (uint64_t Elt = 0; Elt < NumElts; ++Elt)
      GetExpandedTypes(AT->getElementType(), expandedTypes);
  } else if (const RecordType *RT = type->getAs<RecordType>()) {
    const RecordDecl *RD = RT->getDecl();
    assert(!RD->hasFlexibleArrayMember() &&
           "Cannot expand structure with flexible array.");
    if (RD->isUnion()) {
      // Unions can occur here only in degenerate cases: after flattening,
      // all the fields are the same. Thus we have to use the "largest" field.
      const FieldDecl *LargestFD = 0;
      CharUnits UnionSize = CharUnits::Zero();

      for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
           i != e; ++i) {
        const FieldDecl *FD = *i;
        assert(!FD->isBitField() &&
               "Cannot expand structure with bit-field members.");
        CharUnits FieldSize = getContext().getTypeSizeInChars(FD->getType());
        if (UnionSize < FieldSize) {
          UnionSize = FieldSize;
          LargestFD = FD;
        }
      }
      if (LargestFD)
        GetExpandedTypes(LargestFD->getType(), expandedTypes);
    } else {
      for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
           i != e; ++i) {
        assert(!i->isBitField() &&
               "Cannot expand structure with bit-field members.");
        GetExpandedTypes(i->getType(), expandedTypes);
      }
    }
  } else if (const ComplexType *CT = type->getAs<ComplexType>()) {
    llvm::Type *EltTy = ConvertType(CT->getElementType());
    expandedTypes.push_back(EltTy);
    expandedTypes.push_back(EltTy);
  } else
    expandedTypes.push_back(ConvertType(type));
}
llvm::Function::arg_iterator
CodeGenFunction::ExpandTypeFromArgs(QualType Ty, LValue LV,
                                    llvm::Function::arg_iterator AI) {
  assert(LV.isSimple() &&
         "Unexpected non-simple lvalue during struct expansion.");

  if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
    unsigned NumElts = AT->getSize().getZExtValue();
    QualType EltTy = AT->getElementType();
    for (unsigned Elt = 0; Elt < NumElts; ++Elt) {
      llvm::Value *EltAddr = Builder.CreateConstGEP2_32(LV.getAddress(), 0, Elt);
      LValue LV = MakeAddrLValue(EltAddr, EltTy);
      AI = ExpandTypeFromArgs(EltTy, LV, AI);
    }
  } else if (const RecordType *RT = Ty->getAs<RecordType>()) {
    RecordDecl *RD = RT->getDecl();
    if (RD->isUnion()) {
      // Unions can occur here only in degenerate cases: after flattening,
      // all the fields are the same. Thus we have to use the "largest" field.
      const FieldDecl *LargestFD = 0;
      CharUnits UnionSize = CharUnits::Zero();

      for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
           i != e; ++i) {
        const FieldDecl *FD = *i;
        assert(!FD->isBitField() &&
               "Cannot expand structure with bit-field members.");
        CharUnits FieldSize = getContext().getTypeSizeInChars(FD->getType());
        if (UnionSize < FieldSize) {
          UnionSize = FieldSize;
          LargestFD = FD;
        }
      }
      if (LargestFD) {
        // FIXME: What are the right qualifiers here?
        LValue SubLV = EmitLValueForField(LV, LargestFD);
        AI = ExpandTypeFromArgs(LargestFD->getType(), SubLV, AI);
      }
    } else {
      for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
           i != e; ++i) {
        FieldDecl *FD = *i;
        QualType FT = FD->getType();

        // FIXME: What are the right qualifiers here?
        LValue SubLV = EmitLValueForField(LV, FD);
        AI = ExpandTypeFromArgs(FT, SubLV, AI);
      }
    }
  } else if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
    QualType EltTy = CT->getElementType();
    llvm::Value *RealAddr = Builder.CreateStructGEP(LV.getAddress(), 0, "real");
    EmitStoreThroughLValue(RValue::get(AI++), MakeAddrLValue(RealAddr, EltTy));
    llvm::Value *ImagAddr = Builder.CreateStructGEP(LV.getAddress(), 1, "imag");
    EmitStoreThroughLValue(RValue::get(AI++), MakeAddrLValue(ImagAddr, EltTy));
  } else {
    EmitStoreThroughLValue(RValue::get(AI), LV);
    ++AI;
  }

  return AI;
}

/// EnterStructPointerForCoercedAccess - Given a struct pointer from which we
/// are accessing some number of bytes, try to GEP into the struct to get at
/// its inner goodness. Dive as deep as possible without entering an element
/// with an in-memory size smaller than DstSize.
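/// For example (illustrative, assuming 4-byte i32): given a source type of
/// { { i32, i32 }, i8 }, a DstSize of 8 stops at the inner { i32, i32 },
/// while a DstSize of 4 dives all the way down to the first i32.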
static llvm::Value *
EnterStructPointerForCoercedAccess(llvm::Value *SrcPtr,
                                   llvm::StructType *SrcSTy,
                                   uint64_t DstSize, CodeGenFunction &CGF) {
  // We can't dive into a zero-element struct.
  if (SrcSTy->getNumElements() == 0) return SrcPtr;

  llvm::Type *FirstElt = SrcSTy->getElementType(0);

  // If the first elt is at least as large as what we're looking for, or if the
  // first element is the same size as the whole struct, we can enter it.
  uint64_t FirstEltSize =
    CGF.CGM.getDataLayout().getTypeAllocSize(FirstElt);
  if (FirstEltSize < DstSize &&
      FirstEltSize < CGF.CGM.getDataLayout().getTypeAllocSize(SrcSTy))
    return SrcPtr;

  // GEP into the first element.
  SrcPtr = CGF.Builder.CreateConstGEP2_32(SrcPtr, 0, 0, "coerce.dive");

  // If the first element is a struct, recurse.
  llvm::Type *SrcTy =
    cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
  if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy))
    return EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);

  return SrcPtr;
}

/// CoerceIntOrPtrToIntOrPtr - Convert a value Val to the specific Ty where both
/// are either integers or pointers. This does a truncation of the value if it
/// is too large or a zero extension if it is too small.
///
/// This behaves as if the value were coerced through memory, so on big-endian
/// targets the high bits are preserved in a truncation, while little-endian
/// targets preserve the low bits.
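/// For example (illustrative): truncating an i64 to an i32 is a plain trunc
/// on a little-endian target (the low 32 bits survive), but on a big-endian
/// target the value is first shifted right by 32 so the high bits (the bytes
/// at the lower addresses) survive, matching what a store followed by a
/// narrower load would produce.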
static llvm::Value *CoerceIntOrPtrToIntOrPtr(llvm::Value *Val,
                                             llvm::Type *Ty,
                                             CodeGenFunction &CGF) {
  if (Val->getType() == Ty)
    return Val;

  if (isa<llvm::PointerType>(Val->getType())) {
    // If this is Pointer->Pointer avoid conversion to and from int.
    if (isa<llvm::PointerType>(Ty))
      return CGF.Builder.CreateBitCast(Val, Ty, "coerce.val");

    // Convert the pointer to an integer so we can play with its width.
    Val = CGF.Builder.CreatePtrToInt(Val, CGF.IntPtrTy, "coerce.val.pi");
  }

  llvm::Type *DestIntTy = Ty;
  if (isa<llvm::PointerType>(DestIntTy))
    DestIntTy = CGF.IntPtrTy;

  if (Val->getType() != DestIntTy) {
    const llvm::DataLayout &DL = CGF.CGM.getDataLayout();
    if (DL.isBigEndian()) {
      // Preserve the high bits on big-endian targets.
      // That is what memory coercion does.
      uint64_t SrcSize = DL.getTypeAllocSizeInBits(Val->getType());
      uint64_t DstSize = DL.getTypeAllocSizeInBits(DestIntTy);
      if (SrcSize > DstSize) {
        Val = CGF.Builder.CreateLShr(Val, SrcSize - DstSize, "coerce.highbits");
        Val = CGF.Builder.CreateTrunc(Val, DestIntTy, "coerce.val.ii");
      } else {
        Val = CGF.Builder.CreateZExt(Val, DestIntTy, "coerce.val.ii");
        Val = CGF.Builder.CreateShl(Val, DstSize - SrcSize, "coerce.highbits");
      }
    } else {
      // Little-endian targets preserve the low bits. No shifts required.
      Val = CGF.Builder.CreateIntCast(Val, DestIntTy, false, "coerce.val.ii");
    }
  }

  if (isa<llvm::PointerType>(Ty))
    Val = CGF.Builder.CreateIntToPtr(Val, Ty, "coerce.val.ip");
  return Val;
}

/// CreateCoercedLoad - Create a load from \arg SrcPtr interpreted as
/// a pointer to an object of type \arg Ty.
///
/// This safely handles the case when the src type is smaller than the
/// destination type; in this situation the values of bits which are not
/// present in the src are undefined.
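/// For example (illustrative): loading a 4-byte aggregate such as
/// { i16, i16 } as an i64 coerce type takes the coercion-through-memory path
/// below (temporary alloca plus memcpy), and the upper 4 bytes of the result
/// are undefined.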
static llvm::Value *CreateCoercedLoad(llvm::Value *SrcPtr,
                                      llvm::Type *Ty,
                                      CodeGenFunction &CGF) {
  llvm::Type *SrcTy =
    cast<llvm::PointerType>(SrcPtr->getType())->getElementType();

  // If SrcTy and Ty are the same, just do a load.
  if (SrcTy == Ty)
    return CGF.Builder.CreateLoad(SrcPtr);

  uint64_t DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(Ty);

  if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) {
    SrcPtr = EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);
    SrcTy = cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
  }

  uint64_t SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);

  // If the source and destination are integer or pointer types, just do an
  // extension or truncation to the desired type.
  if ((isa<llvm::IntegerType>(Ty) || isa<llvm::PointerType>(Ty)) &&
      (isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy))) {
    llvm::LoadInst *Load = CGF.Builder.CreateLoad(SrcPtr);
    return CoerceIntOrPtrToIntOrPtr(Load, Ty, CGF);
  }

  // If load is legal, just bitcast the src pointer.
  if (SrcSize >= DstSize) {
    // Generally SrcSize is never greater than DstSize, since this means we are
    // losing bits. However, this can happen in cases where the structure has
    // additional padding, for example due to a user specified alignment.
    //
    // FIXME: Assert that we aren't truncating non-padding bits when we have
    // access to that information.
    llvm::Value *Casted =
      CGF.Builder.CreateBitCast(SrcPtr, llvm::PointerType::getUnqual(Ty));
    llvm::LoadInst *Load = CGF.Builder.CreateLoad(Casted);
    // FIXME: Use better alignment / avoid requiring aligned load.
    Load->setAlignment(1);
    return Load;
  }

  // Otherwise do coercion through memory. This is stupid, but
  // simple.
  llvm::Value *Tmp = CGF.CreateTempAlloca(Ty);
  llvm::Type *I8PtrTy = CGF.Builder.getInt8PtrTy();
  llvm::Value *Casted = CGF.Builder.CreateBitCast(Tmp, I8PtrTy);
  llvm::Value *SrcCasted = CGF.Builder.CreateBitCast(SrcPtr, I8PtrTy);
  // FIXME: Use better alignment.
  CGF.Builder.CreateMemCpy(Casted, SrcCasted,
                           llvm::ConstantInt::get(CGF.IntPtrTy, SrcSize),
                           1, false);
  return CGF.Builder.CreateLoad(Tmp);
}

// Function to store a first-class aggregate into memory. We prefer to
// store the elements rather than the aggregate to be more friendly to
// fast-isel.
// FIXME: Do we need to recurse here?
static void BuildAggStore(CodeGenFunction &CGF, llvm::Value *Val,
                          llvm::Value *DestPtr, bool DestIsVolatile,
                          bool LowAlignment) {
  // Prefer scalar stores to first-class aggregate stores.
  if (llvm::StructType *STy =
        dyn_cast<llvm::StructType>(Val->getType())) {
    for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
      llvm::Value *EltPtr = CGF.Builder.CreateConstGEP2_32(DestPtr, 0, i);
      llvm::Value *Elt = CGF.Builder.CreateExtractValue(Val, i);
      llvm::StoreInst *SI = CGF.Builder.CreateStore(Elt, EltPtr,
                                                    DestIsVolatile);
      if (LowAlignment)
        SI->setAlignment(1);
    }
  } else {
    llvm::StoreInst *SI = CGF.Builder.CreateStore(Val, DestPtr, DestIsVolatile);
    if (LowAlignment)
      SI->setAlignment(1);
  }
}
/// CreateCoercedStore - Create a store to \arg DstPtr from \arg Src,
/// where the source and destination may have different types.
///
/// This safely handles the case when the src type is larger than the
/// destination type; the upper bits of the src will be lost.
static void CreateCoercedStore(llvm::Value *Src,
                               llvm::Value *DstPtr,
                               bool DstIsVolatile,
                               CodeGenFunction &CGF) {
  llvm::Type *SrcTy = Src->getType();
  llvm::Type *DstTy =
    cast<llvm::PointerType>(DstPtr->getType())->getElementType();
  if (SrcTy == DstTy) {
    CGF.Builder.CreateStore(Src, DstPtr, DstIsVolatile);
    return;
  }

  uint64_t SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);

  if (llvm::StructType *DstSTy = dyn_cast<llvm::StructType>(DstTy)) {
    DstPtr = EnterStructPointerForCoercedAccess(DstPtr, DstSTy, SrcSize, CGF);
    DstTy = cast<llvm::PointerType>(DstPtr->getType())->getElementType();
  }

  // If the source and destination are integer or pointer types, just do an
  // extension or truncation to the desired type.
  if ((isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy)) &&
      (isa<llvm::IntegerType>(DstTy) || isa<llvm::PointerType>(DstTy))) {
    Src = CoerceIntOrPtrToIntOrPtr(Src, DstTy, CGF);
    CGF.Builder.CreateStore(Src, DstPtr, DstIsVolatile);
    return;
  }

  uint64_t DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(DstTy);

  // If store is legal, just bitcast the src pointer.
  if (SrcSize <= DstSize) {
    llvm::Value *Casted =
      CGF.Builder.CreateBitCast(DstPtr, llvm::PointerType::getUnqual(SrcTy));
    // FIXME: Use better alignment / avoid requiring aligned store.
    BuildAggStore(CGF, Src, Casted, DstIsVolatile, true);
  } else {
    // Otherwise do coercion through memory. This is stupid, but
    // simple.

    // Generally SrcSize is never greater than DstSize, since this means we are
    // losing bits. However, this can happen in cases where the structure has
    // additional padding, for example due to a user specified alignment.
    //
    // FIXME: Assert that we aren't truncating non-padding bits when we have
    // access to that information.
    llvm::Value *Tmp = CGF.CreateTempAlloca(SrcTy);
    CGF.Builder.CreateStore(Src, Tmp);
    llvm::Type *I8PtrTy = CGF.Builder.getInt8PtrTy();
    llvm::Value *Casted = CGF.Builder.CreateBitCast(Tmp, I8PtrTy);
    llvm::Value *DstCasted = CGF.Builder.CreateBitCast(DstPtr, I8PtrTy);
    // FIXME: Use better alignment.
    CGF.Builder.CreateMemCpy(DstCasted, Casted,
                             llvm::ConstantInt::get(CGF.IntPtrTy, DstSize),
                             1, false);
  }
}
/***/

bool CodeGenModule::ReturnTypeUsesSRet(const CGFunctionInfo &FI) {
  return FI.getReturnInfo().isIndirect();
}

bool CodeGenModule::ReturnTypeUsesFPRet(QualType ResultType) {
  if (const BuiltinType *BT = ResultType->getAs<BuiltinType>()) {
    switch (BT->getKind()) {
    default:
      return false;
    case BuiltinType::Float:
      return getTarget().useObjCFPRetForRealType(TargetInfo::Float);
    case BuiltinType::Double:
      return getTarget().useObjCFPRetForRealType(TargetInfo::Double);
    case BuiltinType::LongDouble:
      return getTarget().useObjCFPRetForRealType(TargetInfo::LongDouble);
    }
  }

  return false;
}

bool CodeGenModule::ReturnTypeUsesFP2Ret(QualType ResultType) {
  if (const ComplexType *CT = ResultType->getAs<ComplexType>()) {
    if (const BuiltinType *BT = CT->getElementType()->getAs<BuiltinType>()) {
      if (BT->getKind() == BuiltinType::LongDouble)
        return getTarget().useObjCFP2RetForComplexLongDouble();
    }
  }

  return false;
}

llvm::FunctionType *CodeGenTypes::GetFunctionType(GlobalDecl GD) {
  const CGFunctionInfo &FI = arrangeGlobalDeclaration(GD);
  return GetFunctionType(FI);
}

llvm::FunctionType *
CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI) {
  bool Inserted = FunctionsBeingProcessed.insert(&FI); (void)Inserted;
  assert(Inserted && "Recursively being processed?");

  SmallVector<llvm::Type*, 8> argTypes;
  llvm::Type *resultType = 0;

  const ABIArgInfo &retAI = FI.getReturnInfo();
  switch (retAI.getKind()) {
  case ABIArgInfo::Expand:
    llvm_unreachable("Invalid ABI kind for return argument");

  case ABIArgInfo::Extend:
  case ABIArgInfo::Direct:
    resultType = retAI.getCoerceToType();
    break;

  case ABIArgInfo::Indirect: {
    assert(!retAI.getIndirectAlign() && "Align unused on indirect return.");
    resultType = llvm::Type::getVoidTy(getLLVMContext());

    QualType ret = FI.getReturnType();
    llvm::Type *ty = ConvertType(ret);
    unsigned addressSpace = Context.getTargetAddressSpace(ret);
    argTypes.push_back(llvm::PointerType::get(ty, addressSpace));
    break;
  }

  case ABIArgInfo::Ignore:
    resultType = llvm::Type::getVoidTy(getLLVMContext());
    break;
  }

  // Add in all of the required arguments.
  CGFunctionInfo::const_arg_iterator it = FI.arg_begin(), ie;
  if (FI.isVariadic()) {
    ie = it + FI.getRequiredArgs().getNumRequiredArgs();
  } else {
    ie = FI.arg_end();
  }
  for (; it != ie; ++it) {
    const ABIArgInfo &argAI = it->info;

    // Insert a padding type to ensure proper alignment.
    if (llvm::Type *PaddingType = argAI.getPaddingType())
      argTypes.push_back(PaddingType);

    switch (argAI.getKind()) {
    case ABIArgInfo::Ignore:
      break;

    case ABIArgInfo::Indirect: {
      // Indirect arguments are always on the stack, which is addr space #0.
      llvm::Type *LTy = ConvertTypeForMem(it->type);
      argTypes.push_back(LTy->getPointerTo());
      break;
    }

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      // If the coerce-to type is a first class aggregate, flatten it. Either
      // way is semantically identical, but fast-isel and the optimizer
      // generally like scalar values better than FCAs.
      llvm::Type *argType = argAI.getCoerceToType();
      if (llvm::StructType *st = dyn_cast<llvm::StructType>(argType)) {
        for (unsigned i = 0, e = st->getNumElements(); i != e; ++i)
          argTypes.push_back(st->getElementType(i));
      } else {
        argTypes.push_back(argType);
      }
      break;
    }

    case ABIArgInfo::Expand:
      GetExpandedTypes(it->type, argTypes);
      break;
    }
  }

  bool Erased = FunctionsBeingProcessed.erase(&FI); (void)Erased;
  assert(Erased && "Not in set?");

  return llvm::FunctionType::get(resultType, argTypes, FI.isVariadic());
}

llvm::Type *CodeGenTypes::GetFunctionTypeForVTable(GlobalDecl GD) {
  const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
  const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();

  if (!isFuncTypeConvertible(FPT))
    return llvm::StructType::get(getLLVMContext());

  const CGFunctionInfo *Info;
  if (isa<CXXDestructorDecl>(MD))
    Info = &arrangeCXXDestructor(cast<CXXDestructorDecl>(MD), GD.getDtorType());
  else
    Info = &arrangeCXXMethodDeclaration(MD);
  return GetFunctionType(*Info);
}
void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
                                           const Decl *TargetDecl,
                                           AttributeListType &PAL,
                                           unsigned &CallingConv,
                                           bool AttrOnCallSite) {
  llvm::AttrBuilder FuncAttrs;
  llvm::AttrBuilder RetAttrs;

  CallingConv = FI.getEffectiveCallingConvention();

  if (FI.isNoReturn())
    FuncAttrs.addAttribute(llvm::Attribute::NoReturn);

  // FIXME: handle sseregparm someday...
  if (TargetDecl) {
    if (TargetDecl->hasAttr<ReturnsTwiceAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::ReturnsTwice);
    if (TargetDecl->hasAttr<NoThrowAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
    if (TargetDecl->hasAttr<NoReturnAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::NoReturn);

    if (const FunctionDecl *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
      const FunctionProtoType *FPT = Fn->getType()->getAs<FunctionProtoType>();
      if (FPT && FPT->isNothrow(getContext()))
        FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
      // Don't use [[noreturn]] or _Noreturn for a call to a virtual function.
      // These attributes are not inherited by overloads.
      const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(Fn);
      if (Fn->isNoReturn() && !(AttrOnCallSite && MD && MD->isVirtual()))
        FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
    }

    // 'const' and 'pure' attribute functions are also nounwind.
    if (TargetDecl->hasAttr<ConstAttr>()) {
      FuncAttrs.addAttribute(llvm::Attribute::ReadNone);
      FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
    } else if (TargetDecl->hasAttr<PureAttr>()) {
      FuncAttrs.addAttribute(llvm::Attribute::ReadOnly);
      FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
    }
    if (TargetDecl->hasAttr<MallocAttr>())
      RetAttrs.addAttribute(llvm::Attribute::NoAlias);
  }

  if (CodeGenOpts.OptimizeSize)
    FuncAttrs.addAttribute(llvm::Attribute::OptimizeForSize);
  if (CodeGenOpts.OptimizeSize == 2)
    FuncAttrs.addAttribute(llvm::Attribute::MinSize);
  if (CodeGenOpts.DisableRedZone)
    FuncAttrs.addAttribute(llvm::Attribute::NoRedZone);
  if (CodeGenOpts.NoImplicitFloat)
    FuncAttrs.addAttribute(llvm::Attribute::NoImplicitFloat);

  if (AttrOnCallSite) {
    // Attributes that should go on the call site only.
    if (!CodeGenOpts.SimplifyLibCalls)
      FuncAttrs.addAttribute(llvm::Attribute::NoBuiltin);
  } else {
    // Attributes that should go on the function, but not the call site.
    if (!CodeGenOpts.DisableFPElim) {
      FuncAttrs.addAttribute("no-frame-pointer-elim", "false");
      FuncAttrs.addAttribute("no-frame-pointer-elim-non-leaf", "false");
    } else if (CodeGenOpts.OmitLeafFramePointer) {
      FuncAttrs.addAttribute("no-frame-pointer-elim", "false");
      FuncAttrs.addAttribute("no-frame-pointer-elim-non-leaf", "true");
    } else {
      FuncAttrs.addAttribute("no-frame-pointer-elim", "true");
      FuncAttrs.addAttribute("no-frame-pointer-elim-non-leaf", "true");
    }

    FuncAttrs.addAttribute("less-precise-fpmad",
                           llvm::toStringRef(CodeGenOpts.LessPreciseFPMAD));
    FuncAttrs.addAttribute("no-infs-fp-math",
                           llvm::toStringRef(CodeGenOpts.NoInfsFPMath));
    FuncAttrs.addAttribute("no-nans-fp-math",
                           llvm::toStringRef(CodeGenOpts.NoNaNsFPMath));
    FuncAttrs.addAttribute("unsafe-fp-math",
                           llvm::toStringRef(CodeGenOpts.UnsafeFPMath));
    FuncAttrs.addAttribute("use-soft-float",
                           llvm::toStringRef(CodeGenOpts.SoftFloat));
    FuncAttrs.addAttribute("stack-protector-buffer-size",
                           llvm::utostr(CodeGenOpts.SSPBufferSize));

    bool NoFramePointerElimNonLeaf;
    if (!CodeGenOpts.DisableFPElim) {
      NoFramePointerElimNonLeaf = false;
    } else if (CodeGenOpts.OmitLeafFramePointer) {
      NoFramePointerElimNonLeaf = true;
    } else {
      NoFramePointerElimNonLeaf = true;
    }
    FuncAttrs.addAttribute("no-frame-pointer-elim-non-leaf",
                           llvm::toStringRef(NoFramePointerElimNonLeaf));

    if (!CodeGenOpts.StackRealignment)
      FuncAttrs.addAttribute("no-realign-stack");
  }

  QualType RetTy = FI.getReturnType();
  unsigned Index = 1;
  const ABIArgInfo &RetAI = FI.getReturnInfo();
  switch (RetAI.getKind()) {
  case ABIArgInfo::Extend:
    if (RetTy->hasSignedIntegerRepresentation())
      RetAttrs.addAttribute(llvm::Attribute::SExt);
    else if (RetTy->hasUnsignedIntegerRepresentation())
      RetAttrs.addAttribute(llvm::Attribute::ZExt);
    // FALL THROUGH
  case ABIArgInfo::Direct:
    if (RetAI.getInReg())
      RetAttrs.addAttribute(llvm::Attribute::InReg);
    break;
  case ABIArgInfo::Ignore:
    break;

  case ABIArgInfo::Indirect: {
    llvm::AttrBuilder SRETAttrs;
    SRETAttrs.addAttribute(llvm::Attribute::StructRet);
    if (RetAI.getInReg())
      SRETAttrs.addAttribute(llvm::Attribute::InReg);
    PAL.push_back(llvm::
                  AttributeSet::get(getLLVMContext(), Index, SRETAttrs));

    ++Index;
    // sret disables readnone and readonly
    FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
      .removeAttribute(llvm::Attribute::ReadNone);
    break;
  }

  case ABIArgInfo::Expand:
    llvm_unreachable("Invalid ABI kind for return argument");
  }

  if (RetAttrs.hasAttributes())
    PAL.push_back(llvm::
                  AttributeSet::get(getLLVMContext(),
                                    llvm::AttributeSet::ReturnIndex,
                                    RetAttrs));

  for (CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
         ie = FI.arg_end(); it != ie; ++it) {
    QualType ParamType = it->type;
    const ABIArgInfo &AI = it->info;
    llvm::AttrBuilder Attrs;

    if (AI.getPaddingType()) {
      if (AI.getPaddingInReg())
        PAL.push_back(llvm::AttributeSet::get(getLLVMContext(), Index,
                                              llvm::Attribute::InReg));
      // Increment Index if there is padding.
      ++Index;
    }

    // 'restrict' -> 'noalias' is done in EmitFunctionProlog when we
    // have the corresponding parameter variable. It doesn't make
    // sense to do it here because parameters are so messed up.
    switch (AI.getKind()) {
    case ABIArgInfo::Extend:
      if (ParamType->isSignedIntegerOrEnumerationType())
        Attrs.addAttribute(llvm::Attribute::SExt);
      else if (ParamType->isUnsignedIntegerOrEnumerationType())
        Attrs.addAttribute(llvm::Attribute::ZExt);
      // FALL THROUGH
    case ABIArgInfo::Direct:
      if (AI.getInReg())
        Attrs.addAttribute(llvm::Attribute::InReg);

      // FIXME: handle sseregparm someday...

      if (llvm::StructType *STy =
            dyn_cast<llvm::StructType>(AI.getCoerceToType())) {
        unsigned Extra = STy->getNumElements()-1;  // 1 will be added below.
        if (Attrs.hasAttributes())
          for (unsigned I = 0; I < Extra; ++I)
            PAL.push_back(llvm::AttributeSet::get(getLLVMContext(), Index + I,
                                                  Attrs));
        Index += Extra;
      }
      break;

    case ABIArgInfo::Indirect:
      if (AI.getInReg())
        Attrs.addAttribute(llvm::Attribute::InReg);

      if (AI.getIndirectByVal())
        Attrs.addAttribute(llvm::Attribute::ByVal);

      Attrs.addAlignmentAttr(AI.getIndirectAlign());

      // byval disables readnone and readonly.
      FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
        .removeAttribute(llvm::Attribute::ReadNone);
      break;

    case ABIArgInfo::Ignore:
      // Skip increment, no matching LLVM parameter.
      continue;

    case ABIArgInfo::Expand: {
      SmallVector<llvm::Type*, 8> types;
      // FIXME: This is rather inefficient. Do we ever actually need to do
      // anything here? The result should be just reconstructed on the other
      // side, so extension should be a non-issue.
      getTypes().GetExpandedTypes(ParamType, types);
      Index += types.size();
      continue;
    }
    }

    if (Attrs.hasAttributes())
      PAL.push_back(llvm::AttributeSet::get(getLLVMContext(), Index, Attrs));
    ++Index;
  }
  if (FuncAttrs.hasAttributes())
    PAL.push_back(llvm::
                  AttributeSet::get(getLLVMContext(),
                                    llvm::AttributeSet::FunctionIndex,
                                    FuncAttrs));
}
  1042. /// An argument came in as a promoted argument; demote it back to its
  1043. /// declared type.
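/// For example, a K&R-promoted 'float' parameter arrives here as a 'double'
/// and is narrowed back with an fptrunc, while a promoted 'short' arrives as
/// an 'int' and is narrowed with a trunc.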
  1044. static llvm::Value *emitArgumentDemotion(CodeGenFunction &CGF,
  1045. const VarDecl *var,
  1046. llvm::Value *value) {
  1047. llvm::Type *varType = CGF.ConvertType(var->getType());
  1048. // This can happen with promotions that actually don't change the
  1049. // underlying type, like the enum promotions.
  1050. if (value->getType() == varType) return value;
  1051. assert((varType->isIntegerTy() || varType->isFloatingPointTy())
  1052. && "unexpected promotion type");
  1053. if (isa<llvm::IntegerType>(varType))
  1054. return CGF.Builder.CreateTrunc(value, varType, "arg.unpromote");
  1055. return CGF.Builder.CreateFPCast(value, varType, "arg.unpromote");
  1056. }
  1057. void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
  1058. llvm::Function *Fn,
  1059. const FunctionArgList &Args) {
  1060. // If this is an implicit-return-zero function, go ahead and
  1061. // initialize the return value. TODO: it might be nice to have
  1062. // a more general mechanism for this that didn't require synthesized
  1063. // return statements.
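// (In practice the main client of this is 'main', whose closing brace
// implies 'return 0'.)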
  1064. if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurCodeDecl)) {
  1065. if (FD->hasImplicitReturnZero()) {
  1066. QualType RetTy = FD->getResultType().getUnqualifiedType();
  1067. llvm::Type* LLVMTy = CGM.getTypes().ConvertType(RetTy);
  1068. llvm::Constant* Zero = llvm::Constant::getNullValue(LLVMTy);
  1069. Builder.CreateStore(Zero, ReturnValue);
  1070. }
  1071. }
  1072. // FIXME: We no longer need the types from FunctionArgList; lift up and
  1073. // simplify.
  1074. // Emit allocs for param decls. Give the LLVM Argument nodes names.
  1075. llvm::Function::arg_iterator AI = Fn->arg_begin();
  1076. // Name the struct return argument.
  1077. if (CGM.ReturnTypeUsesSRet(FI)) {
  1078. AI->setName("agg.result");
  1079. AI->addAttr(llvm::AttributeSet::get(getLLVMContext(),
  1080. AI->getArgNo() + 1,
  1081. llvm::Attribute::NoAlias));
  1082. ++AI;
  1083. }
  1084. assert(FI.arg_size() == Args.size() &&
  1085. "Mismatch between function signature & arguments.");
  1086. unsigned ArgNo = 1;
  1087. CGFunctionInfo::const_arg_iterator info_it = FI.arg_begin();
  1088. for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
  1089. i != e; ++i, ++info_it, ++ArgNo) {
  1090. const VarDecl *Arg = *i;
  1091. QualType Ty = info_it->type;
  1092. const ABIArgInfo &ArgI = info_it->info;
  1093. bool isPromoted =
  1094. isa<ParmVarDecl>(Arg) && cast<ParmVarDecl>(Arg)->isKNRPromoted();
  1095. // Skip the dummy padding argument.
  1096. if (ArgI.getPaddingType())
  1097. ++AI;
  1098. switch (ArgI.getKind()) {
  1099. case ABIArgInfo::Indirect: {
  1100. llvm::Value *V = AI;
  1101. if (!hasScalarEvaluationKind(Ty)) {
  1102. // Aggregates and complex variables are accessed by reference. All we
  1103. // need to do is realign the value, if requested
  1104. if (ArgI.getIndirectRealign()) {
  1105. llvm::Value *AlignedTemp = CreateMemTemp(Ty, "coerce");
  1106. // Copy from the incoming argument pointer to the temporary with the
  1107. // appropriate alignment.
  1108. //
  1109. // FIXME: We should have a common utility for generating an aggregate
  1110. // copy.
  1111. llvm::Type *I8PtrTy = Builder.getInt8PtrTy();
  1112. CharUnits Size = getContext().getTypeSizeInChars(Ty);
  1113. llvm::Value *Dst = Builder.CreateBitCast(AlignedTemp, I8PtrTy);
  1114. llvm::Value *Src = Builder.CreateBitCast(V, I8PtrTy);
  1115. Builder.CreateMemCpy(Dst,
  1116. Src,
  1117. llvm::ConstantInt::get(IntPtrTy,
  1118. Size.getQuantity()),
  1119. ArgI.getIndirectAlign(),
  1120. false);
  1121. V = AlignedTemp;
  1122. }
  1123. } else {
  1124. // Load scalar value from indirect argument.
  1125. CharUnits Alignment = getContext().getTypeAlignInChars(Ty);
  1126. V = EmitLoadOfScalar(V, false, Alignment.getQuantity(), Ty);
  1127. if (isPromoted)
  1128. V = emitArgumentDemotion(*this, Arg, V);
  1129. }
  1130. EmitParmDecl(*Arg, V, ArgNo);
  1131. break;
  1132. }
  1133. case ABIArgInfo::Extend:
  1134. case ABIArgInfo::Direct: {
  1135. // If we have the trivial case, handle it with no muss and fuss.
  1136. if (!isa<llvm::StructType>(ArgI.getCoerceToType()) &&
  1137. ArgI.getCoerceToType() == ConvertType(Ty) &&
  1138. ArgI.getDirectOffset() == 0) {
  1139. assert(AI != Fn->arg_end() && "Argument mismatch!");
  1140. llvm::Value *V = AI;
  1141. if (Arg->getType().isRestrictQualified())
  1142. AI->addAttr(llvm::AttributeSet::get(getLLVMContext(),
  1143. AI->getArgNo() + 1,
  1144. llvm::Attribute::NoAlias));
  1145. // Ensure the argument is the correct type.
  1146. if (V->getType() != ArgI.getCoerceToType())
  1147. V = Builder.CreateBitCast(V, ArgI.getCoerceToType());
  1148. if (isPromoted)
  1149. V = emitArgumentDemotion(*this, Arg, V);
  1150. // Because of merging of function types from multiple decls it is
  1151. // possible for the type of an argument to not match the corresponding
  1152. // type in the function type. Since we are codegening the callee
  1153. // in here, add a cast to the argument type.
  1154. llvm::Type *LTy = ConvertType(Arg->getType());
  1155. if (V->getType() != LTy)
  1156. V = Builder.CreateBitCast(V, LTy);
  1157. EmitParmDecl(*Arg, V, ArgNo);
  1158. break;
  1159. }
  1160. llvm::AllocaInst *Alloca = CreateMemTemp(Ty, Arg->getName());
  1161. // The alignment we need to use is the max of the requested alignment for
  1162. // the argument plus the alignment required by our access code below.
  1163. unsigned AlignmentToUse =
  1164. CGM.getDataLayout().getABITypeAlignment(ArgI.getCoerceToType());
  1165. AlignmentToUse = std::max(AlignmentToUse,
  1166. (unsigned)getContext().getDeclAlign(Arg).getQuantity());
  1167. Alloca->setAlignment(AlignmentToUse);
  1168. llvm::Value *V = Alloca;
  1169. llvm::Value *Ptr = V; // Pointer to store into.
  1170. // If the value is offset in memory, apply the offset now.
  1171. if (unsigned Offs = ArgI.getDirectOffset()) {
  1172. Ptr = Builder.CreateBitCast(Ptr, Builder.getInt8PtrTy());
  1173. Ptr = Builder.CreateConstGEP1_32(Ptr, Offs);
  1174. Ptr = Builder.CreateBitCast(Ptr,
  1175. llvm::PointerType::getUnqual(ArgI.getCoerceToType()));
  1176. }
  1177. // If the coerce-to type is a first class aggregate, we flatten it and
  1178. // pass the elements. Either way is semantically identical, but fast-isel
  1179. // and the optimizer generally likes scalar values better than FCAs.
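// For example, an argument coerced to { i64, i64 } arrives as two i64
// parameters, which are named <arg>.coerce0 and <arg>.coerce1 below.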
  1180. llvm::StructType *STy = dyn_cast<llvm::StructType>(ArgI.getCoerceToType());
  1181. if (STy && STy->getNumElements() > 1) {
  1182. uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(STy);
  1183. llvm::Type *DstTy =
  1184. cast<llvm::PointerType>(Ptr->getType())->getElementType();
  1185. uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(DstTy);
  1186. if (SrcSize <= DstSize) {
  1187. Ptr = Builder.CreateBitCast(Ptr, llvm::PointerType::getUnqual(STy));
  1188. for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
  1189. assert(AI != Fn->arg_end() && "Argument mismatch!");
  1190. AI->setName(Arg->getName() + ".coerce" + Twine(i));
  1191. llvm::Value *EltPtr = Builder.CreateConstGEP2_32(Ptr, 0, i);
  1192. Builder.CreateStore(AI++, EltPtr);
  1193. }
  1194. } else {
  1195. llvm::AllocaInst *TempAlloca =
  1196. CreateTempAlloca(ArgI.getCoerceToType(), "coerce");
  1197. TempAlloca->setAlignment(AlignmentToUse);
  1198. llvm::Value *TempV = TempAlloca;
  1199. for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
  1200. assert(AI != Fn->arg_end() && "Argument mismatch!");
  1201. AI->setName(Arg->getName() + ".coerce" + Twine(i));
  1202. llvm::Value *EltPtr = Builder.CreateConstGEP2_32(TempV, 0, i);
  1203. Builder.CreateStore(AI++, EltPtr);
  1204. }
  1205. Builder.CreateMemCpy(Ptr, TempV, DstSize, AlignmentToUse);
  1206. }
  1207. } else {
  1208. // Simple case, just do a coerced store of the argument into the alloca.
  1209. assert(AI != Fn->arg_end() && "Argument mismatch!");
  1210. AI->setName(Arg->getName() + ".coerce");
  1211. CreateCoercedStore(AI++, Ptr, /*DestIsVolatile=*/false, *this);
  1212. }
  1213. // Match to what EmitParmDecl is expecting for this type.
  1214. if (CodeGenFunction::hasScalarEvaluationKind(Ty)) {
  1215. V = EmitLoadOfScalar(V, false, AlignmentToUse, Ty);
  1216. if (isPromoted)
  1217. V = emitArgumentDemotion(*this, Arg, V);
  1218. }
  1219. EmitParmDecl(*Arg, V, ArgNo);
  1220. continue; // Skip ++AI increment, already done.
  1221. }
  1222. case ABIArgInfo::Expand: {
  1223. // If this structure was expanded into multiple arguments then
  1224. // we need to create a temporary and reconstruct it from the
  1225. // arguments.
  1226. llvm::AllocaInst *Alloca = CreateMemTemp(Ty);
  1227. CharUnits Align = getContext().getDeclAlign(Arg);
  1228. Alloca->setAlignment(Align.getQuantity());
  1229. LValue LV = MakeAddrLValue(Alloca, Ty, Align);
  1230. llvm::Function::arg_iterator End = ExpandTypeFromArgs(Ty, LV, AI);
  1231. EmitParmDecl(*Arg, Alloca, ArgNo);
  1232. // Name the arguments used in expansion and increment AI.
  1233. unsigned Index = 0;
  1234. for (; AI != End; ++AI, ++Index)
  1235. AI->setName(Arg->getName() + "." + Twine(Index));
  1236. continue;
  1237. }
  1238. case ABIArgInfo::Ignore:
  1239. // Initialize the local variable appropriately.
  1240. if (!hasScalarEvaluationKind(Ty))
  1241. EmitParmDecl(*Arg, CreateMemTemp(Ty), ArgNo);
  1242. else
  1243. EmitParmDecl(*Arg, llvm::UndefValue::get(ConvertType(Arg->getType())),
  1244. ArgNo);
  1245. // Skip increment, no matching LLVM parameter.
  1246. continue;
  1247. }
  1248. ++AI;
  1249. }
  1250. assert(AI == Fn->arg_end() && "Argument mismatch!");
  1251. }
  1252. static void eraseUnusedBitCasts(llvm::Instruction *insn) {
  1253. while (insn->use_empty()) {
  1254. llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(insn);
  1255. if (!bitcast) return;
  1256. // This is "safe" because we would have used a ConstantExpr otherwise.
  1257. insn = cast<llvm::Instruction>(bitcast->getOperand(0));
  1258. bitcast->eraseFromParent();
  1259. }
  1260. }
  1261. /// Try to emit a fused autorelease of a return result.
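/// Roughly: if the value being returned was just produced by an objc_retain
/// (or objc_retainAutoreleasedReturnValue) call, fold that retain together
/// with the autorelease we are about to emit rather than emitting both
/// operations back to back.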
  1262. static llvm::Value *tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF,
  1263. llvm::Value *result) {
// The result must be the last instruction emitted in the current block.
  1265. llvm::BasicBlock *BB = CGF.Builder.GetInsertBlock();
  1266. if (BB->empty()) return 0;
  1267. if (&BB->back() != result) return 0;
  1268. llvm::Type *resultType = result->getType();
  1269. // result is in a BasicBlock and is therefore an Instruction.
  1270. llvm::Instruction *generator = cast<llvm::Instruction>(result);
  1271. SmallVector<llvm::Instruction*,4> insnsToKill;
  1272. // Look for:
  1273. // %generator = bitcast %type1* %generator2 to %type2*
  1274. while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(generator)) {
  1275. // We would have emitted this as a constant if the operand weren't
  1276. // an Instruction.
  1277. generator = cast<llvm::Instruction>(bitcast->getOperand(0));
  1278. // Require the generator to be immediately followed by the cast.
  1279. if (generator->getNextNode() != bitcast)
  1280. return 0;
  1281. insnsToKill.push_back(bitcast);
  1282. }
  1283. // Look for:
  1284. // %generator = call i8* @objc_retain(i8* %originalResult)
  1285. // or
  1286. // %generator = call i8* @objc_retainAutoreleasedReturnValue(i8* %originalResult)
  1287. llvm::CallInst *call = dyn_cast<llvm::CallInst>(generator);
  1288. if (!call) return 0;
  1289. bool doRetainAutorelease;
  1290. if (call->getCalledValue() == CGF.CGM.getARCEntrypoints().objc_retain) {
  1291. doRetainAutorelease = true;
  1292. } else if (call->getCalledValue() == CGF.CGM.getARCEntrypoints()
  1293. .objc_retainAutoreleasedReturnValue) {
  1294. doRetainAutorelease = false;
  1295. // If we emitted an assembly marker for this call (and the
  1296. // ARCEntrypoints field should have been set if so), go looking
  1297. // for that call. If we can't find it, we can't do this
  1298. // optimization. But it should always be the immediately previous
  1299. // instruction, unless we needed bitcasts around the call.
  1300. if (CGF.CGM.getARCEntrypoints().retainAutoreleasedReturnValueMarker) {
  1301. llvm::Instruction *prev = call->getPrevNode();
  1302. assert(prev);
  1303. if (isa<llvm::BitCastInst>(prev)) {
  1304. prev = prev->getPrevNode();
  1305. assert(prev);
  1306. }
  1307. assert(isa<llvm::CallInst>(prev));
  1308. assert(cast<llvm::CallInst>(prev)->getCalledValue() ==
  1309. CGF.CGM.getARCEntrypoints().retainAutoreleasedReturnValueMarker);
  1310. insnsToKill.push_back(prev);
  1311. }
  1312. } else {
  1313. return 0;
  1314. }
  1315. result = call->getArgOperand(0);
  1316. insnsToKill.push_back(call);
  1317. // Keep killing bitcasts, for sanity. Note that we no longer care
  1318. // about precise ordering as long as there's exactly one use.
  1319. while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(result)) {
  1320. if (!bitcast->hasOneUse()) break;
  1321. insnsToKill.push_back(bitcast);
  1322. result = bitcast->getOperand(0);
  1323. }
  1324. // Delete all the unnecessary instructions, from latest to earliest.
  1325. for (SmallVectorImpl<llvm::Instruction*>::iterator
  1326. i = insnsToKill.begin(), e = insnsToKill.end(); i != e; ++i)
  1327. (*i)->eraseFromParent();
  1328. // Do the fused retain/autorelease if we were asked to.
  1329. if (doRetainAutorelease)
  1330. result = CGF.EmitARCRetainAutoreleaseReturnValue(result);
  1331. // Cast back to the result type.
  1332. return CGF.Builder.CreateBitCast(result, resultType);
  1333. }
  1334. /// If this is a +1 of the value of an immutable 'self', remove it.
  1335. static llvm::Value *tryRemoveRetainOfSelf(CodeGenFunction &CGF,
  1336. llvm::Value *result) {
  1337. // This is only applicable to a method with an immutable 'self'.
  1338. const ObjCMethodDecl *method =
  1339. dyn_cast_or_null<ObjCMethodDecl>(CGF.CurCodeDecl);
  1340. if (!method) return 0;
  1341. const VarDecl *self = method->getSelfDecl();
  1342. if (!self->getType().isConstQualified()) return 0;
  1343. // Look for a retain call.
  1344. llvm::CallInst *retainCall =
  1345. dyn_cast<llvm::CallInst>(result->stripPointerCasts());
  1346. if (!retainCall ||
  1347. retainCall->getCalledValue() != CGF.CGM.getARCEntrypoints().objc_retain)
  1348. return 0;
  1349. // Look for an ordinary load of 'self'.
  1350. llvm::Value *retainedValue = retainCall->getArgOperand(0);
  1351. llvm::LoadInst *load =
  1352. dyn_cast<llvm::LoadInst>(retainedValue->stripPointerCasts());
  1353. if (!load || load->isAtomic() || load->isVolatile() ||
  1354. load->getPointerOperand() != CGF.GetAddrOfLocalVar(self))
  1355. return 0;
  1356. // Okay! Burn it all down. This relies for correctness on the
  1357. // assumption that the retain is emitted as part of the return and
  1358. // that thereafter everything is used "linearly".
  1359. llvm::Type *resultType = result->getType();
  1360. eraseUnusedBitCasts(cast<llvm::Instruction>(result));
  1361. assert(retainCall->use_empty());
  1362. retainCall->eraseFromParent();
  1363. eraseUnusedBitCasts(cast<llvm::Instruction>(retainedValue));
  1364. return CGF.Builder.CreateBitCast(load, resultType);
  1365. }
  1366. /// Emit an ARC autorelease of the result of a function.
  1367. ///
  1368. /// \return the value to actually return from the function
  1369. static llvm::Value *emitAutoreleaseOfResult(CodeGenFunction &CGF,
  1370. llvm::Value *result) {
  1371. // If we're returning 'self', kill the initial retain. This is a
  1372. // heuristic attempt to "encourage correctness" in the really unfortunate
  1373. // case where we have a return of self during a dealloc and we desperately
  1374. // need to avoid the possible autorelease.
  1375. if (llvm::Value *self = tryRemoveRetainOfSelf(CGF, result))
  1376. return self;
  1377. // At -O0, try to emit a fused retain/autorelease.
  1378. if (CGF.shouldUseFusedARCCalls())
  1379. if (llvm::Value *fused = tryEmitFusedAutoreleaseOfResult(CGF, result))
  1380. return fused;
  1381. return CGF.EmitARCAutoreleaseReturnValue(result);
  1382. }
  1383. /// Heuristically search for a dominating store to the return-value slot.
  1384. static llvm::StoreInst *findDominatingStoreToReturnValue(CodeGenFunction &CGF) {
  1385. // If there are multiple uses of the return-value slot, just check
  1386. // for something immediately preceding the IP. Sometimes this can
  1387. // happen with how we generate implicit-returns; it can also happen
  1388. // with noreturn cleanups.
  1389. if (!CGF.ReturnValue->hasOneUse()) {
  1390. llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
  1391. if (IP->empty()) return 0;
  1392. llvm::StoreInst *store = dyn_cast<llvm::StoreInst>(&IP->back());
  1393. if (!store) return 0;
  1394. if (store->getPointerOperand() != CGF.ReturnValue) return 0;
  1395. assert(!store->isAtomic() && !store->isVolatile()); // see below
  1396. return store;
  1397. }
  1398. llvm::StoreInst *store =
  1399. dyn_cast<llvm::StoreInst>(CGF.ReturnValue->use_back());
  1400. if (!store) return 0;
  1401. // These aren't actually possible for non-coerced returns, and we
  1402. // only care about non-coerced returns on this code path.
  1403. assert(!store->isAtomic() && !store->isVolatile());
// Now do a quick-and-dirty dominance check: just walk up the
// single-predecessor chain from the current insertion point.
  1406. llvm::BasicBlock *StoreBB = store->getParent();
  1407. llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
  1408. while (IP != StoreBB) {
  1409. if (!(IP = IP->getSinglePredecessor()))
  1410. return 0;
  1411. }
  1412. // Okay, the store's basic block dominates the insertion point; we
  1413. // can do our thing.
  1414. return store;
  1415. }
  1416. void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
  1417. bool EmitRetDbgLoc) {
  1418. // Functions with no result always return void.
  1419. if (ReturnValue == 0) {
  1420. Builder.CreateRetVoid();
  1421. return;
  1422. }
  1423. llvm::DebugLoc RetDbgLoc;
  1424. llvm::Value *RV = 0;
  1425. QualType RetTy = FI.getReturnType();
  1426. const ABIArgInfo &RetAI = FI.getReturnInfo();
  1427. switch (RetAI.getKind()) {
  1428. case ABIArgInfo::Indirect: {
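// The sret pointer is always the first IR argument of the current function;
// copy whatever was accumulated in the local ReturnValue slot out through it.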
  1429. switch (getEvaluationKind(RetTy)) {
  1430. case TEK_Complex: {
  1431. ComplexPairTy RT =
  1432. EmitLoadOfComplex(MakeNaturalAlignAddrLValue(ReturnValue, RetTy));
  1433. EmitStoreOfComplex(RT,
  1434. MakeNaturalAlignAddrLValue(CurFn->arg_begin(), RetTy),
  1435. /*isInit*/ true);
  1436. break;
  1437. }
  1438. case TEK_Aggregate:
// Do nothing; aggregates get evaluated directly into the destination.
  1440. break;
  1441. case TEK_Scalar:
  1442. EmitStoreOfScalar(Builder.CreateLoad(ReturnValue),
  1443. MakeNaturalAlignAddrLValue(CurFn->arg_begin(), RetTy),
  1444. /*isInit*/ true);
  1445. break;
  1446. }
  1447. break;
  1448. }
  1449. case ABIArgInfo::Extend:
  1450. case ABIArgInfo::Direct:
  1451. if (RetAI.getCoerceToType() == ConvertType(RetTy) &&
  1452. RetAI.getDirectOffset() == 0) {
  1453. // The internal return value temp always will have pointer-to-return-type
  1454. // type, just do a load.
  1455. // If there is a dominating store to ReturnValue, we can elide
  1456. // the load, zap the store, and usually zap the alloca.
  1457. if (llvm::StoreInst *SI = findDominatingStoreToReturnValue(*this)) {
  1458. // Reuse the debug location from the store unless there is
  1459. // cleanup code to be emitted between the store and return
  1460. // instruction.
  1461. if (EmitRetDbgLoc && !AutoreleaseResult)
  1462. RetDbgLoc = SI->getDebugLoc();
  1463. // Get the stored value and nuke the now-dead store.
  1464. RV = SI->getValueOperand();
  1465. SI->eraseFromParent();
  1466. // If that was the only use of the return value, nuke it as well now.
  1467. if (ReturnValue->use_empty() && isa<llvm::AllocaInst>(ReturnValue)) {
  1468. cast<llvm::AllocaInst>(ReturnValue)->eraseFromParent();
  1469. ReturnValue = 0;
  1470. }
  1471. // Otherwise, we have to do a simple load.
  1472. } else {
  1473. RV = Builder.CreateLoad(ReturnValue);
  1474. }
  1475. } else {
  1476. llvm::Value *V = ReturnValue;
  1477. // If the value is offset in memory, apply the offset now.
  1478. if (unsigned Offs = RetAI.getDirectOffset()) {
  1479. V = Builder.CreateBitCast(V, Builder.getInt8PtrTy());
  1480. V = Builder.CreateConstGEP1_32(V, Offs);
  1481. V = Builder.CreateBitCast(V,
  1482. llvm::PointerType::getUnqual(RetAI.getCoerceToType()));
  1483. }
  1484. RV = CreateCoercedLoad(V, RetAI.getCoerceToType(), *this);
  1485. }
  1486. // In ARC, end functions that return a retainable type with a call
  1487. // to objc_autoreleaseReturnValue.
  1488. if (AutoreleaseResult) {
  1489. assert(getLangOpts().ObjCAutoRefCount &&
  1490. !FI.isReturnsRetained() &&
  1491. RetTy->isObjCRetainableType());
  1492. RV = emitAutoreleaseOfResult(*this, RV);
  1493. }
  1494. break;
  1495. case ABIArgInfo::Ignore:
  1496. break;
  1497. case ABIArgInfo::Expand:
  1498. llvm_unreachable("Invalid ABI kind for return argument");
  1499. }
  1500. llvm::Instruction *Ret = RV ? Builder.CreateRet(RV) : Builder.CreateRetVoid();
  1501. if (!RetDbgLoc.isUnknown())
  1502. Ret->setDebugLoc(RetDbgLoc);
  1503. }
  1504. void CodeGenFunction::EmitDelegateCallArg(CallArgList &args,
  1505. const VarDecl *param) {
  1506. // StartFunction converted the ABI-lowered parameter(s) into a
  1507. // local alloca. We need to turn that into an r-value suitable
  1508. // for EmitCall.
  1509. llvm::Value *local = GetAddrOfLocalVar(param);
  1510. QualType type = param->getType();
  1511. // For the most part, we just need to load the alloca, except:
  1512. // 1) aggregate r-values are actually pointers to temporaries, and
  1513. // 2) references to non-scalars are pointers directly to the aggregate.
  1514. // I don't know why references to scalars are different here.
  1515. if (const ReferenceType *ref = type->getAs<ReferenceType>()) {
  1516. if (!hasScalarEvaluationKind(ref->getPointeeType()))
  1517. return args.add(RValue::getAggregate(local), type);
  1518. // Locals which are references to scalars are represented
  1519. // with allocas holding the pointer.
  1520. return args.add(RValue::get(Builder.CreateLoad(local)), type);
  1521. }
  1522. args.add(convertTempToRValue(local, type), type);
  1523. }
  1524. static bool isProvablyNull(llvm::Value *addr) {
  1525. return isa<llvm::ConstantPointerNull>(addr);
  1526. }
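// A local alloca can never be null, so it is provably non-null; any other
// address is conservatively treated as possibly null.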
  1527. static bool isProvablyNonNull(llvm::Value *addr) {
  1528. return isa<llvm::AllocaInst>(addr);
  1529. }
  1530. /// Emit the actual writing-back of a writeback.
  1531. static void emitWriteback(CodeGenFunction &CGF,
  1532. const CallArgList::Writeback &writeback) {
  1533. const LValue &srcLV = writeback.Source;
  1534. llvm::Value *srcAddr = srcLV.getAddress();
  1535. assert(!isProvablyNull(srcAddr) &&
  1536. "shouldn't have writeback for provably null argument");
  1537. llvm::BasicBlock *contBB = 0;
  1538. // If the argument wasn't provably non-null, we need to null check
  1539. // before doing the store.
  1540. bool provablyNonNull = isProvablyNonNull(srcAddr);
  1541. if (!provablyNonNull) {
  1542. llvm::BasicBlock *writebackBB = CGF.createBasicBlock("icr.writeback");
  1543. contBB = CGF.createBasicBlock("icr.done");
  1544. llvm::Value *isNull = CGF.Builder.CreateIsNull(srcAddr, "icr.isnull");
  1545. CGF.Builder.CreateCondBr(isNull, contBB, writebackBB);
  1546. CGF.EmitBlock(writebackBB);
  1547. }
  1548. // Load the value to writeback.
  1549. llvm::Value *value = CGF.Builder.CreateLoad(writeback.Temporary);
  1550. // Cast it back, in case we're writing an id to a Foo* or something.
  1551. value = CGF.Builder.CreateBitCast(value,
  1552. cast<llvm::PointerType>(srcAddr->getType())->getElementType(),
  1553. "icr.writeback-cast");
  1554. // Perform the writeback.
  1555. // If we have a "to use" value, it's something we need to emit a use
  1556. // of. This has to be carefully threaded in: if it's done after the
  1557. // release it's potentially undefined behavior (and the optimizer
  1558. // will ignore it), and if it happens before the retain then the
  1559. // optimizer could move the release there.
  1560. if (writeback.ToUse) {
  1561. assert(srcLV.getObjCLifetime() == Qualifiers::OCL_Strong);
  1562. // Retain the new value. No need to block-copy here: the block's
  1563. // being passed up the stack.
  1564. value = CGF.EmitARCRetainNonBlock(value);
  1565. // Emit the intrinsic use here.
  1566. CGF.EmitARCIntrinsicUse(writeback.ToUse);
  1567. // Load the old value (primitively).
  1568. llvm::Value *oldValue = CGF.EmitLoadOfScalar(srcLV);
  1569. // Put the new value in place (primitively).
  1570. CGF.EmitStoreOfScalar(value, srcLV, /*init*/ false);
  1571. // Release the old value.
  1572. CGF.EmitARCRelease(oldValue, srcLV.isARCPreciseLifetime());
  1573. // Otherwise, we can just do a normal lvalue store.
  1574. } else {
  1575. CGF.EmitStoreThroughLValue(RValue::get(value), srcLV);
  1576. }
  1577. // Jump to the continuation block.
  1578. if (!provablyNonNull)
  1579. CGF.EmitBlock(contBB);
  1580. }
  1581. static void emitWritebacks(CodeGenFunction &CGF,
  1582. const CallArgList &args) {
  1583. for (CallArgList::writeback_iterator
  1584. i = args.writeback_begin(), e = args.writeback_end(); i != e; ++i)
  1585. emitWriteback(CGF, *i);
  1586. }
  1587. static void deactivateArgCleanupsBeforeCall(CodeGenFunction &CGF,
  1588. const CallArgList &CallArgs) {
  1589. assert(CGF.getTarget().getCXXABI().isArgumentDestroyedByCallee());
  1590. ArrayRef<CallArgList::CallArgCleanup> Cleanups =
  1591. CallArgs.getCleanupsToDeactivate();
  1592. // Iterate in reverse to increase the likelihood of popping the cleanup.
  1593. for (ArrayRef<CallArgList::CallArgCleanup>::reverse_iterator
  1594. I = Cleanups.rbegin(), E = Cleanups.rend(); I != E; ++I) {
  1595. CGF.DeactivateCleanupBlock(I->Cleanup, I->IsActiveIP);
  1596. I->IsActiveIP->eraseFromParent();
  1597. }
  1598. }
  1599. static const Expr *maybeGetUnaryAddrOfOperand(const Expr *E) {
  1600. if (const UnaryOperator *uop = dyn_cast<UnaryOperator>(E->IgnoreParens()))
  1601. if (uop->getOpcode() == UO_AddrOf)
  1602. return uop->getSubExpr();
  1603. return 0;
  1604. }
/// Emit an argument that's being passed call-by-writeback. That is, we are
/// passing the address of a temporary: the temporary may be copy-initialized
/// from the original l-value, and its contents are written back to that
/// l-value after the call.
  1607. static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args,
  1608. const ObjCIndirectCopyRestoreExpr *CRE) {
  1609. LValue srcLV;
  1610. // Make an optimistic effort to emit the address as an l-value.
// This can fail if the argument expression is more complicated.
  1612. if (const Expr *lvExpr = maybeGetUnaryAddrOfOperand(CRE->getSubExpr())) {
  1613. srcLV = CGF.EmitLValue(lvExpr);
  1614. // Otherwise, just emit it as a scalar.
  1615. } else {
  1616. llvm::Value *srcAddr = CGF.EmitScalarExpr(CRE->getSubExpr());
  1617. QualType srcAddrType =
  1618. CRE->getSubExpr()->getType()->castAs<PointerType>()->getPointeeType();
  1619. srcLV = CGF.MakeNaturalAlignAddrLValue(srcAddr, srcAddrType);
  1620. }
  1621. llvm::Value *srcAddr = srcLV.getAddress();
  1622. // The dest and src types don't necessarily match in LLVM terms
  1623. // because of the crazy ObjC compatibility rules.
  1624. llvm::PointerType *destType =
  1625. cast<llvm::PointerType>(CGF.ConvertType(CRE->getType()));
  1626. // If the address is a constant null, just pass the appropriate null.
  1627. if (isProvablyNull(srcAddr)) {
  1628. args.add(RValue::get(llvm::ConstantPointerNull::get(destType)),
  1629. CRE->getType());
  1630. return;
  1631. }
  1632. // Create the temporary.
  1633. llvm::Value *temp = CGF.CreateTempAlloca(destType->getElementType(),
  1634. "icr.temp");
  1635. // Loading an l-value can introduce a cleanup if the l-value is __weak,
  1636. // and that cleanup will be conditional if we can't prove that the l-value
  1637. // isn't null, so we need to register a dominating point so that the cleanups
  1638. // system will make valid IR.
  1639. CodeGenFunction::ConditionalEvaluation condEval(CGF);
  1640. // Zero-initialize it if we're not doing a copy-initialization.
  1641. bool shouldCopy = CRE->shouldCopy();
  1642. if (!shouldCopy) {
  1643. llvm::Value *null =
  1644. llvm::ConstantPointerNull::get(
  1645. cast<llvm::PointerType>(destType->getElementType()));
  1646. CGF.Builder.CreateStore(null, temp);
  1647. }
  1648. llvm::BasicBlock *contBB = 0;
  1649. llvm::BasicBlock *originBB = 0;
  1650. // If the address is *not* known to be non-null, we need to switch.
  1651. llvm::Value *finalArgument;
  1652. bool provablyNonNull = isProvablyNonNull(srcAddr);
  1653. if (provablyNonNull) {
  1654. finalArgument = temp;
  1655. } else {
  1656. llvm::Value *isNull = CGF.Builder.CreateIsNull(srcAddr, "icr.isnull");
  1657. finalArgument = CGF.Builder.CreateSelect(isNull,
  1658. llvm::ConstantPointerNull::get(destType),
  1659. temp, "icr.argument");
  1660. // If we need to copy, then the load has to be conditional, which
  1661. // means we need control flow.
  1662. if (shouldCopy) {
  1663. originBB = CGF.Builder.GetInsertBlock();
  1664. contBB = CGF.createBasicBlock("icr.cont");
  1665. llvm::BasicBlock *copyBB = CGF.createBasicBlock("icr.copy");
  1666. CGF.Builder.CreateCondBr(isNull, contBB, copyBB);
  1667. CGF.EmitBlock(copyBB);
  1668. condEval.begin(CGF);
  1669. }
  1670. }
  1671. llvm::Value *valueToUse = 0;
  1672. // Perform a copy if necessary.
  1673. if (shouldCopy) {
  1674. RValue srcRV = CGF.EmitLoadOfLValue(srcLV);
  1675. assert(srcRV.isScalar());
  1676. llvm::Value *src = srcRV.getScalarVal();
  1677. src = CGF.Builder.CreateBitCast(src, destType->getElementType(),
  1678. "icr.cast");
  1679. // Use an ordinary store, not a store-to-lvalue.
  1680. CGF.Builder.CreateStore(src, temp);
  1681. // If optimization is enabled, and the value was held in a
  1682. // __strong variable, we need to tell the optimizer that this
  1683. // value has to stay alive until we're doing the store back.
  1684. // This is because the temporary is effectively unretained,
  1685. // and so otherwise we can violate the high-level semantics.
  1686. if (CGF.CGM.getCodeGenOpts().OptimizationLevel != 0 &&
  1687. srcLV.getObjCLifetime() == Qualifiers::OCL_Strong) {
  1688. valueToUse = src;
  1689. }
  1690. }
  1691. // Finish the control flow if we needed it.
  1692. if (shouldCopy && !provablyNonNull) {
  1693. llvm::BasicBlock *copyBB = CGF.Builder.GetInsertBlock();
  1694. CGF.EmitBlock(contBB);
  1695. // Make a phi for the value to intrinsically use.
  1696. if (valueToUse) {
  1697. llvm::PHINode *phiToUse = CGF.Builder.CreatePHI(valueToUse->getType(), 2,
  1698. "icr.to-use");
  1699. phiToUse->addIncoming(valueToUse, copyBB);
  1700. phiToUse->addIncoming(llvm::UndefValue::get(valueToUse->getType()),
  1701. originBB);
  1702. valueToUse = phiToUse;
  1703. }
  1704. condEval.end(CGF);
  1705. }
  1706. args.addWriteback(srcLV, temp, valueToUse);
  1707. args.add(RValue::get(finalArgument), CRE->getType());
  1708. }
  1709. void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E,
  1710. QualType type) {
  1711. if (const ObjCIndirectCopyRestoreExpr *CRE
  1712. = dyn_cast<ObjCIndirectCopyRestoreExpr>(E)) {
  1713. assert(getLangOpts().ObjCAutoRefCount);
  1714. assert(getContext().hasSameType(E->getType(), type));
  1715. return emitWritebackArg(*this, args, CRE);
  1716. }
  1717. assert(type->isReferenceType() == E->isGLValue() &&
  1718. "reference binding to unmaterialized r-value!");
  1719. if (E->isGLValue()) {
  1720. assert(E->getObjectKind() == OK_Ordinary);
  1721. return args.add(EmitReferenceBindingToExpr(E), type);
  1722. }
  1723. bool HasAggregateEvalKind = hasAggregateEvaluationKind(type);
  1724. // In the Microsoft C++ ABI, aggregate arguments are destructed by the callee.
  1725. // However, we still have to push an EH-only cleanup in case we unwind before
  1726. // we make it to the call.
  1727. if (HasAggregateEvalKind &&
  1728. CGM.getTarget().getCXXABI().isArgumentDestroyedByCallee()) {
  1729. const CXXRecordDecl *RD = type->getAsCXXRecordDecl();
  1730. if (RD && RD->hasNonTrivialDestructor()) {
  1731. AggValueSlot Slot = CreateAggTemp(type, "agg.arg.tmp");
  1732. Slot.setExternallyDestructed();
  1733. EmitAggExpr(E, Slot);
  1734. RValue RV = Slot.asRValue();
  1735. args.add(RV, type);
  1736. pushDestroy(EHCleanup, RV.getAggregateAddr(), type, destroyCXXObject,
  1737. /*useEHCleanupForArray*/ true);
  1738. // This unreachable is a temporary marker which will be removed later.
  1739. llvm::Instruction *IsActive = Builder.CreateUnreachable();
  1740. args.addArgCleanupDeactivation(EHStack.getInnermostEHScope(), IsActive);
  1741. return;
  1742. }
  1743. }
  1744. if (HasAggregateEvalKind && isa<ImplicitCastExpr>(E) &&
  1745. cast<CastExpr>(E)->getCastKind() == CK_LValueToRValue) {
  1746. LValue L = EmitLValue(cast<CastExpr>(E)->getSubExpr());
  1747. assert(L.isSimple());
  1748. if (L.getAlignment() >= getContext().getTypeAlignInChars(type)) {
  1749. args.add(L.asAggregateRValue(), type, /*NeedsCopy*/true);
  1750. } else {
  1751. // We can't represent a misaligned lvalue in the CallArgList, so copy
  1752. // to an aligned temporary now.
  1753. llvm::Value *tmp = CreateMemTemp(type);
  1754. EmitAggregateCopy(tmp, L.getAddress(), type, L.isVolatile(),
  1755. L.getAlignment());
  1756. args.add(RValue::getAggregate(tmp), type);
  1757. }
  1758. return;
  1759. }
  1760. args.add(EmitAnyExprToTemp(E), type);
  1761. }
  1762. // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
  1763. // optimizer it can aggressively ignore unwind edges.
  1764. void
  1765. CodeGenFunction::AddObjCARCExceptionMetadata(llvm::Instruction *Inst) {
  1766. if (CGM.getCodeGenOpts().OptimizationLevel != 0 &&
  1767. !CGM.getCodeGenOpts().ObjCAutoRefCountExceptions)
  1768. Inst->setMetadata("clang.arc.no_objc_arc_exceptions",
  1769. CGM.getNoObjCARCExceptionsMetadata());
  1770. }
  1771. /// Emits a call to the given no-arguments nounwind runtime function.
  1772. llvm::CallInst *
  1773. CodeGenFunction::EmitNounwindRuntimeCall(llvm::Value *callee,
  1774. const llvm::Twine &name) {
  1775. return EmitNounwindRuntimeCall(callee, ArrayRef<llvm::Value*>(), name);
  1776. }
  1777. /// Emits a call to the given nounwind runtime function.
  1778. llvm::CallInst *
  1779. CodeGenFunction::EmitNounwindRuntimeCall(llvm::Value *callee,
  1780. ArrayRef<llvm::Value*> args,
  1781. const llvm::Twine &name) {
  1782. llvm::CallInst *call = EmitRuntimeCall(callee, args, name);
  1783. call->setDoesNotThrow();
  1784. return call;
  1785. }
  1786. /// Emits a simple call (never an invoke) to the given no-arguments
  1787. /// runtime function.
  1788. llvm::CallInst *
  1789. CodeGenFunction::EmitRuntimeCall(llvm::Value *callee,
  1790. const llvm::Twine &name) {
  1791. return EmitRuntimeCall(callee, ArrayRef<llvm::Value*>(), name);
  1792. }
  1793. /// Emits a simple call (never an invoke) to the given runtime
  1794. /// function.
  1795. llvm::CallInst *
  1796. CodeGenFunction::EmitRuntimeCall(llvm::Value *callee,
  1797. ArrayRef<llvm::Value*> args,
  1798. const llvm::Twine &name) {
  1799. llvm::CallInst *call = Builder.CreateCall(callee, args, name);
  1800. call->setCallingConv(getRuntimeCC());
  1801. return call;
  1802. }
  1803. /// Emits a call or invoke to the given noreturn runtime function.
  1804. void CodeGenFunction::EmitNoreturnRuntimeCallOrInvoke(llvm::Value *callee,
  1805. ArrayRef<llvm::Value*> args) {
  1806. if (getInvokeDest()) {
  1807. llvm::InvokeInst *invoke =
  1808. Builder.CreateInvoke(callee,
  1809. getUnreachableBlock(),
  1810. getInvokeDest(),
  1811. args);
  1812. invoke->setDoesNotReturn();
  1813. invoke->setCallingConv(getRuntimeCC());
  1814. } else {
  1815. llvm::CallInst *call = Builder.CreateCall(callee, args);
  1816. call->setDoesNotReturn();
  1817. call->setCallingConv(getRuntimeCC());
  1818. Builder.CreateUnreachable();
  1819. }
  1820. }
  1821. /// Emits a call or invoke instruction to the given nullary runtime
  1822. /// function.
  1823. llvm::CallSite
  1824. CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::Value *callee,
  1825. const Twine &name) {
  1826. return EmitRuntimeCallOrInvoke(callee, ArrayRef<llvm::Value*>(), name);
  1827. }
  1828. /// Emits a call or invoke instruction to the given runtime function.
  1829. llvm::CallSite
  1830. CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::Value *callee,
  1831. ArrayRef<llvm::Value*> args,
  1832. const Twine &name) {
  1833. llvm::CallSite callSite = EmitCallOrInvoke(callee, args, name);
  1834. callSite.setCallingConv(getRuntimeCC());
  1835. return callSite;
  1836. }
  1837. llvm::CallSite
  1838. CodeGenFunction::EmitCallOrInvoke(llvm::Value *Callee,
  1839. const Twine &Name) {
  1840. return EmitCallOrInvoke(Callee, ArrayRef<llvm::Value *>(), Name);
  1841. }
  1842. /// Emits a call or invoke instruction to the given function, depending
  1843. /// on the current state of the EH stack.
  1844. llvm::CallSite
  1845. CodeGenFunction::EmitCallOrInvoke(llvm::Value *Callee,
  1846. ArrayRef<llvm::Value *> Args,
  1847. const Twine &Name) {
  1848. llvm::BasicBlock *InvokeDest = getInvokeDest();
  1849. llvm::Instruction *Inst;
  1850. if (!InvokeDest)
  1851. Inst = Builder.CreateCall(Callee, Args, Name);
  1852. else {
  1853. llvm::BasicBlock *ContBB = createBasicBlock("invoke.cont");
  1854. Inst = Builder.CreateInvoke(Callee, ContBB, InvokeDest, Args, Name);
  1855. EmitBlock(ContBB);
  1856. }
  1857. // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
  1858. // optimizer it can aggressively ignore unwind edges.
  1859. if (CGM.getLangOpts().ObjCAutoRefCount)
  1860. AddObjCARCExceptionMetadata(Inst);
  1861. return Inst;
  1862. }
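/// Check (under asserts) that the IR value we are about to pass matches the
/// parameter type the callee's IR function type expects at this position,
/// then advance the IR argument index. Positions past the fixed parameters
/// are only required to be in the varargs portion.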
  1863. static void checkArgMatches(llvm::Value *Elt, unsigned &ArgNo,
  1864. llvm::FunctionType *FTy) {
  1865. if (ArgNo < FTy->getNumParams())
  1866. assert(Elt->getType() == FTy->getParamType(ArgNo));
  1867. else
  1868. assert(FTy->isVarArg());
  1869. ++ArgNo;
  1870. }
  1871. void CodeGenFunction::ExpandTypeToArgs(QualType Ty, RValue RV,
  1872. SmallVectorImpl<llvm::Value *> &Args,
  1873. llvm::FunctionType *IRFuncTy) {
  1874. if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
  1875. unsigned NumElts = AT->getSize().getZExtValue();
  1876. QualType EltTy = AT->getElementType();
  1877. llvm::Value *Addr = RV.getAggregateAddr();
  1878. for (unsigned Elt = 0; Elt < NumElts; ++Elt) {
  1879. llvm::Value *EltAddr = Builder.CreateConstGEP2_32(Addr, 0, Elt);
  1880. RValue EltRV = convertTempToRValue(EltAddr, EltTy);
  1881. ExpandTypeToArgs(EltTy, EltRV, Args, IRFuncTy);
  1882. }
  1883. } else if (const RecordType *RT = Ty->getAs<RecordType>()) {
  1884. RecordDecl *RD = RT->getDecl();
  1885. assert(RV.isAggregate() && "Unexpected rvalue during struct expansion");
  1886. LValue LV = MakeAddrLValue(RV.getAggregateAddr(), Ty);
  1887. if (RD->isUnion()) {
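// A union is passed via its largest member only; find that member first.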
  1888. const FieldDecl *LargestFD = 0;
  1889. CharUnits UnionSize = CharUnits::Zero();
  1890. for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
  1891. i != e; ++i) {
  1892. const FieldDecl *FD = *i;
  1893. assert(!FD->isBitField() &&
  1894. "Cannot expand structure with bit-field members.");
  1895. CharUnits FieldSize = getContext().getTypeSizeInChars(FD->getType());
  1896. if (UnionSize < FieldSize) {
  1897. UnionSize = FieldSize;
  1898. LargestFD = FD;
  1899. }
  1900. }
  1901. if (LargestFD) {
  1902. RValue FldRV = EmitRValueForField(LV, LargestFD);
  1903. ExpandTypeToArgs(LargestFD->getType(), FldRV, Args, IRFuncTy);
  1904. }
  1905. } else {
  1906. for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
  1907. i != e; ++i) {
  1908. FieldDecl *FD = *i;
  1909. RValue FldRV = EmitRValueForField(LV, FD);
  1910. ExpandTypeToArgs(FD->getType(), FldRV, Args, IRFuncTy);
  1911. }
  1912. }
  1913. } else if (Ty->isAnyComplexType()) {
  1914. ComplexPairTy CV = RV.getComplexVal();
  1915. Args.push_back(CV.first);
  1916. Args.push_back(CV.second);
  1917. } else {
  1918. assert(RV.isScalar() &&
  1919. "Unexpected non-scalar rvalue during struct expansion.");
  1920. // Insert a bitcast as needed.
  1921. llvm::Value *V = RV.getScalarVal();
  1922. if (Args.size() < IRFuncTy->getNumParams() &&
  1923. V->getType() != IRFuncTy->getParamType(Args.size()))
  1924. V = Builder.CreateBitCast(V, IRFuncTy->getParamType(Args.size()));
  1925. Args.push_back(V);
  1926. }
  1927. }
  1928. RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
  1929. llvm::Value *Callee,
  1930. ReturnValueSlot ReturnValue,
  1931. const CallArgList &CallArgs,
  1932. const Decl *TargetDecl,
  1933. llvm::Instruction **callOrInvoke) {
  1934. // FIXME: We no longer need the types from CallArgs; lift up and simplify.
  1935. SmallVector<llvm::Value*, 16> Args;
  1936. // Handle struct-return functions by passing a pointer to the
  1937. // location that we would like to return into.
  1938. QualType RetTy = CallInfo.getReturnType();
  1939. const ABIArgInfo &RetAI = CallInfo.getReturnInfo();
  1940. // IRArgNo - Keep track of the argument number in the callee we're looking at.
  1941. unsigned IRArgNo = 0;
  1942. llvm::FunctionType *IRFuncTy =
  1943. cast<llvm::FunctionType>(
  1944. cast<llvm::PointerType>(Callee->getType())->getElementType());
  1945. // If the call returns a temporary with struct return, create a temporary
  1946. // alloca to hold the result, unless one is given to us.
  1947. if (CGM.ReturnTypeUsesSRet(CallInfo)) {
  1948. llvm::Value *Value = ReturnValue.getValue();
  1949. if (!Value)
  1950. Value = CreateMemTemp(RetTy);
  1951. Args.push_back(Value);
  1952. checkArgMatches(Value, IRArgNo, IRFuncTy);
  1953. }
  1954. assert(CallInfo.arg_size() == CallArgs.size() &&
  1955. "Mismatch between function signature & arguments.");
  1956. CGFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin();
  1957. for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
  1958. I != E; ++I, ++info_it) {
  1959. const ABIArgInfo &ArgInfo = info_it->info;
  1960. RValue RV = I->RV;
  1961. CharUnits TypeAlign = getContext().getTypeAlignInChars(I->Ty);
  1962. // Insert a padding argument to ensure proper alignment.
  1963. if (llvm::Type *PaddingType = ArgInfo.getPaddingType()) {
  1964. Args.push_back(llvm::UndefValue::get(PaddingType));
  1965. ++IRArgNo;
  1966. }
  1967. switch (ArgInfo.getKind()) {
  1968. case ABIArgInfo::Indirect: {
  1969. if (RV.isScalar() || RV.isComplex()) {
  1970. // Make a temporary alloca to pass the argument.
  1971. llvm::AllocaInst *AI = CreateMemTemp(I->Ty);
  1972. if (ArgInfo.getIndirectAlign() > AI->getAlignment())
  1973. AI->setAlignment(ArgInfo.getIndirectAlign());
  1974. Args.push_back(AI);
  1975. LValue argLV =
  1976. MakeAddrLValue(Args.back(), I->Ty, TypeAlign);
  1977. if (RV.isScalar())
  1978. EmitStoreOfScalar(RV.getScalarVal(), argLV, /*init*/ true);
  1979. else
  1980. EmitStoreOfComplex(RV.getComplexVal(), argLV, /*init*/ true);
  1981. // Validate argument match.
  1982. checkArgMatches(AI, IRArgNo, IRFuncTy);
  1983. } else {
  1984. // We want to avoid creating an unnecessary temporary+copy here;
  1985. // however, we need one in three cases:
  1986. // 1. If the argument is not byval, and we are required to copy the
  1987. // source. (This case doesn't occur on any common architecture.)
  1988. // 2. If the argument is byval, RV is not sufficiently aligned, and
  1989. // we cannot force it to be sufficiently aligned.
  1990. // 3. If the argument is byval, but RV is located in an address space
  1991. // different than that of the argument (0).
  1992. llvm::Value *Addr = RV.getAggregateAddr();
  1993. unsigned Align = ArgInfo.getIndirectAlign();
  1994. const llvm::DataLayout *TD = &CGM.getDataLayout();
  1995. const unsigned RVAddrSpace = Addr->getType()->getPointerAddressSpace();
  1996. const unsigned ArgAddrSpace = (IRArgNo < IRFuncTy->getNumParams() ?
  1997. IRFuncTy->getParamType(IRArgNo)->getPointerAddressSpace() : 0);
  1998. if ((!ArgInfo.getIndirectByVal() && I->NeedsCopy) ||
  1999. (ArgInfo.getIndirectByVal() && TypeAlign.getQuantity() < Align &&
  2000. llvm::getOrEnforceKnownAlignment(Addr, Align, TD) < Align) ||
  2001. (ArgInfo.getIndirectByVal() && (RVAddrSpace != ArgAddrSpace))) {
  2002. // Create an aligned temporary, and copy to it.
  2003. llvm::AllocaInst *AI = CreateMemTemp(I->Ty);
  2004. if (Align > AI->getAlignment())
  2005. AI->setAlignment(Align);
  2006. Args.push_back(AI);
  2007. EmitAggregateCopy(AI, Addr, I->Ty, RV.isVolatileQualified());
  2008. // Validate argument match.
  2009. checkArgMatches(AI, IRArgNo, IRFuncTy);
  2010. } else {
  2011. // Skip the extra memcpy call.
  2012. Args.push_back(Addr);
  2013. // Validate argument match.
  2014. checkArgMatches(Addr, IRArgNo, IRFuncTy);
  2015. }
  2016. }
  2017. break;
  2018. }
  2019. case ABIArgInfo::Ignore:
  2020. break;
  2021. case ABIArgInfo::Extend:
  2022. case ABIArgInfo::Direct: {
  2023. if (!isa<llvm::StructType>(ArgInfo.getCoerceToType()) &&
  2024. ArgInfo.getCoerceToType() == ConvertType(info_it->type) &&
  2025. ArgInfo.getDirectOffset() == 0) {
  2026. llvm::Value *V;
  2027. if (RV.isScalar())
  2028. V = RV.getScalarVal();
  2029. else
  2030. V = Builder.CreateLoad(RV.getAggregateAddr());
  2031. // If the argument doesn't match, perform a bitcast to coerce it. This
  2032. // can happen due to trivial type mismatches.
  2033. if (IRArgNo < IRFuncTy->getNumParams() &&
  2034. V->getType() != IRFuncTy->getParamType(IRArgNo))
  2035. V = Builder.CreateBitCast(V, IRFuncTy->getParamType(IRArgNo));
  2036. Args.push_back(V);
  2037. checkArgMatches(V, IRArgNo, IRFuncTy);
  2038. break;
  2039. }
  2040. // FIXME: Avoid the conversion through memory if possible.
  2041. llvm::Value *SrcPtr;
  2042. if (RV.isScalar() || RV.isComplex()) {
  2043. SrcPtr = CreateMemTemp(I->Ty, "coerce");
  2044. LValue SrcLV = MakeAddrLValue(SrcPtr, I->Ty, TypeAlign);
  2045. if (RV.isScalar()) {
  2046. EmitStoreOfScalar(RV.getScalarVal(), SrcLV, /*init*/ true);
  2047. } else {
  2048. EmitStoreOfComplex(RV.getComplexVal(), SrcLV, /*init*/ true);
  2049. }
  2050. } else
  2051. SrcPtr = RV.getAggregateAddr();
  2052. // If the value is offset in memory, apply the offset now.
  2053. if (unsigned Offs = ArgInfo.getDirectOffset()) {
  2054. SrcPtr = Builder.CreateBitCast(SrcPtr, Builder.getInt8PtrTy());
  2055. SrcPtr = Builder.CreateConstGEP1_32(SrcPtr, Offs);
  2056. SrcPtr = Builder.CreateBitCast(SrcPtr,
  2057. llvm::PointerType::getUnqual(ArgInfo.getCoerceToType()));
  2058. }
  2059. // If the coerce-to type is a first class aggregate, we flatten it and
  2060. // pass the elements. Either way is semantically identical, but fast-isel
  2061. // and the optimizer generally likes scalar values better than FCAs.
  2062. if (llvm::StructType *STy =
  2063. dyn_cast<llvm::StructType>(ArgInfo.getCoerceToType())) {
  2064. llvm::Type *SrcTy =
  2065. cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
  2066. uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(SrcTy);
  2067. uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(STy);
  2068. // If the source type is smaller than the destination type of the
  2069. // coerce-to logic, copy the source value into a temp alloca the size
  2070. // of the destination type to allow loading all of it. The bits past
  2071. // the source value are left undef.
  2072. if (SrcSize < DstSize) {
  2073. llvm::AllocaInst *TempAlloca
  2074. = CreateTempAlloca(STy, SrcPtr->getName() + ".coerce");
  2075. Builder.CreateMemCpy(TempAlloca, SrcPtr, SrcSize, 0);
  2076. SrcPtr = TempAlloca;
  2077. } else {
  2078. SrcPtr = Builder.CreateBitCast(SrcPtr,
  2079. llvm::PointerType::getUnqual(STy));
  2080. }
  2081. for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
  2082. llvm::Value *EltPtr = Builder.CreateConstGEP2_32(SrcPtr, 0, i);
  2083. llvm::LoadInst *LI = Builder.CreateLoad(EltPtr);
  2084. // We don't know what we're loading from.
  2085. LI->setAlignment(1);
  2086. Args.push_back(LI);
  2087. // Validate argument match.
  2088. checkArgMatches(LI, IRArgNo, IRFuncTy);
  2089. }
  2090. } else {
  2091. // In the simple case, just pass the coerced loaded value.
  2092. Args.push_back(CreateCoercedLoad(SrcPtr, ArgInfo.getCoerceToType(),
  2093. *this));
  2094. // Validate argument match.
  2095. checkArgMatches(Args.back(), IRArgNo, IRFuncTy);
  2096. }
  2097. break;
  2098. }
  2099. case ABIArgInfo::Expand:
  2100. ExpandTypeToArgs(I->Ty, RV, Args, IRFuncTy);
  2101. IRArgNo = Args.size();
  2102. break;
  2103. }
  2104. }
  2105. if (!CallArgs.getCleanupsToDeactivate().empty())
  2106. deactivateArgCleanupsBeforeCall(*this, CallArgs);
  2107. // If the callee is a bitcast of a function to a varargs pointer to function
  2108. // type, check to see if we can remove the bitcast. This handles some cases
  2109. // with unprototyped functions.
  2110. if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Callee))
  2111. if (llvm::Function *CalleeF = dyn_cast<llvm::Function>(CE->getOperand(0))) {
llvm::PointerType *CurPT = cast<llvm::PointerType>(Callee->getType());
  2113. llvm::FunctionType *CurFT =
  2114. cast<llvm::FunctionType>(CurPT->getElementType());
  2115. llvm::FunctionType *ActualFT = CalleeF->getFunctionType();
  2116. if (CE->getOpcode() == llvm::Instruction::BitCast &&
  2117. ActualFT->getReturnType() == CurFT->getReturnType() &&
  2118. ActualFT->getNumParams() == CurFT->getNumParams() &&
  2119. ActualFT->getNumParams() == Args.size() &&
  2120. (CurFT->isVarArg() || !ActualFT->isVarArg())) {
  2121. bool ArgsMatch = true;
  2122. for (unsigned i = 0, e = ActualFT->getNumParams(); i != e; ++i)
  2123. if (ActualFT->getParamType(i) != CurFT->getParamType(i)) {
  2124. ArgsMatch = false;
  2125. break;
  2126. }
  2127. // Strip the cast if we can get away with it. This is a nice cleanup,
  2128. // but also allows us to inline the function at -O0 if it is marked
  2129. // always_inline.
  2130. if (ArgsMatch)
  2131. Callee = CalleeF;
  2132. }
  2133. }
  2134. unsigned CallingConv;
  2135. CodeGen::AttributeListType AttributeList;
  2136. CGM.ConstructAttributeList(CallInfo, TargetDecl, AttributeList,
  2137. CallingConv, true);
  2138. llvm::AttributeSet Attrs = llvm::AttributeSet::get(getLLVMContext(),
  2139. AttributeList);
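// If the callee is known not to unwind, a plain call suffices even inside an
// EH scope; otherwise, when there is an active invoke destination, emit an
// invoke so that unwinding branches to the landing pad.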
  2140. llvm::BasicBlock *InvokeDest = 0;
  2141. if (!Attrs.hasAttribute(llvm::AttributeSet::FunctionIndex,
  2142. llvm::Attribute::NoUnwind))
  2143. InvokeDest = getInvokeDest();
  2144. llvm::CallSite CS;
  2145. if (!InvokeDest) {
  2146. CS = Builder.CreateCall(Callee, Args);
  2147. } else {
  2148. llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
  2149. CS = Builder.CreateInvoke(Callee, Cont, InvokeDest, Args);
  2150. EmitBlock(Cont);
  2151. }
  2152. if (callOrInvoke)
  2153. *callOrInvoke = CS.getInstruction();
  2154. CS.setAttributes(Attrs);
  2155. CS.setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));
  2156. // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
  2157. // optimizer it can aggressively ignore unwind edges.
  2158. if (CGM.getLangOpts().ObjCAutoRefCount)
  2159. AddObjCARCExceptionMetadata(CS.getInstruction());
  2160. // If the call doesn't return, finish the basic block and clear the
  2161. // insertion point; this allows the rest of IRgen to discard
  2162. // unreachable code.
  2163. if (CS.doesNotReturn()) {
  2164. Builder.CreateUnreachable();
  2165. Builder.ClearInsertionPoint();
// FIXME: For now, emit a dummy basic block because expr emitters in general
// are not ready to handle emitting expressions at unreachable points.
  2169. EnsureInsertPoint();
  2170. // Return a reasonable RValue.
  2171. return GetUndefRValue(RetTy);
  2172. }
  2173. llvm::Instruction *CI = CS.getInstruction();
  2174. if (Builder.isNamePreserving() && !CI->getType()->isVoidTy())
  2175. CI->setName("call");
  2176. // Emit any writebacks immediately. Arguably this should happen
  2177. // after any return-value munging.
  2178. if (CallArgs.hasWritebacks())
  2179. emitWritebacks(*this, CallArgs);
  2180. switch (RetAI.getKind()) {
  2181. case ABIArgInfo::Indirect:
  2182. return convertTempToRValue(Args[0], RetTy);
  2183. case ABIArgInfo::Ignore:
// The ABI is ignoring the call's result (e.g. a void or empty return type),
// but we still need to construct a value of the right type for our caller.
  2186. return GetUndefRValue(RetTy);
  2187. case ABIArgInfo::Extend:
  2188. case ABIArgInfo::Direct: {
  2189. llvm::Type *RetIRTy = ConvertType(RetTy);
  2190. if (RetAI.getCoerceToType() == RetIRTy && RetAI.getDirectOffset() == 0) {
  2191. switch (getEvaluationKind(RetTy)) {
  2192. case TEK_Complex: {
  2193. llvm::Value *Real = Builder.CreateExtractValue(CI, 0);
  2194. llvm::Value *Imag = Builder.CreateExtractValue(CI, 1);
  2195. return RValue::getComplex(std::make_pair(Real, Imag));
  2196. }
  2197. case TEK_Aggregate: {
  2198. llvm::Value *DestPtr = ReturnValue.getValue();
  2199. bool DestIsVolatile = ReturnValue.isVolatile();
  2200. if (!DestPtr) {
  2201. DestPtr = CreateMemTemp(RetTy, "agg.tmp");
  2202. DestIsVolatile = false;
  2203. }
  2204. BuildAggStore(*this, CI, DestPtr, DestIsVolatile, false);
  2205. return RValue::getAggregate(DestPtr);
  2206. }
  2207. case TEK_Scalar: {
  2208. // If the argument doesn't match, perform a bitcast to coerce it. This
  2209. // can happen due to trivial type mismatches.
  2210. llvm::Value *V = CI;
  2211. if (V->getType() != RetIRTy)
  2212. V = Builder.CreateBitCast(V, RetIRTy);
  2213. return RValue::get(V);
  2214. }
  2215. }
  2216. llvm_unreachable("bad evaluation kind");
  2217. }
  2218. llvm::Value *DestPtr = ReturnValue.getValue();
  2219. bool DestIsVolatile = ReturnValue.isVolatile();
  2220. if (!DestPtr) {
  2221. DestPtr = CreateMemTemp(RetTy, "coerce");
  2222. DestIsVolatile = false;
  2223. }
  2224. // If the value is offset in memory, apply the offset now.
  2225. llvm::Value *StorePtr = DestPtr;
  2226. if (unsigned Offs = RetAI.getDirectOffset()) {
  2227. StorePtr = Builder.CreateBitCast(StorePtr, Builder.getInt8PtrTy());
  2228. StorePtr = Builder.CreateConstGEP1_32(StorePtr, Offs);
  2229. StorePtr = Builder.CreateBitCast(StorePtr,
  2230. llvm::PointerType::getUnqual(RetAI.getCoerceToType()));
  2231. }
  2232. CreateCoercedStore(CI, StorePtr, DestIsVolatile, *this);
  2233. return convertTempToRValue(DestPtr, RetTy);
  2234. }
  2235. case ABIArgInfo::Expand:
  2236. llvm_unreachable("Invalid ABI kind for return argument");
  2237. }
  2238. llvm_unreachable("Unhandled ABIArgInfo::Kind");
  2239. }
  2240. /* VarArg handling */
  2241. llvm::Value *CodeGenFunction::EmitVAArg(llvm::Value *VAListAddr, QualType Ty) {
  2242. return CGM.getTypes().getABIInfo().EmitVAArg(VAListAddr, Ty, *this);
  2243. }