CGCall.cpp

  1. //===--- CGCall.cpp - Encapsulate calling convention details ----*- C++ -*-===//
  2. //
  3. // The LLVM Compiler Infrastructure
  4. //
  5. // This file is distributed under the University of Illinois Open Source
  6. // License. See LICENSE.TXT for details.
  7. //
  8. //===----------------------------------------------------------------------===//
  9. //
  10. // These classes wrap the information about a call or function
  11. // definition used to handle ABI compliance.
  12. //
  13. //===----------------------------------------------------------------------===//
  14. #include "CGCall.h"
  15. #include "CGCXXABI.h"
  16. #include "ABIInfo.h"
  17. #include "CodeGenFunction.h"
  18. #include "CodeGenModule.h"
  19. #include "clang/Basic/TargetInfo.h"
  20. #include "clang/AST/Decl.h"
  21. #include "clang/AST/DeclCXX.h"
  22. #include "clang/AST/DeclObjC.h"
  23. #include "clang/Frontend/CodeGenOptions.h"
  24. #include "llvm/Attributes.h"
  25. #include "llvm/Support/CallSite.h"
  26. #include "llvm/Target/TargetData.h"
  27. #include "llvm/InlineAsm.h"
  28. #include "llvm/Transforms/Utils/Local.h"
  29. using namespace clang;
  30. using namespace CodeGen;
  31. /***/
  32. static unsigned ClangCallConvToLLVMCallConv(CallingConv CC) {
  33. switch (CC) {
  34. default: return llvm::CallingConv::C;
  35. case CC_X86StdCall: return llvm::CallingConv::X86_StdCall;
  36. case CC_X86FastCall: return llvm::CallingConv::X86_FastCall;
  37. case CC_X86ThisCall: return llvm::CallingConv::X86_ThisCall;
  38. case CC_AAPCS: return llvm::CallingConv::ARM_AAPCS;
  39. case CC_AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
  40. // TODO: add support for CC_X86Pascal to llvm
  41. }
  42. }
  43. /// Derives the 'this' type for codegen purposes, i.e. ignoring method
  44. /// qualification.
  45. /// FIXME: address space qualification?
  46. static CanQualType GetThisType(ASTContext &Context, const CXXRecordDecl *RD) {
  47. QualType RecTy = Context.getTagDeclType(RD)->getCanonicalTypeInternal();
  48. return Context.getPointerType(CanQualType::CreateUnsafe(RecTy));
  49. }
  50. /// Returns the canonical formal type of the given C++ method.
  51. static CanQual<FunctionProtoType> GetFormalType(const CXXMethodDecl *MD) {
  52. return MD->getType()->getCanonicalTypeUnqualified()
  53. .getAs<FunctionProtoType>();
  54. }
  55. /// Returns the "extra-canonicalized" return type, which discards
  56. /// qualifiers on the return type. Codegen doesn't care about them,
  57. /// and it makes ABI code a little easier to be able to assume that
  58. /// all parameter and return types are top-level unqualified.
  59. static CanQualType GetReturnType(QualType RetTy) {
  60. return RetTy->getCanonicalTypeUnqualified().getUnqualifiedType();
  61. }
  62. const CGFunctionInfo &
  63. CodeGenTypes::getFunctionInfo(CanQual<FunctionNoProtoType> FTNP) {
  64. return getFunctionInfo(FTNP->getResultType().getUnqualifiedType(),
  65. SmallVector<CanQualType, 16>(),
  66. FTNP->getExtInfo());
  67. }
  68. /// \param ArgTys - contains any initial parameters besides those
  69. /// in the formal type
  70. static const CGFunctionInfo &getFunctionInfo(CodeGenTypes &CGT,
  71. SmallVectorImpl<CanQualType> &ArgTys,
  72. CanQual<FunctionProtoType> FTP) {
  73. // FIXME: Kill copy.
  74. for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
  75. ArgTys.push_back(FTP->getArgType(i));
  76. CanQualType ResTy = FTP->getResultType().getUnqualifiedType();
  77. return CGT.getFunctionInfo(ResTy, ArgTys, FTP->getExtInfo());
  78. }
  79. const CGFunctionInfo &
  80. CodeGenTypes::getFunctionInfo(CanQual<FunctionProtoType> FTP) {
  81. SmallVector<CanQualType, 16> ArgTys;
  82. return ::getFunctionInfo(*this, ArgTys, FTP);
  83. }
  84. static CallingConv getCallingConventionForDecl(const Decl *D) {
  85. // Set the appropriate calling convention for the Function.
  86. if (D->hasAttr<StdCallAttr>())
  87. return CC_X86StdCall;
  88. if (D->hasAttr<FastCallAttr>())
  89. return CC_X86FastCall;
  90. if (D->hasAttr<ThisCallAttr>())
  91. return CC_X86ThisCall;
  92. if (D->hasAttr<PascalAttr>())
  93. return CC_X86Pascal;
  94. if (PcsAttr *PCS = D->getAttr<PcsAttr>())
  95. return (PCS->getPCS() == PcsAttr::AAPCS ? CC_AAPCS : CC_AAPCS_VFP);
  96. return CC_C;
  97. }
  98. const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const CXXRecordDecl *RD,
  99. const FunctionProtoType *FTP) {
  100. SmallVector<CanQualType, 16> ArgTys;
  101. // Add the 'this' pointer.
  102. ArgTys.push_back(GetThisType(Context, RD));
  103. return ::getFunctionInfo(*this, ArgTys,
  104. FTP->getCanonicalTypeUnqualified().getAs<FunctionProtoType>());
  105. }
  106. const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const CXXMethodDecl *MD) {
  107. SmallVector<CanQualType, 16> ArgTys;
  108. assert(!isa<CXXConstructorDecl>(MD) && "wrong method for constructors!");
  109. assert(!isa<CXXDestructorDecl>(MD) && "wrong method for destructors!");
  110. // Add the 'this' pointer unless this is a static method.
  111. if (MD->isInstance())
  112. ArgTys.push_back(GetThisType(Context, MD->getParent()));
  113. return ::getFunctionInfo(*this, ArgTys, GetFormalType(MD));
  114. }
  115. const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const CXXConstructorDecl *D,
  116. CXXCtorType Type) {
  117. SmallVector<CanQualType, 16> ArgTys;
  118. ArgTys.push_back(GetThisType(Context, D->getParent()));
  119. CanQualType ResTy = Context.VoidTy;
  120. TheCXXABI.BuildConstructorSignature(D, Type, ResTy, ArgTys);
  121. CanQual<FunctionProtoType> FTP = GetFormalType(D);
  122. // Add the formal parameters.
  123. for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
  124. ArgTys.push_back(FTP->getArgType(i));
  125. return getFunctionInfo(ResTy, ArgTys, FTP->getExtInfo());
  126. }
  127. const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const CXXDestructorDecl *D,
  128. CXXDtorType Type) {
  129. SmallVector<CanQualType, 2> ArgTys;
  130. ArgTys.push_back(GetThisType(Context, D->getParent()));
  131. CanQualType ResTy = Context.VoidTy;
  132. TheCXXABI.BuildDestructorSignature(D, Type, ResTy, ArgTys);
  133. CanQual<FunctionProtoType> FTP = GetFormalType(D);
  134. assert(FTP->getNumArgs() == 0 && "dtor with formal parameters");
  135. return getFunctionInfo(ResTy, ArgTys, FTP->getExtInfo());
  136. }
  137. const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const FunctionDecl *FD) {
  138. if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
  139. if (MD->isInstance())
  140. return getFunctionInfo(MD);
  141. CanQualType FTy = FD->getType()->getCanonicalTypeUnqualified();
  142. assert(isa<FunctionType>(FTy));
  143. if (isa<FunctionNoProtoType>(FTy))
  144. return getFunctionInfo(FTy.getAs<FunctionNoProtoType>());
  145. assert(isa<FunctionProtoType>(FTy));
  146. return getFunctionInfo(FTy.getAs<FunctionProtoType>());
  147. }
  148. const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const ObjCMethodDecl *MD) {
  149. SmallVector<CanQualType, 16> ArgTys;
  150. ArgTys.push_back(Context.getCanonicalParamType(MD->getSelfDecl()->getType()));
  151. ArgTys.push_back(Context.getCanonicalParamType(Context.getObjCSelType()));
  152. // FIXME: Kill copy?
  153. for (ObjCMethodDecl::param_iterator i = MD->param_begin(),
  154. e = MD->param_end(); i != e; ++i) {
  155. ArgTys.push_back(Context.getCanonicalParamType((*i)->getType()));
  156. }
  157. FunctionType::ExtInfo einfo;
  158. einfo = einfo.withCallingConv(getCallingConventionForDecl(MD));
  159. if (getContext().getLangOptions().ObjCAutoRefCount &&
  160. MD->hasAttr<NSReturnsRetainedAttr>())
  161. einfo = einfo.withProducesResult(true);
  162. return getFunctionInfo(GetReturnType(MD->getResultType()), ArgTys, einfo);
  163. }
  164. const CGFunctionInfo &CodeGenTypes::getFunctionInfo(GlobalDecl GD) {
  165. // FIXME: Do we need to handle ObjCMethodDecl?
  166. const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
  167. if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD))
  168. return getFunctionInfo(CD, GD.getCtorType());
  169. if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(FD))
  170. return getFunctionInfo(DD, GD.getDtorType());
  171. return getFunctionInfo(FD);
  172. }
  173. const CGFunctionInfo &CodeGenTypes::getFunctionInfo(QualType ResTy,
  174. const CallArgList &Args,
  175. const FunctionType::ExtInfo &Info) {
  176. // FIXME: Kill copy.
  177. SmallVector<CanQualType, 16> ArgTys;
  178. for (CallArgList::const_iterator i = Args.begin(), e = Args.end();
  179. i != e; ++i)
  180. ArgTys.push_back(Context.getCanonicalParamType(i->Ty));
  181. return getFunctionInfo(GetReturnType(ResTy), ArgTys, Info);
  182. }
  183. const CGFunctionInfo &CodeGenTypes::getFunctionInfo(QualType ResTy,
  184. const FunctionArgList &Args,
  185. const FunctionType::ExtInfo &Info) {
  186. // FIXME: Kill copy.
  187. SmallVector<CanQualType, 16> ArgTys;
  188. for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
  189. i != e; ++i)
  190. ArgTys.push_back(Context.getCanonicalParamType((*i)->getType()));
  191. return getFunctionInfo(GetReturnType(ResTy), ArgTys, Info);
  192. }
  193. const CGFunctionInfo &CodeGenTypes::getNullaryFunctionInfo() {
  194. SmallVector<CanQualType, 1> args;
  195. return getFunctionInfo(getContext().VoidTy, args, FunctionType::ExtInfo());
  196. }
  197. const CGFunctionInfo &CodeGenTypes::getFunctionInfo(CanQualType ResTy,
  198. const SmallVectorImpl<CanQualType> &ArgTys,
  199. const FunctionType::ExtInfo &Info) {
  200. #ifndef NDEBUG
  201. for (SmallVectorImpl<CanQualType>::const_iterator
  202. I = ArgTys.begin(), E = ArgTys.end(); I != E; ++I)
  203. assert(I->isCanonicalAsParam());
  204. #endif
  205. unsigned CC = ClangCallConvToLLVMCallConv(Info.getCC());
  206. // Look up or create unique function info.
  207. llvm::FoldingSetNodeID ID;
  208. CGFunctionInfo::Profile(ID, Info, ResTy, ArgTys.begin(), ArgTys.end());
  209. void *InsertPos = 0;
  210. CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, InsertPos);
  211. if (FI)
  212. return *FI;
  213. // Construct the function info.
  214. FI = new CGFunctionInfo(CC, Info.getNoReturn(), Info.getProducesResult(),
  215. Info.getHasRegParm(), Info.getRegParm(), ResTy,
  216. ArgTys.data(), ArgTys.size());
  217. FunctionInfos.InsertNode(FI, InsertPos);
  218. bool Inserted = FunctionsBeingProcessed.insert(FI); (void)Inserted;
  219. assert(Inserted && "Recursively being processed?");
  220. // Compute ABI information.
  221. getABIInfo().computeInfo(*FI);
  222. // Loop over all of the computed argument and return value info. If any of
  223. // them are direct or extend without a specified coerce type, specify the
  224. // default now.
  225. ABIArgInfo &RetInfo = FI->getReturnInfo();
  226. if (RetInfo.canHaveCoerceToType() && RetInfo.getCoerceToType() == 0)
  227. RetInfo.setCoerceToType(ConvertType(FI->getReturnType()));
  228. for (CGFunctionInfo::arg_iterator I = FI->arg_begin(), E = FI->arg_end();
  229. I != E; ++I)
  230. if (I->info.canHaveCoerceToType() && I->info.getCoerceToType() == 0)
  231. I->info.setCoerceToType(ConvertType(I->type));
  232. bool Erased = FunctionsBeingProcessed.erase(FI); (void)Erased;
  233. assert(Erased && "Not in set?");
  234. return *FI;
  235. }
  236. CGFunctionInfo::CGFunctionInfo(unsigned _CallingConvention,
  237. bool _NoReturn, bool returnsRetained,
  238. bool _HasRegParm, unsigned _RegParm,
  239. CanQualType ResTy,
  240. const CanQualType *ArgTys,
  241. unsigned NumArgTys)
  242. : CallingConvention(_CallingConvention),
  243. EffectiveCallingConvention(_CallingConvention),
  244. NoReturn(_NoReturn), ReturnsRetained(returnsRetained),
  245. HasRegParm(_HasRegParm), RegParm(_RegParm)
  246. {
  247. NumArgs = NumArgTys;
  248. // FIXME: Co-allocate with the CGFunctionInfo object.
  249. Args = new ArgInfo[1 + NumArgTys];
  250. Args[0].type = ResTy;
  251. for (unsigned i = 0; i != NumArgTys; ++i)
  252. Args[1 + i].type = ArgTys[i];
  253. }
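  // Layout note (illustrative): the ArgInfo array allocated here stores the
  // return type at index 0 and the parameters at indices 1..NumArgs, e.g. for
  // 'int f(float, char*)':
  //   Args[0].type = int     (return)
  //   Args[1].type = float   (parameter 0)
  //   Args[2].type = char*   (parameter 1)
  // getReturnInfo() and the arg iterators index into this single array.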
  254. /***/
  255. void CodeGenTypes::GetExpandedTypes(QualType type,
  256. SmallVectorImpl<llvm::Type*> &expandedTypes) {
  257. if (const ConstantArrayType *AT = Context.getAsConstantArrayType(type)) {
  258. uint64_t NumElts = AT->getSize().getZExtValue();
  259. for (uint64_t Elt = 0; Elt < NumElts; ++Elt)
  260. GetExpandedTypes(AT->getElementType(), expandedTypes);
  261. } else if (const RecordType *RT = type->getAsStructureType()) {
  262. const RecordDecl *RD = RT->getDecl();
  263. assert(!RD->hasFlexibleArrayMember() &&
  264. "Cannot expand structure with flexible array.");
  265. for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
  266. i != e; ++i) {
  267. const FieldDecl *FD = *i;
  268. assert(!FD->isBitField() &&
  269. "Cannot expand structure with bit-field members.");
  270. GetExpandedTypes(FD->getType(), expandedTypes);
  271. }
  272. } else if (const ComplexType *CT = type->getAs<ComplexType>()) {
  273. llvm::Type *EltTy = ConvertType(CT->getElementType());
  274. expandedTypes.push_back(EltTy);
  275. expandedTypes.push_back(EltTy);
  276. } else
  277. expandedTypes.push_back(ConvertType(type));
  278. }
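  // Example (illustrative): for a C type such as
  //   struct S { int i; float f[2]; _Complex double z; };
  // the recursion flattens the array and the complex member, producing the IR
  // type list { i32, float, float, double, double }; ABIArgInfo::Expand then
  // passes these as separate IR arguments.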
  279. llvm::Function::arg_iterator
  280. CodeGenFunction::ExpandTypeFromArgs(QualType Ty, LValue LV,
  281. llvm::Function::arg_iterator AI) {
  282. assert(LV.isSimple() &&
  283. "Unexpected non-simple lvalue during struct expansion.");
  284. llvm::Value *Addr = LV.getAddress();
  285. if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
  286. unsigned NumElts = AT->getSize().getZExtValue();
  287. QualType EltTy = AT->getElementType();
  288. for (unsigned Elt = 0; Elt < NumElts; ++Elt) {
  289. llvm::Value *EltAddr = Builder.CreateConstGEP2_32(Addr, 0, Elt);
  290. LValue LV = MakeAddrLValue(EltAddr, EltTy);
  291. AI = ExpandTypeFromArgs(EltTy, LV, AI);
  292. }
  293. } else if (const RecordType *RT = Ty->getAsStructureType()) {
  294. RecordDecl *RD = RT->getDecl();
  295. for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
  296. i != e; ++i) {
  297. FieldDecl *FD = *i;
  298. QualType FT = FD->getType();
  299. // FIXME: What are the right qualifiers here?
  300. LValue LV = EmitLValueForField(Addr, FD, 0);
  301. AI = ExpandTypeFromArgs(FT, LV, AI);
  302. }
  303. } else if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
  304. QualType EltTy = CT->getElementType();
  305. llvm::Value *RealAddr = Builder.CreateStructGEP(Addr, 0, "real");
  306. EmitStoreThroughLValue(RValue::get(AI++), MakeAddrLValue(RealAddr, EltTy));
  307. llvm::Value *ImagAddr = Builder.CreateStructGEP(Addr, 0, "imag");
  308. EmitStoreThroughLValue(RValue::get(AI++), MakeAddrLValue(ImagAddr, EltTy));
  309. } else {
  310. EmitStoreThroughLValue(RValue::get(AI), LV);
  311. ++AI;
  312. }
  313. return AI;
  314. }
  315. /// EnterStructPointerForCoercedAccess - Given a struct pointer that we are
  316. /// accessing some number of bytes out of it, try to gep into the struct to get
  317. /// at its inner goodness. Dive as deep as possible without entering an element
  318. /// with an in-memory size smaller than DstSize.
  319. static llvm::Value *
  320. EnterStructPointerForCoercedAccess(llvm::Value *SrcPtr,
  321. llvm::StructType *SrcSTy,
  322. uint64_t DstSize, CodeGenFunction &CGF) {
  323. // We can't dive into a zero-element struct.
  324. if (SrcSTy->getNumElements() == 0) return SrcPtr;
  325. llvm::Type *FirstElt = SrcSTy->getElementType(0);
  326. // If the first elt is at least as large as what we're looking for, or if the
  327. // first element is the same size as the whole struct, we can enter it.
  328. uint64_t FirstEltSize =
  329. CGF.CGM.getTargetData().getTypeAllocSize(FirstElt);
  330. if (FirstEltSize < DstSize &&
  331. FirstEltSize < CGF.CGM.getTargetData().getTypeAllocSize(SrcSTy))
  332. return SrcPtr;
  333. // GEP into the first element.
  334. SrcPtr = CGF.Builder.CreateConstGEP2_32(SrcPtr, 0, 0, "coerce.dive");
  335. // If the first element is a struct, recurse.
  336. llvm::Type *SrcTy =
  337. cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
  338. if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy))
  339. return EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);
  340. return SrcPtr;
  341. }
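  // Example (illustrative): given %inner = type { i64 } and
  // %outer = type { %inner, i8 }, a coerced access of DstSize == 8 through an
  // %outer* emits two "coerce.dive" GEPs and returns an i64*, because at each
  // level the first element is at least as large as the 8 bytes being accessed.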
  342. /// CoerceIntOrPtrToIntOrPtr - Convert a value Val to the specific Ty where both
  343. /// are either integers or pointers. This does a truncation of the value if it
  344. /// is too large or a zero extension if it is too small.
  345. static llvm::Value *CoerceIntOrPtrToIntOrPtr(llvm::Value *Val,
  346. llvm::Type *Ty,
  347. CodeGenFunction &CGF) {
  348. if (Val->getType() == Ty)
  349. return Val;
  350. if (isa<llvm::PointerType>(Val->getType())) {
  351. // If this is Pointer->Pointer avoid conversion to and from int.
  352. if (isa<llvm::PointerType>(Ty))
  353. return CGF.Builder.CreateBitCast(Val, Ty, "coerce.val");
  354. // Convert the pointer to an integer so we can play with its width.
  355. Val = CGF.Builder.CreatePtrToInt(Val, CGF.IntPtrTy, "coerce.val.pi");
  356. }
  357. llvm::Type *DestIntTy = Ty;
  358. if (isa<llvm::PointerType>(DestIntTy))
  359. DestIntTy = CGF.IntPtrTy;
  360. if (Val->getType() != DestIntTy)
  361. Val = CGF.Builder.CreateIntCast(Val, DestIntTy, false, "coerce.val.ii");
  362. if (isa<llvm::PointerType>(Ty))
  363. Val = CGF.Builder.CreateIntToPtr(Val, Ty, "coerce.val.ip");
  364. return Val;
  365. }
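  // Example (illustrative, assuming a 64-bit target where IntPtrTy is i64):
  // coercing an i8* value to i32 emits
  //   %coerce.val.pi = ptrtoint i8* %val to i64
  //   %coerce.val.ii = trunc i64 %coerce.val.pi to i32
  // while the reverse direction (i32 -> i8*) zero-extends to i64 before the
  // final inttoptr ("coerce.val.ip").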
  366. /// CreateCoercedLoad - Create a load from \arg SrcPtr interpreted as
  367. /// a pointer to an object of type \arg Ty.
  368. ///
  369. /// This safely handles the case when the src type is smaller than the
  370. /// destination type; in this situation the values of bits which are not
  371. /// present in the src are undefined.
  372. static llvm::Value *CreateCoercedLoad(llvm::Value *SrcPtr,
  373. llvm::Type *Ty,
  374. CodeGenFunction &CGF) {
  375. llvm::Type *SrcTy =
  376. cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
  377. // If SrcTy and Ty are the same, just do a load.
  378. if (SrcTy == Ty)
  379. return CGF.Builder.CreateLoad(SrcPtr);
  380. uint64_t DstSize = CGF.CGM.getTargetData().getTypeAllocSize(Ty);
  381. if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) {
  382. SrcPtr = EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);
  383. SrcTy = cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
  384. }
  385. uint64_t SrcSize = CGF.CGM.getTargetData().getTypeAllocSize(SrcTy);
  386. // If the source and destination are integer or pointer types, just do an
  387. // extension or truncation to the desired type.
  388. if ((isa<llvm::IntegerType>(Ty) || isa<llvm::PointerType>(Ty)) &&
  389. (isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy))) {
  390. llvm::LoadInst *Load = CGF.Builder.CreateLoad(SrcPtr);
  391. return CoerceIntOrPtrToIntOrPtr(Load, Ty, CGF);
  392. }
  393. // If load is legal, just bitcast the src pointer.
  394. if (SrcSize >= DstSize) {
  395. // Generally SrcSize is never greater than DstSize, since this means we are
  396. // losing bits. However, this can happen in cases where the structure has
  397. // additional padding, for example due to a user specified alignment.
  398. //
  399. // FIXME: Assert that we aren't truncating non-padding bits when we have access
  400. // to that information.
  401. llvm::Value *Casted =
  402. CGF.Builder.CreateBitCast(SrcPtr, llvm::PointerType::getUnqual(Ty));
  403. llvm::LoadInst *Load = CGF.Builder.CreateLoad(Casted);
  404. // FIXME: Use better alignment / avoid requiring aligned load.
  405. Load->setAlignment(1);
  406. return Load;
  407. }
  408. // Otherwise do coercion through memory. This is stupid, but
  409. // simple.
  410. llvm::Value *Tmp = CGF.CreateTempAlloca(Ty);
  411. llvm::Value *Casted =
  412. CGF.Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(SrcTy));
  413. llvm::StoreInst *Store =
  414. CGF.Builder.CreateStore(CGF.Builder.CreateLoad(SrcPtr), Casted);
  415. // FIXME: Use better alignment / avoid requiring aligned store.
  416. Store->setAlignment(1);
  417. return CGF.Builder.CreateLoad(Tmp);
  418. }
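  // Example (illustrative): loading an i64 coerce-to type from a source of IR
  // type { i32, i32 } (same 8-byte alloc size) takes the "bitcast the src
  // pointer" path: the { i32, i32 }* is bitcast to i64* and loaded with
  // alignment 1, avoiding the temporary round-trip through memory.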
  419. // Function to store a first-class aggregate into memory. We prefer to
  420. // store the elements rather than the aggregate to be more friendly to
  421. // fast-isel.
  422. // FIXME: Do we need to recurse here?
  423. static void BuildAggStore(CodeGenFunction &CGF, llvm::Value *Val,
  424. llvm::Value *DestPtr, bool DestIsVolatile,
  425. bool LowAlignment) {
  426. // Prefer scalar stores to first-class aggregate stores.
  427. if (llvm::StructType *STy =
  428. dyn_cast<llvm::StructType>(Val->getType())) {
  429. for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
  430. llvm::Value *EltPtr = CGF.Builder.CreateConstGEP2_32(DestPtr, 0, i);
  431. llvm::Value *Elt = CGF.Builder.CreateExtractValue(Val, i);
  432. llvm::StoreInst *SI = CGF.Builder.CreateStore(Elt, EltPtr,
  433. DestIsVolatile);
  434. if (LowAlignment)
  435. SI->setAlignment(1);
  436. }
  437. } else {
  438. CGF.Builder.CreateStore(Val, DestPtr, DestIsVolatile);
  439. }
  440. }
  441. /// CreateCoercedStore - Create a store to \arg DstPtr from \arg Src,
  442. /// where the source and destination may have different types.
  443. ///
  444. /// This safely handles the case when the src type is larger than the
  445. /// destination type; the upper bits of the src will be lost.
  446. static void CreateCoercedStore(llvm::Value *Src,
  447. llvm::Value *DstPtr,
  448. bool DstIsVolatile,
  449. CodeGenFunction &CGF) {
  450. llvm::Type *SrcTy = Src->getType();
  451. llvm::Type *DstTy =
  452. cast<llvm::PointerType>(DstPtr->getType())->getElementType();
  453. if (SrcTy == DstTy) {
  454. CGF.Builder.CreateStore(Src, DstPtr, DstIsVolatile);
  455. return;
  456. }
  457. uint64_t SrcSize = CGF.CGM.getTargetData().getTypeAllocSize(SrcTy);
  458. if (llvm::StructType *DstSTy = dyn_cast<llvm::StructType>(DstTy)) {
  459. DstPtr = EnterStructPointerForCoercedAccess(DstPtr, DstSTy, SrcSize, CGF);
  460. DstTy = cast<llvm::PointerType>(DstPtr->getType())->getElementType();
  461. }
  462. // If the source and destination are integer or pointer types, just do an
  463. // extension or truncation to the desired type.
  464. if ((isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy)) &&
  465. (isa<llvm::IntegerType>(DstTy) || isa<llvm::PointerType>(DstTy))) {
  466. Src = CoerceIntOrPtrToIntOrPtr(Src, DstTy, CGF);
  467. CGF.Builder.CreateStore(Src, DstPtr, DstIsVolatile);
  468. return;
  469. }
  470. uint64_t DstSize = CGF.CGM.getTargetData().getTypeAllocSize(DstTy);
  471. // If store is legal, just bitcast the src pointer.
  472. if (SrcSize <= DstSize) {
  473. llvm::Value *Casted =
  474. CGF.Builder.CreateBitCast(DstPtr, llvm::PointerType::getUnqual(SrcTy));
  475. // FIXME: Use better alignment / avoid requiring aligned store.
  476. BuildAggStore(CGF, Src, Casted, DstIsVolatile, true);
  477. } else {
  478. // Otherwise do coercion through memory. This is stupid, but
  479. // simple.
  480. // Generally SrcSize is never greater than DstSize, since this means we are
  481. // losing bits. However, this can happen in cases where the structure has
  482. // additional padding, for example due to a user specified alignment.
  483. //
  484. // FIXME: Assert that we aren't truncating non-padding bits when we have access
  485. // to that information.
  486. llvm::Value *Tmp = CGF.CreateTempAlloca(SrcTy);
  487. CGF.Builder.CreateStore(Src, Tmp);
  488. llvm::Value *Casted =
  489. CGF.Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(DstTy));
  490. llvm::LoadInst *Load = CGF.Builder.CreateLoad(Casted);
  491. // FIXME: Use better alignment / avoid requiring aligned load.
  492. Load->setAlignment(1);
  493. CGF.Builder.CreateStore(Load, DstPtr, DstIsVolatile);
  494. }
  495. }
  496. /***/
  497. bool CodeGenModule::ReturnTypeUsesSRet(const CGFunctionInfo &FI) {
  498. return FI.getReturnInfo().isIndirect();
  499. }
  500. bool CodeGenModule::ReturnTypeUsesFPRet(QualType ResultType) {
  501. if (const BuiltinType *BT = ResultType->getAs<BuiltinType>()) {
  502. switch (BT->getKind()) {
  503. default:
  504. return false;
  505. case BuiltinType::Float:
  506. return getContext().Target.useObjCFPRetForRealType(TargetInfo::Float);
  507. case BuiltinType::Double:
  508. return getContext().Target.useObjCFPRetForRealType(TargetInfo::Double);
  509. case BuiltinType::LongDouble:
  510. return getContext().Target.useObjCFPRetForRealType(
  511. TargetInfo::LongDouble);
  512. }
  513. }
  514. return false;
  515. }
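  // Usage note (illustrative): the Objective-C message-send lowering consults
  // this so that, e.g., a 'float'-returning message on 32-bit x86 is dispatched
  // through the fpret variant of the runtime's send function
  // (objc_msgSend_fpret) rather than the plain one.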
  516. llvm::FunctionType *CodeGenTypes::GetFunctionType(GlobalDecl GD) {
  517. const CGFunctionInfo &FI = getFunctionInfo(GD);
  518. // For definition purposes, don't consider a K&R function variadic.
  519. bool Variadic = false;
  520. if (const FunctionProtoType *FPT =
  521. cast<FunctionDecl>(GD.getDecl())->getType()->getAs<FunctionProtoType>())
  522. Variadic = FPT->isVariadic();
  523. return GetFunctionType(FI, Variadic);
  524. }
  525. llvm::FunctionType *
  526. CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI, bool isVariadic) {
  527. bool Inserted = FunctionsBeingProcessed.insert(&FI); (void)Inserted;
  528. assert(Inserted && "Recursively being processed?");
  529. SmallVector<llvm::Type*, 8> argTypes;
  530. llvm::Type *resultType = 0;
  531. const ABIArgInfo &retAI = FI.getReturnInfo();
  532. switch (retAI.getKind()) {
  533. case ABIArgInfo::Expand:
  534. llvm_unreachable("Invalid ABI kind for return argument");
  535. case ABIArgInfo::Extend:
  536. case ABIArgInfo::Direct:
  537. resultType = retAI.getCoerceToType();
  538. break;
  539. case ABIArgInfo::Indirect: {
  540. assert(!retAI.getIndirectAlign() && "Align unused on indirect return.");
  541. resultType = llvm::Type::getVoidTy(getLLVMContext());
  542. QualType ret = FI.getReturnType();
  543. llvm::Type *ty = ConvertType(ret);
  544. unsigned addressSpace = Context.getTargetAddressSpace(ret);
  545. argTypes.push_back(llvm::PointerType::get(ty, addressSpace));
  546. break;
  547. }
  548. case ABIArgInfo::Ignore:
  549. resultType = llvm::Type::getVoidTy(getLLVMContext());
  550. break;
  551. }
  552. for (CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
  553. ie = FI.arg_end(); it != ie; ++it) {
  554. const ABIArgInfo &argAI = it->info;
  555. switch (argAI.getKind()) {
  556. case ABIArgInfo::Ignore:
  557. break;
  558. case ABIArgInfo::Indirect: {
  559. // indirect arguments are always on the stack, which is addr space #0.
  560. llvm::Type *LTy = ConvertTypeForMem(it->type);
  561. argTypes.push_back(LTy->getPointerTo());
  562. break;
  563. }
  564. case ABIArgInfo::Extend:
  565. case ABIArgInfo::Direct: {
  566. // If the coerce-to type is a first-class aggregate, flatten it. Either
  567. // way is semantically identical, but fast-isel and the optimizer
  568. // generally like scalar values better than FCAs.
  569. llvm::Type *argType = argAI.getCoerceToType();
  570. if (llvm::StructType *st = dyn_cast<llvm::StructType>(argType)) {
  571. for (unsigned i = 0, e = st->getNumElements(); i != e; ++i)
  572. argTypes.push_back(st->getElementType(i));
  573. } else {
  574. argTypes.push_back(argType);
  575. }
  576. break;
  577. }
  578. case ABIArgInfo::Expand:
  579. GetExpandedTypes(it->type, argTypes);
  580. break;
  581. }
  582. }
  583. bool Erased = FunctionsBeingProcessed.erase(&FI); (void)Erased;
  584. assert(Erased && "Not in set?");
  585. return llvm::FunctionType::get(resultType, argTypes, isVariadic);
  586. }
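  // Example (illustrative, target-dependent): for
  //   struct Big g(struct Big x);   // too large for registers on this target
  // a typical ABI classifies both the return and the parameter as Indirect, so
  // the IR function type built here is roughly
  //   void (%struct.Big*, %struct.Big*)
  // i.e. a void result, an sret-style out-pointer first, then a pointer to the
  // argument's stack copy.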
  587. llvm::Type *CodeGenTypes::GetFunctionTypeForVTable(GlobalDecl GD) {
  588. const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
  589. const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
  590. if (!isFuncTypeConvertible(FPT))
  591. return llvm::StructType::get(getLLVMContext());
  592. const CGFunctionInfo *Info;
  593. if (isa<CXXDestructorDecl>(MD))
  594. Info = &getFunctionInfo(cast<CXXDestructorDecl>(MD), GD.getDtorType());
  595. else
  596. Info = &getFunctionInfo(MD);
  597. return GetFunctionType(*Info, FPT->isVariadic());
  598. }
  599. void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
  600. const Decl *TargetDecl,
  601. AttributeListType &PAL,
  602. unsigned &CallingConv) {
  603. unsigned FuncAttrs = 0;
  604. unsigned RetAttrs = 0;
  605. CallingConv = FI.getEffectiveCallingConvention();
  606. if (FI.isNoReturn())
  607. FuncAttrs |= llvm::Attribute::NoReturn;
  608. // FIXME: handle sseregparm someday...
  609. if (TargetDecl) {
  610. if (TargetDecl->hasAttr<NoThrowAttr>())
  611. FuncAttrs |= llvm::Attribute::NoUnwind;
  612. else if (const FunctionDecl *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
  613. const FunctionProtoType *FPT = Fn->getType()->getAs<FunctionProtoType>();
  614. if (FPT && FPT->isNothrow(getContext()))
  615. FuncAttrs |= llvm::Attribute::NoUnwind;
  616. }
  617. if (TargetDecl->hasAttr<NoReturnAttr>())
  618. FuncAttrs |= llvm::Attribute::NoReturn;
  619. // 'const' and 'pure' attribute functions are also nounwind.
  620. if (TargetDecl->hasAttr<ConstAttr>()) {
  621. FuncAttrs |= llvm::Attribute::ReadNone;
  622. FuncAttrs |= llvm::Attribute::NoUnwind;
  623. } else if (TargetDecl->hasAttr<PureAttr>()) {
  624. FuncAttrs |= llvm::Attribute::ReadOnly;
  625. FuncAttrs |= llvm::Attribute::NoUnwind;
  626. }
  627. if (TargetDecl->hasAttr<MallocAttr>())
  628. RetAttrs |= llvm::Attribute::NoAlias;
  629. }
  630. if (CodeGenOpts.OptimizeSize)
  631. FuncAttrs |= llvm::Attribute::OptimizeForSize;
  632. if (CodeGenOpts.DisableRedZone)
  633. FuncAttrs |= llvm::Attribute::NoRedZone;
  634. if (CodeGenOpts.NoImplicitFloat)
  635. FuncAttrs |= llvm::Attribute::NoImplicitFloat;
  636. QualType RetTy = FI.getReturnType();
  637. unsigned Index = 1;
  638. const ABIArgInfo &RetAI = FI.getReturnInfo();
  639. switch (RetAI.getKind()) {
  640. case ABIArgInfo::Extend:
  641. if (RetTy->hasSignedIntegerRepresentation())
  642. RetAttrs |= llvm::Attribute::SExt;
  643. else if (RetTy->hasUnsignedIntegerRepresentation())
  644. RetAttrs |= llvm::Attribute::ZExt;
  645. break;
  646. case ABIArgInfo::Direct:
  647. case ABIArgInfo::Ignore:
  648. break;
  649. case ABIArgInfo::Indirect:
  650. PAL.push_back(llvm::AttributeWithIndex::get(Index,
  651. llvm::Attribute::StructRet));
  652. ++Index;
  653. // sret disables readnone and readonly
  654. FuncAttrs &= ~(llvm::Attribute::ReadOnly |
  655. llvm::Attribute::ReadNone);
  656. break;
  657. case ABIArgInfo::Expand:
  658. assert(0 && "Invalid ABI kind for return argument");
  659. }
  660. if (RetAttrs)
  661. PAL.push_back(llvm::AttributeWithIndex::get(0, RetAttrs));
  662. // FIXME: RegParm should be reduced in case of global register variable.
  663. signed RegParm;
  664. if (FI.getHasRegParm())
  665. RegParm = FI.getRegParm();
  666. else
  667. RegParm = CodeGenOpts.NumRegisterParameters;
  668. unsigned PointerWidth = getContext().Target.getPointerWidth(0);
  669. for (CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
  670. ie = FI.arg_end(); it != ie; ++it) {
  671. QualType ParamType = it->type;
  672. const ABIArgInfo &AI = it->info;
  673. unsigned Attributes = 0;
  674. // 'restrict' -> 'noalias' is done in EmitFunctionProlog when we
  675. // have the corresponding parameter variable. It doesn't make
  676. // sense to do it here because parameters are so messed up.
  677. switch (AI.getKind()) {
  678. case ABIArgInfo::Extend:
  679. if (ParamType->isSignedIntegerOrEnumerationType())
  680. Attributes |= llvm::Attribute::SExt;
  681. else if (ParamType->isUnsignedIntegerOrEnumerationType())
  682. Attributes |= llvm::Attribute::ZExt;
  683. // FALL THROUGH
  684. case ABIArgInfo::Direct:
  685. if (RegParm > 0 &&
  686. (ParamType->isIntegerType() || ParamType->isPointerType())) {
  687. RegParm -=
  688. (Context.getTypeSize(ParamType) + PointerWidth - 1) / PointerWidth;
  689. if (RegParm >= 0)
  690. Attributes |= llvm::Attribute::InReg;
  691. }
  692. // FIXME: handle sseregparm someday...
  693. if (llvm::StructType *STy =
  694. dyn_cast<llvm::StructType>(AI.getCoerceToType()))
  695. Index += STy->getNumElements()-1; // 1 will be added below.
  696. break;
  697. case ABIArgInfo::Indirect:
  698. if (AI.getIndirectByVal())
  699. Attributes |= llvm::Attribute::ByVal;
  700. Attributes |=
  701. llvm::Attribute::constructAlignmentFromInt(AI.getIndirectAlign());
  702. // byval disables readnone and readonly.
  703. FuncAttrs &= ~(llvm::Attribute::ReadOnly |
  704. llvm::Attribute::ReadNone);
  705. break;
  706. case ABIArgInfo::Ignore:
  707. // Skip increment, no matching LLVM parameter.
  708. continue;
  709. case ABIArgInfo::Expand: {
  710. SmallVector<llvm::Type*, 8> types;
  711. // FIXME: This is rather inefficient. Do we ever actually need to do
  712. // anything here? The result should be just reconstructed on the other
  713. // side, so extension should be a non-issue.
  714. getTypes().GetExpandedTypes(ParamType, types);
  715. Index += types.size();
  716. continue;
  717. }
  718. }
  719. if (Attributes)
  720. PAL.push_back(llvm::AttributeWithIndex::get(Index, Attributes));
  721. ++Index;
  722. }
  723. if (FuncAttrs)
  724. PAL.push_back(llvm::AttributeWithIndex::get(~0, FuncAttrs));
  725. }
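  // Example (illustrative): for 'signed char f(signed char c)', where both the
  // return and the parameter are ABIArgInfo::Extend, the list built here is
  // roughly
  //   index 0  -> signext            (return value attributes)
  //   index 1  -> signext            (the single IR parameter)
  //   index ~0 -> function attributes (nounwind, etc., as applicable)
  // An indirect (sret) return instead takes index 1 and shifts the parameters up.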
  726. /// An argument came in as a promoted argument; demote it back to its
  727. /// declared type.
  728. static llvm::Value *emitArgumentDemotion(CodeGenFunction &CGF,
  729. const VarDecl *var,
  730. llvm::Value *value) {
  731. llvm::Type *varType = CGF.ConvertType(var->getType());
  732. // This can happen with promotions that actually don't change the
  733. // underlying type, like the enum promotions.
  734. if (value->getType() == varType) return value;
  735. assert((varType->isIntegerTy() || varType->isFloatingPointTy())
  736. && "unexpected promotion type");
  737. if (isa<llvm::IntegerType>(varType))
  738. return CGF.Builder.CreateTrunc(value, varType, "arg.unpromote");
  739. return CGF.Builder.CreateFPCast(value, varType, "arg.unpromote");
  740. }
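  // Example (illustrative): a K&R-style 'char' parameter arrives promoted to
  // 'int' and is demoted with a trunc from i32 to i8; a promoted 'float'
  // arrives as 'double' and is narrowed back via CreateFPCast (an fptrunc).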
  741. void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
  742. llvm::Function *Fn,
  743. const FunctionArgList &Args) {
  744. // If this is an implicit-return-zero function, go ahead and
  745. // initialize the return value. TODO: it might be nice to have
  746. // a more general mechanism for this that didn't require synthesized
  747. // return statements.
  748. if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl)) {
  749. if (FD->hasImplicitReturnZero()) {
  750. QualType RetTy = FD->getResultType().getUnqualifiedType();
  751. llvm::Type* LLVMTy = CGM.getTypes().ConvertType(RetTy);
  752. llvm::Constant* Zero = llvm::Constant::getNullValue(LLVMTy);
  753. Builder.CreateStore(Zero, ReturnValue);
  754. }
  755. }
  756. // FIXME: We no longer need the types from FunctionArgList; lift up and
  757. // simplify.
  758. // Emit allocs for param decls. Give the LLVM Argument nodes names.
  759. llvm::Function::arg_iterator AI = Fn->arg_begin();
  760. // Name the struct return argument.
  761. if (CGM.ReturnTypeUsesSRet(FI)) {
  762. AI->setName("agg.result");
  763. AI->addAttr(llvm::Attribute::NoAlias);
  764. ++AI;
  765. }
  766. assert(FI.arg_size() == Args.size() &&
  767. "Mismatch between function signature & arguments.");
  768. unsigned ArgNo = 1;
  769. CGFunctionInfo::const_arg_iterator info_it = FI.arg_begin();
  770. for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
  771. i != e; ++i, ++info_it, ++ArgNo) {
  772. const VarDecl *Arg = *i;
  773. QualType Ty = info_it->type;
  774. const ABIArgInfo &ArgI = info_it->info;
  775. bool isPromoted =
  776. isa<ParmVarDecl>(Arg) && cast<ParmVarDecl>(Arg)->isKNRPromoted();
  777. switch (ArgI.getKind()) {
  778. case ABIArgInfo::Indirect: {
  779. llvm::Value *V = AI;
  780. if (hasAggregateLLVMType(Ty)) {
  781. // Aggregates and complex variables are accessed by reference. All we
  782. // need to do is realign the value, if requested
  783. if (ArgI.getIndirectRealign()) {
  784. llvm::Value *AlignedTemp = CreateMemTemp(Ty, "coerce");
  785. // Copy from the incoming argument pointer to the temporary with the
  786. // appropriate alignment.
  787. //
  788. // FIXME: We should have a common utility for generating an aggregate
  789. // copy.
  790. llvm::Type *I8PtrTy = Builder.getInt8PtrTy();
  791. CharUnits Size = getContext().getTypeSizeInChars(Ty);
  792. llvm::Value *Dst = Builder.CreateBitCast(AlignedTemp, I8PtrTy);
  793. llvm::Value *Src = Builder.CreateBitCast(V, I8PtrTy);
  794. Builder.CreateMemCpy(Dst,
  795. Src,
  796. llvm::ConstantInt::get(IntPtrTy,
  797. Size.getQuantity()),
  798. ArgI.getIndirectAlign(),
  799. false);
  800. V = AlignedTemp;
  801. }
  802. } else {
  803. // Load scalar value from indirect argument.
  804. CharUnits Alignment = getContext().getTypeAlignInChars(Ty);
  805. V = EmitLoadOfScalar(V, false, Alignment.getQuantity(), Ty);
  806. if (isPromoted)
  807. V = emitArgumentDemotion(*this, Arg, V);
  808. }
  809. EmitParmDecl(*Arg, V, ArgNo);
  810. break;
  811. }
  812. case ABIArgInfo::Extend:
  813. case ABIArgInfo::Direct: {
  814. // If we have the trivial case, handle it with no muss and fuss.
  815. if (!isa<llvm::StructType>(ArgI.getCoerceToType()) &&
  816. ArgI.getCoerceToType() == ConvertType(Ty) &&
  817. ArgI.getDirectOffset() == 0) {
  818. assert(AI != Fn->arg_end() && "Argument mismatch!");
  819. llvm::Value *V = AI;
  820. if (Arg->getType().isRestrictQualified())
  821. AI->addAttr(llvm::Attribute::NoAlias);
  822. // Ensure the argument is the correct type.
  823. if (V->getType() != ArgI.getCoerceToType())
  824. V = Builder.CreateBitCast(V, ArgI.getCoerceToType());
  825. if (isPromoted)
  826. V = emitArgumentDemotion(*this, Arg, V);
  827. EmitParmDecl(*Arg, V, ArgNo);
  828. break;
  829. }
  830. llvm::AllocaInst *Alloca = CreateMemTemp(Ty, "coerce");
  831. // The alignment we need to use is the max of the requested alignment for
  832. // the argument and the alignment required by our access code below.
  833. unsigned AlignmentToUse =
  834. CGM.getTargetData().getABITypeAlignment(ArgI.getCoerceToType());
  835. AlignmentToUse = std::max(AlignmentToUse,
  836. (unsigned)getContext().getDeclAlign(Arg).getQuantity());
  837. Alloca->setAlignment(AlignmentToUse);
  838. llvm::Value *V = Alloca;
  839. llvm::Value *Ptr = V; // Pointer to store into.
  840. // If the value is offset in memory, apply the offset now.
  841. if (unsigned Offs = ArgI.getDirectOffset()) {
  842. Ptr = Builder.CreateBitCast(Ptr, Builder.getInt8PtrTy());
  843. Ptr = Builder.CreateConstGEP1_32(Ptr, Offs);
  844. Ptr = Builder.CreateBitCast(Ptr,
  845. llvm::PointerType::getUnqual(ArgI.getCoerceToType()));
  846. }
  847. // If the coerce-to type is a first-class aggregate, we flatten it and
  848. // pass the elements. Either way is semantically identical, but fast-isel
  849. // and the optimizer generally like scalar values better than FCAs.
  850. if (llvm::StructType *STy =
  851. dyn_cast<llvm::StructType>(ArgI.getCoerceToType())) {
  852. Ptr = Builder.CreateBitCast(Ptr, llvm::PointerType::getUnqual(STy));
  853. for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
  854. assert(AI != Fn->arg_end() && "Argument mismatch!");
  855. AI->setName(Arg->getName() + ".coerce" + Twine(i));
  856. llvm::Value *EltPtr = Builder.CreateConstGEP2_32(Ptr, 0, i);
  857. Builder.CreateStore(AI++, EltPtr);
  858. }
  859. } else {
  860. // Simple case, just do a coerced store of the argument into the alloca.
  861. assert(AI != Fn->arg_end() && "Argument mismatch!");
  862. AI->setName(Arg->getName() + ".coerce");
  863. CreateCoercedStore(AI++, Ptr, /*DestIsVolatile=*/false, *this);
  864. }
  865. // Match to what EmitParmDecl is expecting for this type.
  866. if (!CodeGenFunction::hasAggregateLLVMType(Ty)) {
  867. V = EmitLoadOfScalar(V, false, AlignmentToUse, Ty);
  868. if (isPromoted)
  869. V = emitArgumentDemotion(*this, Arg, V);
  870. }
  871. EmitParmDecl(*Arg, V, ArgNo);
  872. continue; // Skip ++AI increment, already done.
  873. }
  874. case ABIArgInfo::Expand: {
  875. // If this structure was expanded into multiple arguments then
  876. // we need to create a temporary and reconstruct it from the
  877. // arguments.
  878. llvm::Value *Temp = CreateMemTemp(Ty, Arg->getName() + ".addr");
  879. llvm::Function::arg_iterator End =
  880. ExpandTypeFromArgs(Ty, MakeAddrLValue(Temp, Ty), AI);
  881. EmitParmDecl(*Arg, Temp, ArgNo);
  882. // Name the arguments used in expansion and increment AI.
  883. unsigned Index = 0;
  884. for (; AI != End; ++AI, ++Index)
  885. AI->setName(Arg->getName() + "." + Twine(Index));
  886. continue;
  887. }
  888. case ABIArgInfo::Ignore:
  889. // Initialize the local variable appropriately.
  890. if (hasAggregateLLVMType(Ty))
  891. EmitParmDecl(*Arg, CreateMemTemp(Ty), ArgNo);
  892. else
  893. EmitParmDecl(*Arg, llvm::UndefValue::get(ConvertType(Arg->getType())),
  894. ArgNo);
  895. // Skip increment, no matching LLVM parameter.
  896. continue;
  897. }
  898. ++AI;
  899. }
  900. assert(AI == Fn->arg_end() && "Argument mismatch!");
  901. }
  902. /// Try to emit a fused autorelease of a return result.
  903. static llvm::Value *tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF,
  904. llvm::Value *result) {
  905. // The result must be the last instruction emitted so far in this block.
  906. llvm::BasicBlock *BB = CGF.Builder.GetInsertBlock();
  907. if (BB->empty()) return 0;
  908. if (&BB->back() != result) return 0;
  909. llvm::Type *resultType = result->getType();
  910. // result is in a BasicBlock and is therefore an Instruction.
  911. llvm::Instruction *generator = cast<llvm::Instruction>(result);
  912. SmallVector<llvm::Instruction*,4> insnsToKill;
  913. // Look for:
  914. // %generator = bitcast %type1* %generator2 to %type2*
  915. while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(generator)) {
  916. // We would have emitted this as a constant if the operand weren't
  917. // an Instruction.
  918. generator = cast<llvm::Instruction>(bitcast->getOperand(0));
  919. // Require the generator to be immediately followed by the cast.
  920. if (generator->getNextNode() != bitcast)
  921. return 0;
  922. insnsToKill.push_back(bitcast);
  923. }
  924. // Look for:
  925. // %generator = call i8* @objc_retain(i8* %originalResult)
  926. // or
  927. // %generator = call i8* @objc_retainAutoreleasedReturnValue(i8* %originalResult)
  928. llvm::CallInst *call = dyn_cast<llvm::CallInst>(generator);
  929. if (!call) return 0;
  930. bool doRetainAutorelease;
  931. if (call->getCalledValue() == CGF.CGM.getARCEntrypoints().objc_retain) {
  932. doRetainAutorelease = true;
  933. } else if (call->getCalledValue() == CGF.CGM.getARCEntrypoints()
  934. .objc_retainAutoreleasedReturnValue) {
  935. doRetainAutorelease = false;
  936. // Look for an inline asm immediately preceding the call and kill it, too.
  937. llvm::Instruction *prev = call->getPrevNode();
  938. if (llvm::CallInst *asmCall = dyn_cast_or_null<llvm::CallInst>(prev))
  939. if (asmCall->getCalledValue()
  940. == CGF.CGM.getARCEntrypoints().retainAutoreleasedReturnValueMarker)
  941. insnsToKill.push_back(prev);
  942. } else {
  943. return 0;
  944. }
  945. result = call->getArgOperand(0);
  946. insnsToKill.push_back(call);
  947. // Keep killing bitcasts, for sanity. Note that we no longer care
  948. // about precise ordering as long as there's exactly one use.
  949. while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(result)) {
  950. if (!bitcast->hasOneUse()) break;
  951. insnsToKill.push_back(bitcast);
  952. result = bitcast->getOperand(0);
  953. }
  954. // Delete all the unnecessary instructions, from latest to earliest.
  955. for (SmallVectorImpl<llvm::Instruction*>::iterator
  956. i = insnsToKill.begin(), e = insnsToKill.end(); i != e; ++i)
  957. (*i)->eraseFromParent();
  958. // Do the fused retain/autorelease if we were asked to.
  959. if (doRetainAutorelease)
  960. result = CGF.EmitARCRetainAutoreleaseReturnValue(result);
  961. // Cast back to the result type.
  962. return CGF.Builder.CreateBitCast(result, resultType);
  963. }
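  // Example (illustrative) of the pattern this peephole matches and rewrites:
  //   %1 = call i8* @objc_retainAutoreleasedReturnValue(i8* %0)
  //   %2 = bitcast i8* %1 to %SomeClass*   ; about to be autoreleased/returned
  // The call and the bitcasts are deleted and %0 (cast back to the result type)
  // is returned directly; in the plain objc_retain case the value is instead
  // passed through EmitARCRetainAutoreleaseReturnValue to get the fused
  // retain/autorelease call.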
  964. /// Emit an ARC autorelease of the result of a function.
  965. static llvm::Value *emitAutoreleaseOfResult(CodeGenFunction &CGF,
  966. llvm::Value *result) {
  967. // At -O0, try to emit a fused retain/autorelease.
  968. if (CGF.shouldUseFusedARCCalls())
  969. if (llvm::Value *fused = tryEmitFusedAutoreleaseOfResult(CGF, result))
  970. return fused;
  971. return CGF.EmitARCAutoreleaseReturnValue(result);
  972. }
  973. void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI) {
  974. // Functions with no result always return void.
  975. if (ReturnValue == 0) {
  976. Builder.CreateRetVoid();
  977. return;
  978. }
  979. llvm::DebugLoc RetDbgLoc;
  980. llvm::Value *RV = 0;
  981. QualType RetTy = FI.getReturnType();
  982. const ABIArgInfo &RetAI = FI.getReturnInfo();
  983. switch (RetAI.getKind()) {
  984. case ABIArgInfo::Indirect: {
  985. unsigned Alignment = getContext().getTypeAlignInChars(RetTy).getQuantity();
  986. if (RetTy->isAnyComplexType()) {
  987. ComplexPairTy RT = LoadComplexFromAddr(ReturnValue, false);
  988. StoreComplexToAddr(RT, CurFn->arg_begin(), false);
  989. } else if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
  990. // Do nothing; aggregates get evaluated directly into the destination.
  991. } else {
  992. EmitStoreOfScalar(Builder.CreateLoad(ReturnValue), CurFn->arg_begin(),
  993. false, Alignment, RetTy);
  994. }
  995. break;
  996. }
  997. case ABIArgInfo::Extend:
  998. case ABIArgInfo::Direct:
  999. if (RetAI.getCoerceToType() == ConvertType(RetTy) &&
  1000. RetAI.getDirectOffset() == 0) {
  1001. // The internal return value temp will always have pointer-to-return-type
  1002. // type; just do a load.
  1003. // If the instruction right before the insertion point is a store to the
  1004. // return value, we can elide the load, zap the store, and usually zap the
  1005. // alloca.
  1006. llvm::BasicBlock *InsertBB = Builder.GetInsertBlock();
  1007. llvm::StoreInst *SI = 0;
  1008. if (InsertBB->empty() ||
  1009. !(SI = dyn_cast<llvm::StoreInst>(&InsertBB->back())) ||
  1010. SI->getPointerOperand() != ReturnValue || SI->isVolatile()) {
  1011. RV = Builder.CreateLoad(ReturnValue);
  1012. } else {
  1013. // Get the stored value and nuke the now-dead store.
  1014. RetDbgLoc = SI->getDebugLoc();
  1015. RV = SI->getValueOperand();
  1016. SI->eraseFromParent();
  1017. // If that was the only use of the return value, nuke it as well now.
  1018. if (ReturnValue->use_empty() && isa<llvm::AllocaInst>(ReturnValue)) {
  1019. cast<llvm::AllocaInst>(ReturnValue)->eraseFromParent();
  1020. ReturnValue = 0;
  1021. }
  1022. }
  1023. } else {
  1024. llvm::Value *V = ReturnValue;
  1025. // If the value is offset in memory, apply the offset now.
  1026. if (unsigned Offs = RetAI.getDirectOffset()) {
  1027. V = Builder.CreateBitCast(V, Builder.getInt8PtrTy());
  1028. V = Builder.CreateConstGEP1_32(V, Offs);
  1029. V = Builder.CreateBitCast(V,
  1030. llvm::PointerType::getUnqual(RetAI.getCoerceToType()));
  1031. }
  1032. RV = CreateCoercedLoad(V, RetAI.getCoerceToType(), *this);
  1033. }
  1034. // In ARC, end functions that return a retainable type with a call
  1035. // to objc_autoreleaseReturnValue.
  1036. if (AutoreleaseResult) {
  1037. assert(getLangOptions().ObjCAutoRefCount &&
  1038. !FI.isReturnsRetained() &&
  1039. RetTy->isObjCRetainableType());
  1040. RV = emitAutoreleaseOfResult(*this, RV);
  1041. }
  1042. break;
  1043. case ABIArgInfo::Ignore:
  1044. break;
  1045. case ABIArgInfo::Expand:
  1046. assert(0 && "Invalid ABI kind for return argument");
  1047. }
  1048. llvm::Instruction *Ret = RV ? Builder.CreateRet(RV) : Builder.CreateRetVoid();
  1049. if (!RetDbgLoc.isUnknown())
  1050. Ret->setDebugLoc(RetDbgLoc);
  1051. }
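  // Example (illustrative) of the store/load elision above: if the current
  // block ends with
  //   store i32 %x, i32* %retval
  // immediately before the epilog, the store is erased (and the %retval alloca
  // too, if otherwise unused) and the function simply ends with
  //   ret i32 %x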
  1052. void CodeGenFunction::EmitDelegateCallArg(CallArgList &args,
  1053. const VarDecl *param) {
  1054. // StartFunction converted the ABI-lowered parameter(s) into a
  1055. // local alloca. We need to turn that into an r-value suitable
  1056. // for EmitCall.
  1057. llvm::Value *local = GetAddrOfLocalVar(param);
  1058. QualType type = param->getType();
  1059. // For the most part, we just need to load the alloca, except:
  1060. // 1) aggregate r-values are actually pointers to temporaries, and
  1061. // 2) references to aggregates are pointers directly to the aggregate.
  1062. // I don't know why references to non-aggregates are different here.
  1063. if (const ReferenceType *ref = type->getAs<ReferenceType>()) {
  1064. if (hasAggregateLLVMType(ref->getPointeeType()))
  1065. return args.add(RValue::getAggregate(local), type);
  1066. // Locals which are references to scalars are represented
  1067. // with allocas holding the pointer.
  1068. return args.add(RValue::get(Builder.CreateLoad(local)), type);
  1069. }
  1070. if (type->isAnyComplexType()) {
  1071. ComplexPairTy complex = LoadComplexFromAddr(local, /*volatile*/ false);
  1072. return args.add(RValue::getComplex(complex), type);
  1073. }
  1074. if (hasAggregateLLVMType(type))
  1075. return args.add(RValue::getAggregate(local), type);
  1076. unsigned alignment = getContext().getDeclAlign(param).getQuantity();
  1077. llvm::Value *value = EmitLoadOfScalar(local, false, alignment, type);
  1078. return args.add(RValue::get(value), type);
  1079. }
  1080. static bool isProvablyNull(llvm::Value *addr) {
  1081. return isa<llvm::ConstantPointerNull>(addr);
  1082. }
  1083. static bool isProvablyNonNull(llvm::Value *addr) {
  1084. return isa<llvm::AllocaInst>(addr);
  1085. }
/// Emit the actual writing-back of a writeback.
static void emitWriteback(CodeGenFunction &CGF,
                          const CallArgList::Writeback &writeback) {
  llvm::Value *srcAddr = writeback.Address;
  assert(!isProvablyNull(srcAddr) &&
         "shouldn't have writeback for provably null argument");

  llvm::BasicBlock *contBB = 0;

  // If the argument wasn't provably non-null, we need to null check
  // before doing the store.
  bool provablyNonNull = isProvablyNonNull(srcAddr);
  if (!provablyNonNull) {
    llvm::BasicBlock *writebackBB = CGF.createBasicBlock("icr.writeback");
    contBB = CGF.createBasicBlock("icr.done");

    llvm::Value *isNull = CGF.Builder.CreateIsNull(srcAddr, "icr.isnull");
    CGF.Builder.CreateCondBr(isNull, contBB, writebackBB);
    CGF.EmitBlock(writebackBB);
  }

  // Load the value to writeback.
  llvm::Value *value = CGF.Builder.CreateLoad(writeback.Temporary);

  // Cast it back, in case we're writing an id to a Foo* or something.
  value = CGF.Builder.CreateBitCast(value,
              cast<llvm::PointerType>(srcAddr->getType())->getElementType(),
                                    "icr.writeback-cast");

  // Perform the writeback.
  QualType srcAddrType = writeback.AddressType;
  CGF.EmitStoreThroughLValue(RValue::get(value),
                             CGF.MakeAddrLValue(srcAddr, srcAddrType));

  // Jump to the continuation block.
  if (!provablyNonNull)
    CGF.EmitBlock(contBB);
}
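
// In source-level terms, what emitWriteback produces for one writeback entry
// is roughly the following (a sketch; SrcElemTy stands for the pointee type of
// the original address, and the actual output is the icr.writeback/icr.done
// block structure built above):
//
//   if (srcAddr != 0)                  // guard emitted only when srcAddr is
//     *srcAddr = (SrcElemTy) *temp;    //   not provably non-null
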
static void emitWritebacks(CodeGenFunction &CGF,
                           const CallArgList &args) {
  for (CallArgList::writeback_iterator
         i = args.writeback_begin(), e = args.writeback_end(); i != e; ++i)
    emitWriteback(CGF, *i);
}
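
// Writebacks are recorded while the arguments are being emitted (see
// args.addWriteback in emitWritebackArg below) and then flushed as a group
// once the call instruction itself has been emitted.  The call site in
// EmitCall looks like:
//
//   if (CallArgs.hasWritebacks())
//     emitWritebacks(*this, CallArgs);
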
/// Emit an argument that's being passed call-by-writeback.  That is,
/// we are passing the address of a temporary, which is copy-initialized
/// from the original l-value if requested and whose final value is written
/// back through the original address after the call.
static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args,
                             const ObjCIndirectCopyRestoreExpr *CRE) {
  llvm::Value *srcAddr = CGF.EmitScalarExpr(CRE->getSubExpr());

  // The dest and src types don't necessarily match in LLVM terms
  // because of the crazy ObjC compatibility rules.
  llvm::PointerType *destType =
    cast<llvm::PointerType>(CGF.ConvertType(CRE->getType()));

  // If the address is a constant null, just pass the appropriate null.
  if (isProvablyNull(srcAddr)) {
    args.add(RValue::get(llvm::ConstantPointerNull::get(destType)),
             CRE->getType());
    return;
  }

  QualType srcAddrType =
    CRE->getSubExpr()->getType()->castAs<PointerType>()->getPointeeType();

  // Create the temporary.
  llvm::Value *temp = CGF.CreateTempAlloca(destType->getElementType(),
                                           "icr.temp");

  // Zero-initialize it if we're not doing a copy-initialization.
  bool shouldCopy = CRE->shouldCopy();
  if (!shouldCopy) {
    llvm::Value *null =
      llvm::ConstantPointerNull::get(
        cast<llvm::PointerType>(destType->getElementType()));
    CGF.Builder.CreateStore(null, temp);
  }

  llvm::BasicBlock *contBB = 0;

  // If the address is *not* known to be non-null, we need to switch.
  llvm::Value *finalArgument;

  bool provablyNonNull = isProvablyNonNull(srcAddr);
  if (provablyNonNull) {
    finalArgument = temp;
  } else {
    llvm::Value *isNull = CGF.Builder.CreateIsNull(srcAddr, "icr.isnull");

    finalArgument = CGF.Builder.CreateSelect(isNull,
                                     llvm::ConstantPointerNull::get(destType),
                                             temp, "icr.argument");

    // If we need to copy, then the load has to be conditional, which
    // means we need control flow.
    if (shouldCopy) {
      contBB = CGF.createBasicBlock("icr.cont");
      llvm::BasicBlock *copyBB = CGF.createBasicBlock("icr.copy");
      CGF.Builder.CreateCondBr(isNull, contBB, copyBB);
      CGF.EmitBlock(copyBB);
    }
  }

  // Perform a copy if necessary.
  if (shouldCopy) {
    LValue srcLV = CGF.MakeAddrLValue(srcAddr, srcAddrType);
    RValue srcRV = CGF.EmitLoadOfLValue(srcLV);
    assert(srcRV.isScalar());

    llvm::Value *src = srcRV.getScalarVal();
    src = CGF.Builder.CreateBitCast(src, destType->getElementType(),
                                    "icr.cast");

    // Use an ordinary store, not a store-to-lvalue.
    CGF.Builder.CreateStore(src, temp);
  }

  // Finish the control flow if we needed it.
  if (shouldCopy && !provablyNonNull)
    CGF.EmitBlock(contBB);

  args.addWriteback(srcAddr, srcAddrType, temp);
  args.add(RValue::get(finalArgument), CRE->getType());
}
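
// Putting the pieces together: for a "maybe null" source address with
// shouldCopy set, emitWritebackArg behaves roughly like this source-level
// sketch (DestElemTy stands for the parameter's pointee type; names are
// illustrative only):
//
//   DestElemTy *temp = /* icr.temp alloca */;
//   DestElemTy *arg  = (srcAddr == 0) ? 0 : temp;   // icr.argument select
//   if (srcAddr != 0)
//     *temp = (DestElemTy) *srcAddr;                // conditional copy-in
//   // pass 'arg'; after the call, emitWriteback stores *temp back
//   // through srcAddr.
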
void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E,
                                  QualType type) {
  if (const ObjCIndirectCopyRestoreExpr *CRE
        = dyn_cast<ObjCIndirectCopyRestoreExpr>(E)) {
    assert(getContext().getLangOptions().ObjCAutoRefCount);
    assert(getContext().hasSameType(E->getType(), type));
    return emitWritebackArg(*this, args, CRE);
  }

  if (type->isReferenceType())
    return args.add(EmitReferenceBindingToExpr(E, /*InitializedDecl=*/0),
                    type);

  if (hasAggregateLLVMType(type) && !E->getType()->isAnyComplexType() &&
      isa<ImplicitCastExpr>(E) &&
      cast<CastExpr>(E)->getCastKind() == CK_LValueToRValue) {
    LValue L = EmitLValue(cast<CastExpr>(E)->getSubExpr());
    assert(L.isSimple());
    args.add(RValue::getAggregate(L.getAddress(), L.isVolatileQualified()),
             type, /*NeedsCopy*/true);
    return;
  }

  args.add(EmitAnyExprToTemp(E), type);
}
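
// EmitCallArg is the per-argument worker; the usual driver is a loop over the
// call's argument expressions, pairing each one with its parameter type.
// Roughly (a sketch, assuming 'ArgType' has already been computed for the
// current argument expression 'Arg'):
//
//   CallArgList Args;
//   for (CallExpr::const_arg_iterator Arg = ArgBeg; Arg != ArgEnd; ++Arg)
//     EmitCallArg(Args, *Arg, ArgType);
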
/// Emits a call or invoke instruction to the given function, depending
/// on the current state of the EH stack.
llvm::CallSite
CodeGenFunction::EmitCallOrInvoke(llvm::Value *Callee,
                                  ArrayRef<llvm::Value *> Args,
                                  const Twine &Name) {
  llvm::BasicBlock *InvokeDest = getInvokeDest();
  if (!InvokeDest)
    return Builder.CreateCall(Callee, Args, Name);

  llvm::BasicBlock *ContBB = createBasicBlock("invoke.cont");
  llvm::InvokeInst *Invoke = Builder.CreateInvoke(Callee, ContBB, InvokeDest,
                                                  Args, Name);
  EmitBlock(ContBB);
  return Invoke;
}

llvm::CallSite
CodeGenFunction::EmitCallOrInvoke(llvm::Value *Callee,
                                  const Twine &Name) {
  return EmitCallOrInvoke(Callee, ArrayRef<llvm::Value *>(), Name);
}
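
// These overloads are the simple way to emit calls to helper functions from
// within a function body: when getInvokeDest() returns a landing-pad block,
// the call automatically becomes an invoke that unwinds into it, and execution
// resumes in a fresh "invoke.cont" block otherwise.  For example (a sketch;
// assumes 'fn' and 'arg' are llvm::Value*s already in hand):
//
//   llvm::Value *callArgs[] = { arg };
//   EmitCallOrInvoke(fn, callArgs, "");   // plain call or invoke, as needed
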
static void checkArgMatches(llvm::Value *Elt, unsigned &ArgNo,
                            llvm::FunctionType *FTy) {
  if (ArgNo < FTy->getNumParams())
    assert(Elt->getType() == FTy->getParamType(ArgNo));
  else
    assert(FTy->isVarArg());
  ++ArgNo;
}
void CodeGenFunction::ExpandTypeToArgs(QualType Ty, RValue RV,
                                       SmallVector<llvm::Value*,16> &Args,
                                       llvm::FunctionType *IRFuncTy) {
  if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
    unsigned NumElts = AT->getSize().getZExtValue();
    QualType EltTy = AT->getElementType();
    llvm::Value *Addr = RV.getAggregateAddr();
    for (unsigned Elt = 0; Elt < NumElts; ++Elt) {
      llvm::Value *EltAddr = Builder.CreateConstGEP2_32(Addr, 0, Elt);
      LValue LV = MakeAddrLValue(EltAddr, EltTy);
      RValue EltRV;
      if (CodeGenFunction::hasAggregateLLVMType(EltTy))
        EltRV = RValue::getAggregate(LV.getAddress());
      else
        EltRV = EmitLoadOfLValue(LV);
      ExpandTypeToArgs(EltTy, EltRV, Args, IRFuncTy);
    }
  } else if (const RecordType *RT = Ty->getAsStructureType()) {
    RecordDecl *RD = RT->getDecl();
    assert(RV.isAggregate() && "Unexpected rvalue during struct expansion");
    llvm::Value *Addr = RV.getAggregateAddr();
    for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
         i != e; ++i) {
      FieldDecl *FD = *i;
      QualType FT = FD->getType();

      // FIXME: What are the right qualifiers here?
      LValue LV = EmitLValueForField(Addr, FD, 0);
      RValue FldRV;
      if (CodeGenFunction::hasAggregateLLVMType(FT))
        FldRV = RValue::getAggregate(LV.getAddress());
      else
        FldRV = EmitLoadOfLValue(LV);
      ExpandTypeToArgs(FT, FldRV, Args, IRFuncTy);
    }
  } else if (isa<ComplexType>(Ty)) {
    ComplexPairTy CV = RV.getComplexVal();
    Args.push_back(CV.first);
    Args.push_back(CV.second);
  } else {
    assert(RV.isScalar() &&
           "Unexpected non-scalar rvalue during struct expansion.");

    // Insert a bitcast as needed.
    llvm::Value *V = RV.getScalarVal();
    if (Args.size() < IRFuncTy->getNumParams() &&
        V->getType() != IRFuncTy->getParamType(Args.size()))
      V = Builder.CreateBitCast(V, IRFuncTy->getParamType(Args.size()));

    Args.push_back(V);
  }
}
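
// For example, under the Expand ABI kind a parameter declared as
//
//   struct Point { float x; float y; };
//
// is passed as two separate float arguments (one per field) rather than as a
// single aggregate; constant arrays and nested structs are flattened
// recursively, and _Complex values contribute their real and imaginary parts.
// (Illustrative only; whether Expand is used at all is a target ABI decision.)
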
RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
                                 llvm::Value *Callee,
                                 ReturnValueSlot ReturnValue,
                                 const CallArgList &CallArgs,
                                 const Decl *TargetDecl,
                                 llvm::Instruction **callOrInvoke) {
  // FIXME: We no longer need the types from CallArgs; lift up and simplify.
  SmallVector<llvm::Value*, 16> Args;

  // Handle struct-return functions by passing a pointer to the
  // location that we would like to return into.
  QualType RetTy = CallInfo.getReturnType();
  const ABIArgInfo &RetAI = CallInfo.getReturnInfo();

  // IRArgNo - Keep track of the argument number in the callee we're looking at.
  unsigned IRArgNo = 0;
  llvm::FunctionType *IRFuncTy =
    cast<llvm::FunctionType>(
                  cast<llvm::PointerType>(Callee->getType())->getElementType());

  // If the call returns a temporary with struct return, create a temporary
  // alloca to hold the result, unless one is given to us.
  if (CGM.ReturnTypeUsesSRet(CallInfo)) {
    llvm::Value *Value = ReturnValue.getValue();
    if (!Value)
      Value = CreateMemTemp(RetTy);
    Args.push_back(Value);
    checkArgMatches(Value, IRArgNo, IRFuncTy);
  }

  assert(CallInfo.arg_size() == CallArgs.size() &&
         "Mismatch between function signature & arguments.");
  CGFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin();
  for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
       I != E; ++I, ++info_it) {
    const ABIArgInfo &ArgInfo = info_it->info;
    RValue RV = I->RV;

    unsigned TypeAlign =
      getContext().getTypeAlignInChars(I->Ty).getQuantity();
    switch (ArgInfo.getKind()) {
    case ABIArgInfo::Indirect: {
      if (RV.isScalar() || RV.isComplex()) {
        // Make a temporary alloca to pass the argument.
        llvm::AllocaInst *AI = CreateMemTemp(I->Ty);
        if (ArgInfo.getIndirectAlign() > AI->getAlignment())
          AI->setAlignment(ArgInfo.getIndirectAlign());
        Args.push_back(AI);

        if (RV.isScalar())
          EmitStoreOfScalar(RV.getScalarVal(), Args.back(), false,
                            TypeAlign, I->Ty);
        else
          StoreComplexToAddr(RV.getComplexVal(), Args.back(), false);

        // Validate argument match.
        checkArgMatches(AI, IRArgNo, IRFuncTy);
      } else {
        // We want to avoid creating an unnecessary temporary+copy here;
        // however, we need one in two cases:
        // 1. If the argument is not byval, and we are required to copy the
        //    source.  (This case doesn't occur on any common architecture.)
        // 2. If the argument is byval, RV is not sufficiently aligned, and
        //    we cannot force it to be sufficiently aligned.
        llvm::Value *Addr = RV.getAggregateAddr();
        unsigned Align = ArgInfo.getIndirectAlign();
        const llvm::TargetData *TD = &CGM.getTargetData();
        if ((!ArgInfo.getIndirectByVal() && I->NeedsCopy) ||
            (ArgInfo.getIndirectByVal() && TypeAlign < Align &&
             llvm::getOrEnforceKnownAlignment(Addr, Align, TD) < Align)) {
          // Create an aligned temporary, and copy to it.
          llvm::AllocaInst *AI = CreateMemTemp(I->Ty);
          if (Align > AI->getAlignment())
            AI->setAlignment(Align);
          Args.push_back(AI);
          EmitAggregateCopy(AI, Addr, I->Ty, RV.isVolatileQualified());

          // Validate argument match.
          checkArgMatches(AI, IRArgNo, IRFuncTy);
        } else {
          // Skip the extra memcpy call.
          Args.push_back(Addr);

          // Validate argument match.
          checkArgMatches(Addr, IRArgNo, IRFuncTy);
        }
      }
      break;
    }

    case ABIArgInfo::Ignore:
      break;

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      if (!isa<llvm::StructType>(ArgInfo.getCoerceToType()) &&
          ArgInfo.getCoerceToType() == ConvertType(info_it->type) &&
          ArgInfo.getDirectOffset() == 0) {
        llvm::Value *V;
        if (RV.isScalar())
          V = RV.getScalarVal();
        else
          V = Builder.CreateLoad(RV.getAggregateAddr());

        // If the argument doesn't match, perform a bitcast to coerce it.  This
        // can happen due to trivial type mismatches.
        if (IRArgNo < IRFuncTy->getNumParams() &&
            V->getType() != IRFuncTy->getParamType(IRArgNo))
          V = Builder.CreateBitCast(V, IRFuncTy->getParamType(IRArgNo));
        Args.push_back(V);

        checkArgMatches(V, IRArgNo, IRFuncTy);
        break;
      }

      // FIXME: Avoid the conversion through memory if possible.
      llvm::Value *SrcPtr;
      if (RV.isScalar()) {
        SrcPtr = CreateMemTemp(I->Ty, "coerce");
        EmitStoreOfScalar(RV.getScalarVal(), SrcPtr, false, TypeAlign, I->Ty);
      } else if (RV.isComplex()) {
        SrcPtr = CreateMemTemp(I->Ty, "coerce");
        StoreComplexToAddr(RV.getComplexVal(), SrcPtr, false);
      } else
        SrcPtr = RV.getAggregateAddr();

      // If the value is offset in memory, apply the offset now.
      if (unsigned Offs = ArgInfo.getDirectOffset()) {
        SrcPtr = Builder.CreateBitCast(SrcPtr, Builder.getInt8PtrTy());
        SrcPtr = Builder.CreateConstGEP1_32(SrcPtr, Offs);
        SrcPtr = Builder.CreateBitCast(SrcPtr,
                       llvm::PointerType::getUnqual(ArgInfo.getCoerceToType()));
      }

      // If the coerce-to type is a first class aggregate, we flatten it and
      // pass the elements. Either way is semantically identical, but fast-isel
      // and the optimizer generally likes scalar values better than FCAs.
      if (llvm::StructType *STy =
            dyn_cast<llvm::StructType>(ArgInfo.getCoerceToType())) {
        SrcPtr = Builder.CreateBitCast(SrcPtr,
                                       llvm::PointerType::getUnqual(STy));
        for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
          llvm::Value *EltPtr = Builder.CreateConstGEP2_32(SrcPtr, 0, i);
          llvm::LoadInst *LI = Builder.CreateLoad(EltPtr);
          // We don't know what we're loading from.
          LI->setAlignment(1);
          Args.push_back(LI);

          // Validate argument match.
          checkArgMatches(LI, IRArgNo, IRFuncTy);
        }
      } else {
        // In the simple case, just pass the coerced loaded value.
        Args.push_back(CreateCoercedLoad(SrcPtr, ArgInfo.getCoerceToType(),
                                         *this));

        // Validate argument match.
        checkArgMatches(Args.back(), IRArgNo, IRFuncTy);
      }

      break;
    }

    case ABIArgInfo::Expand:
      ExpandTypeToArgs(I->Ty, RV, Args, IRFuncTy);
      IRArgNo = Args.size();
      break;
    }
  }

  // If the callee is a bitcast of a function to a varargs pointer to function
  // type, check to see if we can remove the bitcast.  This handles some cases
  // with unprototyped functions.
  if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Callee))
    if (llvm::Function *CalleeF = dyn_cast<llvm::Function>(CE->getOperand(0))) {
      llvm::PointerType *CurPT = cast<llvm::PointerType>(Callee->getType());
      llvm::FunctionType *CurFT =
        cast<llvm::FunctionType>(CurPT->getElementType());
      llvm::FunctionType *ActualFT = CalleeF->getFunctionType();

      if (CE->getOpcode() == llvm::Instruction::BitCast &&
          ActualFT->getReturnType() == CurFT->getReturnType() &&
          ActualFT->getNumParams() == CurFT->getNumParams() &&
          ActualFT->getNumParams() == Args.size() &&
          (CurFT->isVarArg() || !ActualFT->isVarArg())) {
        bool ArgsMatch = true;
        for (unsigned i = 0, e = ActualFT->getNumParams(); i != e; ++i)
          if (ActualFT->getParamType(i) != CurFT->getParamType(i)) {
            ArgsMatch = false;
            break;
          }

        // Strip the cast if we can get away with it.  This is a nice cleanup,
        // but also allows us to inline the function at -O0 if it is marked
        // always_inline.
        if (ArgsMatch)
          Callee = CalleeF;
      }
    }

  unsigned CallingConv;
  CodeGen::AttributeListType AttributeList;
  CGM.ConstructAttributeList(CallInfo, TargetDecl, AttributeList, CallingConv);
  llvm::AttrListPtr Attrs = llvm::AttrListPtr::get(AttributeList.begin(),
                                                   AttributeList.end());

  llvm::BasicBlock *InvokeDest = 0;
  if (!(Attrs.getFnAttributes() & llvm::Attribute::NoUnwind))
    InvokeDest = getInvokeDest();

  llvm::CallSite CS;
  if (!InvokeDest) {
    CS = Builder.CreateCall(Callee, Args);
  } else {
    llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
    CS = Builder.CreateInvoke(Callee, Cont, InvokeDest, Args);
    EmitBlock(Cont);
  }
  if (callOrInvoke)
    *callOrInvoke = CS.getInstruction();

  CS.setAttributes(Attrs);
  CS.setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));

  // If the call doesn't return, finish the basic block and clear the
  // insertion point; this allows the rest of IRgen to discard
  // unreachable code.
  if (CS.doesNotReturn()) {
    Builder.CreateUnreachable();
    Builder.ClearInsertionPoint();

    // FIXME: For now, emit a dummy basic block because expr emitters in
    // general are not ready to handle emitting expressions at unreachable
    // points.
    EnsureInsertPoint();

    // Return a reasonable RValue.
    return GetUndefRValue(RetTy);
  }

  llvm::Instruction *CI = CS.getInstruction();
  if (Builder.isNamePreserving() && !CI->getType()->isVoidTy())
    CI->setName("call");

  // Emit any writebacks immediately.  Arguably this should happen
  // after any return-value munging.
  if (CallArgs.hasWritebacks())
    emitWritebacks(*this, CallArgs);

  switch (RetAI.getKind()) {
  case ABIArgInfo::Indirect: {
    unsigned Alignment = getContext().getTypeAlignInChars(RetTy).getQuantity();
    if (RetTy->isAnyComplexType())
      return RValue::getComplex(LoadComplexFromAddr(Args[0], false));
    if (CodeGenFunction::hasAggregateLLVMType(RetTy))
      return RValue::getAggregate(Args[0]);
    return RValue::get(EmitLoadOfScalar(Args[0], false, Alignment, RetTy));
  }

  case ABIArgInfo::Ignore:
    // If we are ignoring an argument that had a result, make sure to
    // construct the appropriate return value for our caller.
    return GetUndefRValue(RetTy);

  case ABIArgInfo::Extend:
  case ABIArgInfo::Direct: {
    llvm::Type *RetIRTy = ConvertType(RetTy);
    if (RetAI.getCoerceToType() == RetIRTy && RetAI.getDirectOffset() == 0) {
      if (RetTy->isAnyComplexType()) {
        llvm::Value *Real = Builder.CreateExtractValue(CI, 0);
        llvm::Value *Imag = Builder.CreateExtractValue(CI, 1);
        return RValue::getComplex(std::make_pair(Real, Imag));
      }
      if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
        llvm::Value *DestPtr = ReturnValue.getValue();
        bool DestIsVolatile = ReturnValue.isVolatile();

        if (!DestPtr) {
          DestPtr = CreateMemTemp(RetTy, "agg.tmp");
          DestIsVolatile = false;
        }
        BuildAggStore(*this, CI, DestPtr, DestIsVolatile, false);
        return RValue::getAggregate(DestPtr);
      }

      // If the argument doesn't match, perform a bitcast to coerce it.  This
      // can happen due to trivial type mismatches.
      llvm::Value *V = CI;
      if (V->getType() != RetIRTy)
        V = Builder.CreateBitCast(V, RetIRTy);
      return RValue::get(V);
    }

    llvm::Value *DestPtr = ReturnValue.getValue();
    bool DestIsVolatile = ReturnValue.isVolatile();

    if (!DestPtr) {
      DestPtr = CreateMemTemp(RetTy, "coerce");
      DestIsVolatile = false;
    }

    // If the value is offset in memory, apply the offset now.
    llvm::Value *StorePtr = DestPtr;
    if (unsigned Offs = RetAI.getDirectOffset()) {
      StorePtr = Builder.CreateBitCast(StorePtr, Builder.getInt8PtrTy());
      StorePtr = Builder.CreateConstGEP1_32(StorePtr, Offs);
      StorePtr = Builder.CreateBitCast(StorePtr,
                         llvm::PointerType::getUnqual(RetAI.getCoerceToType()));
    }
    CreateCoercedStore(CI, StorePtr, DestIsVolatile, *this);

    unsigned Alignment = getContext().getTypeAlignInChars(RetTy).getQuantity();
    if (RetTy->isAnyComplexType())
      return RValue::getComplex(LoadComplexFromAddr(DestPtr, false));
    if (CodeGenFunction::hasAggregateLLVMType(RetTy))
      return RValue::getAggregate(DestPtr);
    return RValue::get(EmitLoadOfScalar(DestPtr, false, Alignment, RetTy));
  }

  case ABIArgInfo::Expand:
    assert(0 && "Invalid ABI kind for return argument");
  }

  assert(0 && "Unhandled ABIArgInfo::Kind");
  return RValue::get(0);
}
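
// A condensed sketch of how a caller drives EmitCall (the exact way the
// CGFunctionInfo is obtained varies by call site; 'FnInfo' here is assumed to
// come from CGM.getTypes()):
//
//   CallArgList Args;
//   EmitCallArgs(Args, FnType, ArgBeg, ArgEnd);           // build arguments
//   const CGFunctionInfo &FnInfo = /* CGM.getTypes() */;  // ABI lowering info
//   RValue Result = EmitCall(FnInfo, Callee, ReturnValueSlot(), Args,
//                            TargetDecl);
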
/* VarArg handling */

llvm::Value *CodeGenFunction::EmitVAArg(llvm::Value *VAListAddr, QualType Ty) {
  return CGM.getTypes().getABIInfo().EmitVAArg(VAListAddr, Ty, *this);
}
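
// EmitVAArg yields the address of the next variadic argument in memory; the
// expression emitters then load through that address as appropriate for the
// type.  A rough sketch of a scalar use (names illustrative):
//
//   llvm::Value *ArgPtr = EmitVAArg(VAListAddr, E->getType());
//   llvm::Value *Arg    = Builder.CreateLoad(ArgPtr, "va_arg");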