  1. //===--- CGCall.cpp - Encapsulate calling convention details ----*- C++ -*-===//
  2. //
  3. // The LLVM Compiler Infrastructure
  4. //
  5. // This file is distributed under the University of Illinois Open Source
  6. // License. See LICENSE.TXT for details.
  7. //
  8. //===----------------------------------------------------------------------===//
  9. //
  10. // These classes wrap the information about a call or function
  11. // definition used to handle ABI compliance.
  12. //
  13. //===----------------------------------------------------------------------===//
  14. #include "CGCall.h"
  15. #include "CGCXXABI.h"
  16. #include "ABIInfo.h"
  17. #include "CodeGenFunction.h"
  18. #include "CodeGenModule.h"
  19. #include "TargetInfo.h"
  20. #include "clang/Basic/TargetInfo.h"
  21. #include "clang/AST/Decl.h"
  22. #include "clang/AST/DeclCXX.h"
  23. #include "clang/AST/DeclObjC.h"
  24. #include "clang/Frontend/CodeGenOptions.h"
  25. #include "llvm/Attributes.h"
  26. #include "llvm/Support/CallSite.h"
  27. #include "llvm/Target/TargetData.h"
  28. #include "llvm/InlineAsm.h"
  29. #include "llvm/Transforms/Utils/Local.h"
  30. using namespace clang;
  31. using namespace CodeGen;
  32. /***/
  33. static unsigned ClangCallConvToLLVMCallConv(CallingConv CC) {
  34. switch (CC) {
  35. default: return llvm::CallingConv::C;
  36. case CC_X86StdCall: return llvm::CallingConv::X86_StdCall;
  37. case CC_X86FastCall: return llvm::CallingConv::X86_FastCall;
  38. case CC_X86ThisCall: return llvm::CallingConv::X86_ThisCall;
  39. case CC_AAPCS: return llvm::CallingConv::ARM_AAPCS;
  40. case CC_AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
  41. // TODO: add support for CC_X86Pascal to llvm
  42. }
  43. }
  44. /// Derives the 'this' type for codegen purposes, i.e. ignoring method
  45. /// qualification.
  46. /// FIXME: address space qualification?
  47. static CanQualType GetThisType(ASTContext &Context, const CXXRecordDecl *RD) {
  48. QualType RecTy = Context.getTagDeclType(RD)->getCanonicalTypeInternal();
  49. return Context.getPointerType(CanQualType::CreateUnsafe(RecTy));
  50. }
  51. /// Returns the canonical formal type of the given C++ method.
  52. static CanQual<FunctionProtoType> GetFormalType(const CXXMethodDecl *MD) {
  53. return MD->getType()->getCanonicalTypeUnqualified()
  54. .getAs<FunctionProtoType>();
  55. }
  56. /// Returns the "extra-canonicalized" return type, which discards
  57. /// qualifiers on the return type. Codegen doesn't care about them,
  58. /// and it makes ABI code a little easier to be able to assume that
  59. /// all parameter and return types are top-level unqualified.
  60. static CanQualType GetReturnType(QualType RetTy) {
  61. return RetTy->getCanonicalTypeUnqualified().getUnqualifiedType();
  62. }
  63. /// Arrange the argument and result information for a value of the
  64. /// given unprototyped function type.
  65. const CGFunctionInfo &
  66. CodeGenTypes::arrangeFunctionType(CanQual<FunctionNoProtoType> FTNP) {
  67. // When translating an unprototyped function type, always use a
  68. // variadic type.
  69. return arrangeFunctionType(FTNP->getResultType().getUnqualifiedType(),
  70. ArrayRef<CanQualType>(),
  71. FTNP->getExtInfo(),
  72. RequiredArgs(0));
  73. }
  74. /// Arrange the argument and result information for a value of the
  75. /// given function type, on top of any implicit parameters already
  76. /// stored.
  77. static const CGFunctionInfo &arrangeFunctionType(CodeGenTypes &CGT,
  78. SmallVectorImpl<CanQualType> &argTypes,
  79. CanQual<FunctionProtoType> FTP) {
  80. RequiredArgs required = RequiredArgs::forPrototypePlus(FTP, argTypes.size());
  81. // FIXME: Kill copy.
  82. for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
  83. argTypes.push_back(FTP->getArgType(i));
  84. CanQualType resultType = FTP->getResultType().getUnqualifiedType();
  85. return CGT.arrangeFunctionType(resultType, argTypes,
  86. FTP->getExtInfo(), required);
  87. }
  88. /// Arrange the argument and result information for a value of the
  89. /// given function type.
  90. const CGFunctionInfo &
  91. CodeGenTypes::arrangeFunctionType(CanQual<FunctionProtoType> FTP) {
  92. SmallVector<CanQualType, 16> argTypes;
  93. return ::arrangeFunctionType(*this, argTypes, FTP);
  94. }
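/// Determine the calling convention to use for a declaration based on its
/// calling-convention attributes, falling back to the C convention.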
  95. static CallingConv getCallingConventionForDecl(const Decl *D) {
  96. // Set the appropriate calling convention for the Function.
  97. if (D->hasAttr<StdCallAttr>())
  98. return CC_X86StdCall;
  99. if (D->hasAttr<FastCallAttr>())
  100. return CC_X86FastCall;
  101. if (D->hasAttr<ThisCallAttr>())
  102. return CC_X86ThisCall;
  103. if (D->hasAttr<PascalAttr>())
  104. return CC_X86Pascal;
  105. if (PcsAttr *PCS = D->getAttr<PcsAttr>())
  106. return (PCS->getPCS() == PcsAttr::AAPCS ? CC_AAPCS : CC_AAPCS_VFP);
  107. return CC_C;
  108. }
  109. /// Arrange the argument and result information for a call to an
  110. /// unknown C++ non-static member function of the given abstract type.
  111. /// The member function must be an ordinary function, i.e. not a
  112. /// constructor or destructor.
  113. const CGFunctionInfo &
  114. CodeGenTypes::arrangeCXXMethodType(const CXXRecordDecl *RD,
  115. const FunctionProtoType *FTP) {
  116. SmallVector<CanQualType, 16> argTypes;
  117. // Add the 'this' pointer.
  118. argTypes.push_back(GetThisType(Context, RD));
  119. return ::arrangeFunctionType(*this, argTypes,
  120. FTP->getCanonicalTypeUnqualified().getAs<FunctionProtoType>());
  121. }
  122. /// Arrange the argument and result information for a declaration or
  123. /// definition of the given C++ non-static member function. The
  124. /// member function must be an ordinary function, i.e. not a
  125. /// constructor or destructor.
  126. const CGFunctionInfo &
  127. CodeGenTypes::arrangeCXXMethodDeclaration(const CXXMethodDecl *MD) {
  129. assert(!isa<CXXConstructorDecl>(MD) && "wrong method for constructors!");
  129. assert(!isa<CXXDestructorDecl>(MD) && "wrong method for destructors!");
  130. CanQual<FunctionProtoType> prototype = GetFormalType(MD);
  131. if (MD->isInstance()) {
  132. // The abstract case is perfectly fine.
  133. return arrangeCXXMethodType(MD->getParent(), prototype.getTypePtr());
  134. }
  135. return arrangeFunctionType(prototype);
  136. }
  137. /// Arrange the argument and result information for a declaration
  138. /// or definition to the given constructor variant.
  139. const CGFunctionInfo &
  140. CodeGenTypes::arrangeCXXConstructorDeclaration(const CXXConstructorDecl *D,
  141. CXXCtorType ctorKind) {
  142. SmallVector<CanQualType, 16> argTypes;
  143. argTypes.push_back(GetThisType(Context, D->getParent()));
  144. CanQualType resultType = Context.VoidTy;
  145. TheCXXABI.BuildConstructorSignature(D, ctorKind, resultType, argTypes);
  146. CanQual<FunctionProtoType> FTP = GetFormalType(D);
  147. RequiredArgs required = RequiredArgs::forPrototypePlus(FTP, argTypes.size());
  148. // Add the formal parameters.
  149. for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
  150. argTypes.push_back(FTP->getArgType(i));
  151. return arrangeFunctionType(resultType, argTypes, FTP->getExtInfo(), required);
  152. }
  153. /// Arrange the argument and result information for a declaration,
  154. /// definition, or call to the given destructor variant. It so
  155. /// happens that all three cases produce the same information.
  156. const CGFunctionInfo &
  157. CodeGenTypes::arrangeCXXDestructor(const CXXDestructorDecl *D,
  158. CXXDtorType dtorKind) {
  159. SmallVector<CanQualType, 2> argTypes;
  160. argTypes.push_back(GetThisType(Context, D->getParent()));
  161. CanQualType resultType = Context.VoidTy;
  162. TheCXXABI.BuildDestructorSignature(D, dtorKind, resultType, argTypes);
  163. CanQual<FunctionProtoType> FTP = GetFormalType(D);
  164. assert(FTP->getNumArgs() == 0 && "dtor with formal parameters");
  165. return arrangeFunctionType(resultType, argTypes, FTP->getExtInfo(),
  166. RequiredArgs::All);
  167. }
  168. /// Arrange the argument and result information for the declaration or
  169. /// definition of the given function.
  170. const CGFunctionInfo &
  171. CodeGenTypes::arrangeFunctionDeclaration(const FunctionDecl *FD) {
  172. if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
  173. if (MD->isInstance())
  174. return arrangeCXXMethodDeclaration(MD);
  175. CanQualType FTy = FD->getType()->getCanonicalTypeUnqualified();
  176. assert(isa<FunctionType>(FTy));
  177. // When declaring a function without a prototype, always use a
  178. // non-variadic type.
  179. if (isa<FunctionNoProtoType>(FTy)) {
  180. CanQual<FunctionNoProtoType> noProto = FTy.getAs<FunctionNoProtoType>();
  181. return arrangeFunctionType(noProto->getResultType(),
  182. ArrayRef<CanQualType>(),
  183. noProto->getExtInfo(),
  184. RequiredArgs::All);
  185. }
  186. assert(isa<FunctionProtoType>(FTy));
  187. return arrangeFunctionType(FTy.getAs<FunctionProtoType>());
  188. }
  189. /// Arrange the argument and result information for the declaration or
  190. /// definition of an Objective-C method.
  191. const CGFunctionInfo &
  192. CodeGenTypes::arrangeObjCMethodDeclaration(const ObjCMethodDecl *MD) {
  193. // It happens that this is the same as a call with no optional
  194. // arguments, except also using the formal 'self' type.
  195. return arrangeObjCMessageSendSignature(MD, MD->getSelfDecl()->getType());
  196. }
  197. /// Arrange the argument and result information for the function type
  198. /// through which to perform a send to the given Objective-C method,
  199. /// using the given receiver type. The receiver type is not always
  200. /// the 'self' type of the method or even an Objective-C pointer type.
  201. /// This is *not* the right method for actually performing such a
  202. /// message send, due to the possibility of optional arguments.
  203. const CGFunctionInfo &
  204. CodeGenTypes::arrangeObjCMessageSendSignature(const ObjCMethodDecl *MD,
  205. QualType receiverType) {
  206. SmallVector<CanQualType, 16> argTys;
  207. argTys.push_back(Context.getCanonicalParamType(receiverType));
  208. argTys.push_back(Context.getCanonicalParamType(Context.getObjCSelType()));
  209. // FIXME: Kill copy?
  210. for (ObjCMethodDecl::param_const_iterator i = MD->param_begin(),
  211. e = MD->param_end(); i != e; ++i) {
  212. argTys.push_back(Context.getCanonicalParamType((*i)->getType()));
  213. }
  214. FunctionType::ExtInfo einfo;
  215. einfo = einfo.withCallingConv(getCallingConventionForDecl(MD));
  216. if (getContext().getLangOpts().ObjCAutoRefCount &&
  217. MD->hasAttr<NSReturnsRetainedAttr>())
  218. einfo = einfo.withProducesResult(true);
  219. RequiredArgs required =
  220. (MD->isVariadic() ? RequiredArgs(argTys.size()) : RequiredArgs::All);
  221. return arrangeFunctionType(GetReturnType(MD->getResultType()), argTys,
  222. einfo, required);
  223. }
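/// Arrange the argument and result information for the function represented
/// by the given GlobalDecl, dispatching to the constructor or destructor
/// variants when appropriate.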
  224. const CGFunctionInfo &
  225. CodeGenTypes::arrangeGlobalDeclaration(GlobalDecl GD) {
  226. // FIXME: Do we need to handle ObjCMethodDecl?
  227. const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
  228. if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD))
  229. return arrangeCXXConstructorDeclaration(CD, GD.getCtorType());
  230. if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(FD))
  231. return arrangeCXXDestructor(DD, GD.getDtorType());
  232. return arrangeFunctionDeclaration(FD);
  233. }
  234. /// Figure out the rules for calling a function with the given formal
  235. /// type using the given arguments. The arguments are necessary
  236. /// because the function might be unprototyped, in which case it's
  237. /// target-dependent in crazy ways.
  238. const CGFunctionInfo &
  239. CodeGenTypes::arrangeFunctionCall(const CallArgList &args,
  240. const FunctionType *fnType) {
  241. RequiredArgs required = RequiredArgs::All;
  242. if (const FunctionProtoType *proto = dyn_cast<FunctionProtoType>(fnType)) {
  243. if (proto->isVariadic())
  244. required = RequiredArgs(proto->getNumArgs());
  245. } else if (CGM.getTargetCodeGenInfo()
  246. .isNoProtoCallVariadic(args, cast<FunctionNoProtoType>(fnType))) {
  247. required = RequiredArgs(0);
  248. }
  249. return arrangeFunctionCall(fnType->getResultType(), args,
  250. fnType->getExtInfo(), required);
  251. }
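/// Arrange a call with the given result type, actual argument list, and
/// extension info; each argument type is canonicalized as a parameter type.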
  252. const CGFunctionInfo &
  253. CodeGenTypes::arrangeFunctionCall(QualType resultType,
  254. const CallArgList &args,
  255. const FunctionType::ExtInfo &info,
  256. RequiredArgs required) {
  257. // FIXME: Kill copy.
  258. SmallVector<CanQualType, 16> argTypes;
  259. for (CallArgList::const_iterator i = args.begin(), e = args.end();
  260. i != e; ++i)
  261. argTypes.push_back(Context.getCanonicalParamType(i->Ty));
  262. return arrangeFunctionType(GetReturnType(resultType), argTypes, info,
  263. required);
  264. }
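/// Arrange the argument and result information for a declaration or
/// definition whose parameters are supplied as a FunctionArgList.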
  265. const CGFunctionInfo &
  266. CodeGenTypes::arrangeFunctionDeclaration(QualType resultType,
  267. const FunctionArgList &args,
  268. const FunctionType::ExtInfo &info,
  269. bool isVariadic) {
  270. // FIXME: Kill copy.
  271. SmallVector<CanQualType, 16> argTypes;
  272. for (FunctionArgList::const_iterator i = args.begin(), e = args.end();
  273. i != e; ++i)
  274. argTypes.push_back(Context.getCanonicalParamType((*i)->getType()));
  275. RequiredArgs required =
  276. (isVariadic ? RequiredArgs(args.size()) : RequiredArgs::All);
  277. return arrangeFunctionType(GetReturnType(resultType), argTypes, info,
  278. required);
  279. }
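/// Arrange the signature of a function that takes no arguments and returns
/// void, using the default calling convention.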
  280. const CGFunctionInfo &CodeGenTypes::arrangeNullaryFunction() {
  281. return arrangeFunctionType(getContext().VoidTy, ArrayRef<CanQualType>(),
  282. FunctionType::ExtInfo(), RequiredArgs::All);
  283. }
  284. /// Arrange the argument and result information for an abstract value
  285. /// of a given function type. This is the method which all of the
  286. /// above functions ultimately defer to.
  287. const CGFunctionInfo &
  288. CodeGenTypes::arrangeFunctionType(CanQualType resultType,
  289. ArrayRef<CanQualType> argTypes,
  290. const FunctionType::ExtInfo &info,
  291. RequiredArgs required) {
  292. #ifndef NDEBUG
  293. for (ArrayRef<CanQualType>::const_iterator
  294. I = argTypes.begin(), E = argTypes.end(); I != E; ++I)
  295. assert(I->isCanonicalAsParam());
  296. #endif
  297. unsigned CC = ClangCallConvToLLVMCallConv(info.getCC());
  298. // Lookup or create unique function info.
  299. llvm::FoldingSetNodeID ID;
  300. CGFunctionInfo::Profile(ID, info, required, resultType, argTypes);
  301. void *insertPos = 0;
  302. CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, insertPos);
  303. if (FI)
  304. return *FI;
  305. // Construct the function info. We co-allocate the ArgInfos.
  306. FI = CGFunctionInfo::create(CC, info, resultType, argTypes, required);
  307. FunctionInfos.InsertNode(FI, insertPos);
  308. bool inserted = FunctionsBeingProcessed.insert(FI); (void)inserted;
  309. assert(inserted && "Recursively being processed?");
  310. // Compute ABI information.
  311. getABIInfo().computeInfo(*FI);
  312. // Loop over all of the computed argument and return value info. If any of
  313. // them are direct or extend without a specified coerce type, specify the
  314. // default now.
  315. ABIArgInfo &retInfo = FI->getReturnInfo();
  316. if (retInfo.canHaveCoerceToType() && retInfo.getCoerceToType() == 0)
  317. retInfo.setCoerceToType(ConvertType(FI->getReturnType()));
  318. for (CGFunctionInfo::arg_iterator I = FI->arg_begin(), E = FI->arg_end();
  319. I != E; ++I)
  320. if (I->info.canHaveCoerceToType() && I->info.getCoerceToType() == 0)
  321. I->info.setCoerceToType(ConvertType(I->type));
  322. bool erased = FunctionsBeingProcessed.erase(FI); (void)erased;
  323. assert(erased && "Not in set?");
  324. return *FI;
  325. }
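/// Create a CGFunctionInfo, co-allocating the trailing buffer that holds the
/// return type followed by the argument ArgInfos.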
  326. CGFunctionInfo *CGFunctionInfo::create(unsigned llvmCC,
  327. const FunctionType::ExtInfo &info,
  328. CanQualType resultType,
  329. ArrayRef<CanQualType> argTypes,
  330. RequiredArgs required) {
  331. void *buffer = operator new(sizeof(CGFunctionInfo) +
  332. sizeof(ArgInfo) * (argTypes.size() + 1));
  333. CGFunctionInfo *FI = new(buffer) CGFunctionInfo();
  334. FI->CallingConvention = llvmCC;
  335. FI->EffectiveCallingConvention = llvmCC;
  336. FI->ASTCallingConvention = info.getCC();
  337. FI->NoReturn = info.getNoReturn();
  338. FI->ReturnsRetained = info.getProducesResult();
  339. FI->Required = required;
  340. FI->HasRegParm = info.getHasRegParm();
  341. FI->RegParm = info.getRegParm();
  342. FI->NumArgs = argTypes.size();
  343. FI->getArgsBuffer()[0].type = resultType;
  344. for (unsigned i = 0, e = argTypes.size(); i != e; ++i)
  345. FI->getArgsBuffer()[i + 1].type = argTypes[i];
  346. return FI;
  347. }
  348. /***/
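/// Flatten a type passed with the Expand ABI kind into its scalar LLVM
/// pieces: constant arrays and structures are expanded recursively, and a
/// complex type contributes two elements.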
  349. void CodeGenTypes::GetExpandedTypes(QualType type,
  350. SmallVectorImpl<llvm::Type*> &expandedTypes) {
  351. if (const ConstantArrayType *AT = Context.getAsConstantArrayType(type)) {
  352. uint64_t NumElts = AT->getSize().getZExtValue();
  353. for (uint64_t Elt = 0; Elt < NumElts; ++Elt)
  354. GetExpandedTypes(AT->getElementType(), expandedTypes);
  355. } else if (const RecordType *RT = type->getAsStructureType()) {
  356. const RecordDecl *RD = RT->getDecl();
  357. assert(!RD->hasFlexibleArrayMember() &&
  358. "Cannot expand structure with flexible array.");
  359. for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
  360. i != e; ++i) {
  361. const FieldDecl *FD = *i;
  362. assert(!FD->isBitField() &&
  363. "Cannot expand structure with bit-field members.");
  364. GetExpandedTypes(FD->getType(), expandedTypes);
  365. }
  366. } else if (const ComplexType *CT = type->getAs<ComplexType>()) {
  367. llvm::Type *EltTy = ConvertType(CT->getElementType());
  368. expandedTypes.push_back(EltTy);
  369. expandedTypes.push_back(EltTy);
  370. } else
  371. expandedTypes.push_back(ConvertType(type));
  372. }
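/// Reassemble an expanded parameter: walk the type in the same order as
/// GetExpandedTypes, storing successive LLVM arguments into the given lvalue,
/// and return the next unconsumed argument.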
  373. llvm::Function::arg_iterator
  374. CodeGenFunction::ExpandTypeFromArgs(QualType Ty, LValue LV,
  375. llvm::Function::arg_iterator AI) {
  376. assert(LV.isSimple() &&
  377. "Unexpected non-simple lvalue during struct expansion.");
  378. llvm::Value *Addr = LV.getAddress();
  379. if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
  380. unsigned NumElts = AT->getSize().getZExtValue();
  381. QualType EltTy = AT->getElementType();
  382. for (unsigned Elt = 0; Elt < NumElts; ++Elt) {
  383. llvm::Value *EltAddr = Builder.CreateConstGEP2_32(Addr, 0, Elt);
  384. LValue LV = MakeAddrLValue(EltAddr, EltTy);
  385. AI = ExpandTypeFromArgs(EltTy, LV, AI);
  386. }
  387. } else if (const RecordType *RT = Ty->getAsStructureType()) {
  388. RecordDecl *RD = RT->getDecl();
  389. for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
  390. i != e; ++i) {
  391. FieldDecl *FD = *i;
  392. QualType FT = FD->getType();
  393. // FIXME: What are the right qualifiers here?
  394. LValue LV = EmitLValueForField(Addr, FD, 0);
  395. AI = ExpandTypeFromArgs(FT, LV, AI);
  396. }
  397. } else if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
  398. QualType EltTy = CT->getElementType();
  399. llvm::Value *RealAddr = Builder.CreateStructGEP(Addr, 0, "real");
  400. EmitStoreThroughLValue(RValue::get(AI++), MakeAddrLValue(RealAddr, EltTy));
  401. llvm::Value *ImagAddr = Builder.CreateStructGEP(Addr, 1, "imag");
  402. EmitStoreThroughLValue(RValue::get(AI++), MakeAddrLValue(ImagAddr, EltTy));
  403. } else {
  404. EmitStoreThroughLValue(RValue::get(AI), LV);
  405. ++AI;
  406. }
  407. return AI;
  408. }
  409. /// EnterStructPointerForCoercedAccess - Given a struct pointer that we are
  410. /// accessing some number of bytes out of it, try to gep into the struct to get
  411. /// at its inner goodness. Dive as deep as possible without entering an element
  412. /// with an in-memory size smaller than DstSize.
  413. static llvm::Value *
  414. EnterStructPointerForCoercedAccess(llvm::Value *SrcPtr,
  415. llvm::StructType *SrcSTy,
  416. uint64_t DstSize, CodeGenFunction &CGF) {
  417. // We can't dive into a zero-element struct.
  418. if (SrcSTy->getNumElements() == 0) return SrcPtr;
  419. llvm::Type *FirstElt = SrcSTy->getElementType(0);
  420. // If the first elt is at least as large as what we're looking for, or if the
  421. // first element is the same size as the whole struct, we can enter it.
  422. uint64_t FirstEltSize =
  423. CGF.CGM.getTargetData().getTypeAllocSize(FirstElt);
  424. if (FirstEltSize < DstSize &&
  425. FirstEltSize < CGF.CGM.getTargetData().getTypeAllocSize(SrcSTy))
  426. return SrcPtr;
  427. // GEP into the first element.
  428. SrcPtr = CGF.Builder.CreateConstGEP2_32(SrcPtr, 0, 0, "coerce.dive");
  429. // If the first element is a struct, recurse.
  430. llvm::Type *SrcTy =
  431. cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
  432. if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy))
  433. return EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);
  434. return SrcPtr;
  435. }
  436. /// CoerceIntOrPtrToIntOrPtr - Convert a value Val to the specific Ty where both
  437. /// are either integers or pointers. This does a truncation of the value if it
  438. /// is too large or a zero extension if it is too small.
  439. static llvm::Value *CoerceIntOrPtrToIntOrPtr(llvm::Value *Val,
  440. llvm::Type *Ty,
  441. CodeGenFunction &CGF) {
  442. if (Val->getType() == Ty)
  443. return Val;
  444. if (isa<llvm::PointerType>(Val->getType())) {
  445. // If this is Pointer->Pointer avoid conversion to and from int.
  446. if (isa<llvm::PointerType>(Ty))
  447. return CGF.Builder.CreateBitCast(Val, Ty, "coerce.val");
  448. // Convert the pointer to an integer so we can play with its width.
  449. Val = CGF.Builder.CreatePtrToInt(Val, CGF.IntPtrTy, "coerce.val.pi");
  450. }
  451. llvm::Type *DestIntTy = Ty;
  452. if (isa<llvm::PointerType>(DestIntTy))
  453. DestIntTy = CGF.IntPtrTy;
  454. if (Val->getType() != DestIntTy)
  455. Val = CGF.Builder.CreateIntCast(Val, DestIntTy, false, "coerce.val.ii");
  456. if (isa<llvm::PointerType>(Ty))
  457. Val = CGF.Builder.CreateIntToPtr(Val, Ty, "coerce.val.ip");
  458. return Val;
  459. }
  460. /// CreateCoercedLoad - Create a load from \arg SrcPtr interpreted as
  461. /// a pointer to an object of type \arg Ty.
  462. ///
  463. /// This safely handles the case when the src type is smaller than the
  464. /// destination type; in this situation the values of bits which are not
  465. /// present in the src are undefined.
  466. static llvm::Value *CreateCoercedLoad(llvm::Value *SrcPtr,
  467. llvm::Type *Ty,
  468. CodeGenFunction &CGF) {
  469. llvm::Type *SrcTy =
  470. cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
  471. // If SrcTy and Ty are the same, just do a load.
  472. if (SrcTy == Ty)
  473. return CGF.Builder.CreateLoad(SrcPtr);
  474. uint64_t DstSize = CGF.CGM.getTargetData().getTypeAllocSize(Ty);
  475. if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) {
  476. SrcPtr = EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);
  477. SrcTy = cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
  478. }
  479. uint64_t SrcSize = CGF.CGM.getTargetData().getTypeAllocSize(SrcTy);
  480. // If the source and destination are integer or pointer types, just do an
  481. // extension or truncation to the desired type.
  482. if ((isa<llvm::IntegerType>(Ty) || isa<llvm::PointerType>(Ty)) &&
  483. (isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy))) {
  484. llvm::LoadInst *Load = CGF.Builder.CreateLoad(SrcPtr);
  485. return CoerceIntOrPtrToIntOrPtr(Load, Ty, CGF);
  486. }
  487. // If load is legal, just bitcast the src pointer.
  488. if (SrcSize >= DstSize) {
  489. // Generally SrcSize is never greater than DstSize, since this means we are
  490. // losing bits. However, this can happen in cases where the structure has
  491. // additional padding, for example due to a user specified alignment.
  492. //
  493. // FIXME: Assert that we aren't truncating non-padding bits when we have access
  494. // to that information.
  495. llvm::Value *Casted =
  496. CGF.Builder.CreateBitCast(SrcPtr, llvm::PointerType::getUnqual(Ty));
  497. llvm::LoadInst *Load = CGF.Builder.CreateLoad(Casted);
  498. // FIXME: Use better alignment / avoid requiring aligned load.
  499. Load->setAlignment(1);
  500. return Load;
  501. }
  502. // Otherwise do coercion through memory. This is stupid, but
  503. // simple.
  504. llvm::Value *Tmp = CGF.CreateTempAlloca(Ty);
  505. llvm::Value *Casted =
  506. CGF.Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(SrcTy));
  507. llvm::StoreInst *Store =
  508. CGF.Builder.CreateStore(CGF.Builder.CreateLoad(SrcPtr), Casted);
  509. // FIXME: Use better alignment / avoid requiring aligned store.
  510. Store->setAlignment(1);
  511. return CGF.Builder.CreateLoad(Tmp);
  512. }
  513. // Function to store a first-class aggregate into memory. We prefer to
  514. // store the elements rather than the aggregate to be more friendly to
  515. // fast-isel.
  516. // FIXME: Do we need to recurse here?
  517. static void BuildAggStore(CodeGenFunction &CGF, llvm::Value *Val,
  518. llvm::Value *DestPtr, bool DestIsVolatile,
  519. bool LowAlignment) {
  520. // Prefer scalar stores to first-class aggregate stores.
  521. if (llvm::StructType *STy =
  522. dyn_cast<llvm::StructType>(Val->getType())) {
  523. for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
  524. llvm::Value *EltPtr = CGF.Builder.CreateConstGEP2_32(DestPtr, 0, i);
  525. llvm::Value *Elt = CGF.Builder.CreateExtractValue(Val, i);
  526. llvm::StoreInst *SI = CGF.Builder.CreateStore(Elt, EltPtr,
  527. DestIsVolatile);
  528. if (LowAlignment)
  529. SI->setAlignment(1);
  530. }
  531. } else {
  532. CGF.Builder.CreateStore(Val, DestPtr, DestIsVolatile);
  533. }
  534. }
  535. /// CreateCoercedStore - Create a store to \arg DstPtr from \arg Src,
  536. /// where the source and destination may have different types.
  537. ///
  538. /// This safely handles the case when the src type is larger than the
  539. /// destination type; the upper bits of the src will be lost.
  540. static void CreateCoercedStore(llvm::Value *Src,
  541. llvm::Value *DstPtr,
  542. bool DstIsVolatile,
  543. CodeGenFunction &CGF) {
  544. llvm::Type *SrcTy = Src->getType();
  545. llvm::Type *DstTy =
  546. cast<llvm::PointerType>(DstPtr->getType())->getElementType();
  547. if (SrcTy == DstTy) {
  548. CGF.Builder.CreateStore(Src, DstPtr, DstIsVolatile);
  549. return;
  550. }
  551. uint64_t SrcSize = CGF.CGM.getTargetData().getTypeAllocSize(SrcTy);
  552. if (llvm::StructType *DstSTy = dyn_cast<llvm::StructType>(DstTy)) {
  553. DstPtr = EnterStructPointerForCoercedAccess(DstPtr, DstSTy, SrcSize, CGF);
  554. DstTy = cast<llvm::PointerType>(DstPtr->getType())->getElementType();
  555. }
  556. // If the source and destination are integer or pointer types, just do an
  557. // extension or truncation to the desired type.
  558. if ((isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy)) &&
  559. (isa<llvm::IntegerType>(DstTy) || isa<llvm::PointerType>(DstTy))) {
  560. Src = CoerceIntOrPtrToIntOrPtr(Src, DstTy, CGF);
  561. CGF.Builder.CreateStore(Src, DstPtr, DstIsVolatile);
  562. return;
  563. }
  564. uint64_t DstSize = CGF.CGM.getTargetData().getTypeAllocSize(DstTy);
  565. // If store is legal, just bitcast the src pointer.
  566. if (SrcSize <= DstSize) {
  567. llvm::Value *Casted =
  568. CGF.Builder.CreateBitCast(DstPtr, llvm::PointerType::getUnqual(SrcTy));
  569. // FIXME: Use better alignment / avoid requiring aligned store.
  570. BuildAggStore(CGF, Src, Casted, DstIsVolatile, true);
  571. } else {
  572. // Otherwise do coercion through memory. This is stupid, but
  573. // simple.
  574. // Generally SrcSize is never greater than DstSize, since this means we are
  575. // losing bits. However, this can happen in cases where the structure has
  576. // additional padding, for example due to a user specified alignment.
  577. //
  578. // FIXME: Assert that we aren't truncating non-padding bits when we have access
  579. // to that information.
  580. llvm::Value *Tmp = CGF.CreateTempAlloca(SrcTy);
  581. CGF.Builder.CreateStore(Src, Tmp);
  582. llvm::Value *Casted =
  583. CGF.Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(DstTy));
  584. llvm::LoadInst *Load = CGF.Builder.CreateLoad(Casted);
  585. // FIXME: Use better alignment / avoid requiring aligned load.
  586. Load->setAlignment(1);
  587. CGF.Builder.CreateStore(Load, DstPtr, DstIsVolatile);
  588. }
  589. }
  590. /***/
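/// Return true if the return value is passed indirectly through a hidden
/// sret argument.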
  591. bool CodeGenModule::ReturnTypeUsesSRet(const CGFunctionInfo &FI) {
  592. return FI.getReturnInfo().isIndirect();
  593. }
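/// Return true if a message returning this type should use the _fpret
/// variant of objc_msgSend on this target.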
  594. bool CodeGenModule::ReturnTypeUsesFPRet(QualType ResultType) {
  595. if (const BuiltinType *BT = ResultType->getAs<BuiltinType>()) {
  596. switch (BT->getKind()) {
  597. default:
  598. return false;
  599. case BuiltinType::Float:
  600. return getContext().getTargetInfo().useObjCFPRetForRealType(TargetInfo::Float);
  601. case BuiltinType::Double:
  602. return getContext().getTargetInfo().useObjCFPRetForRealType(TargetInfo::Double);
  603. case BuiltinType::LongDouble:
  604. return getContext().getTargetInfo().useObjCFPRetForRealType(
  605. TargetInfo::LongDouble);
  606. }
  607. }
  608. return false;
  609. }
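/// Return true if a message returning this type should use the _fp2ret
/// variant of objc_msgSend (complex long double on some targets).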
  610. bool CodeGenModule::ReturnTypeUsesFP2Ret(QualType ResultType) {
  611. if (const ComplexType *CT = ResultType->getAs<ComplexType>()) {
  612. if (const BuiltinType *BT = CT->getElementType()->getAs<BuiltinType>()) {
  613. if (BT->getKind() == BuiltinType::LongDouble)
  614. return getContext().getTargetInfo().useObjCFP2RetForComplexLongDouble();
  615. }
  616. }
  617. return false;
  618. }
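/// Return the LLVM function type used for the given global declaration.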
  619. llvm::FunctionType *CodeGenTypes::GetFunctionType(GlobalDecl GD) {
  620. const CGFunctionInfo &FI = arrangeGlobalDeclaration(GD);
  621. return GetFunctionType(FI);
  622. }
  623. llvm::FunctionType *
  624. CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI) {
  625. bool Inserted = FunctionsBeingProcessed.insert(&FI); (void)Inserted;
  626. assert(Inserted && "Recursively being processed?");
  627. SmallVector<llvm::Type*, 8> argTypes;
  628. llvm::Type *resultType = 0;
  629. const ABIArgInfo &retAI = FI.getReturnInfo();
  630. switch (retAI.getKind()) {
  631. case ABIArgInfo::Expand:
  632. llvm_unreachable("Invalid ABI kind for return argument");
  633. case ABIArgInfo::Extend:
  634. case ABIArgInfo::Direct:
  635. resultType = retAI.getCoerceToType();
  636. break;
  637. case ABIArgInfo::Indirect: {
  638. assert(!retAI.getIndirectAlign() && "Align unused on indirect return.");
  639. resultType = llvm::Type::getVoidTy(getLLVMContext());
  640. QualType ret = FI.getReturnType();
  641. llvm::Type *ty = ConvertType(ret);
  642. unsigned addressSpace = Context.getTargetAddressSpace(ret);
  643. argTypes.push_back(llvm::PointerType::get(ty, addressSpace));
  644. break;
  645. }
  646. case ABIArgInfo::Ignore:
  647. resultType = llvm::Type::getVoidTy(getLLVMContext());
  648. break;
  649. }
  650. for (CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
  651. ie = FI.arg_end(); it != ie; ++it) {
  652. const ABIArgInfo &argAI = it->info;
  653. switch (argAI.getKind()) {
  654. case ABIArgInfo::Ignore:
  655. break;
  656. case ABIArgInfo::Indirect: {
  657. // indirect arguments are always on the stack, which is addr space #0.
  658. llvm::Type *LTy = ConvertTypeForMem(it->type);
  659. argTypes.push_back(LTy->getPointerTo());
  660. break;
  661. }
  662. case ABIArgInfo::Extend:
  663. case ABIArgInfo::Direct: {
  664. // Insert a padding type to ensure proper alignment.
  665. if (llvm::Type *PaddingType = argAI.getPaddingType())
  666. argTypes.push_back(PaddingType);
  667. // If the coerce-to type is a first class aggregate, flatten it. Either
  668. // way is semantically identical, but fast-isel and the optimizer
  669. // generally like scalar values better than FCAs.
  670. llvm::Type *argType = argAI.getCoerceToType();
  671. if (llvm::StructType *st = dyn_cast<llvm::StructType>(argType)) {
  672. for (unsigned i = 0, e = st->getNumElements(); i != e; ++i)
  673. argTypes.push_back(st->getElementType(i));
  674. } else {
  675. argTypes.push_back(argType);
  676. }
  677. break;
  678. }
  679. case ABIArgInfo::Expand:
  680. GetExpandedTypes(it->type, argTypes);
  681. break;
  682. }
  683. }
  684. bool Erased = FunctionsBeingProcessed.erase(&FI); (void)Erased;
  685. assert(Erased && "Not in set?");
  686. return llvm::FunctionType::get(resultType, argTypes, FI.isVariadic());
  687. }
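/// Return the LLVM type to use for a vtable slot holding this method; if the
/// function type cannot be converted yet, an empty struct type is returned as
/// a placeholder.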
  688. llvm::Type *CodeGenTypes::GetFunctionTypeForVTable(GlobalDecl GD) {
  689. const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
  690. const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
  691. if (!isFuncTypeConvertible(FPT))
  692. return llvm::StructType::get(getLLVMContext());
  693. const CGFunctionInfo *Info;
  694. if (isa<CXXDestructorDecl>(MD))
  695. Info = &arrangeCXXDestructor(cast<CXXDestructorDecl>(MD), GD.getDtorType());
  696. else
  697. Info = &arrangeCXXMethodDeclaration(MD);
  698. return GetFunctionType(*Info);
  699. }
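/// Build the LLVM attribute list (function, return, and parameter attributes)
/// for a call or definition with the given function info, and report the
/// effective calling convention.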
  700. void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
  701. const Decl *TargetDecl,
  702. AttributeListType &PAL,
  703. unsigned &CallingConv) {
  704. llvm::Attributes FuncAttrs;
  705. llvm::Attributes RetAttrs;
  706. CallingConv = FI.getEffectiveCallingConvention();
  707. if (FI.isNoReturn())
  708. FuncAttrs |= llvm::Attribute::NoReturn;
  709. // FIXME: handle sseregparm someday...
  710. if (TargetDecl) {
  711. if (TargetDecl->hasAttr<ReturnsTwiceAttr>())
  712. FuncAttrs |= llvm::Attribute::ReturnsTwice;
  713. if (TargetDecl->hasAttr<NoThrowAttr>())
  714. FuncAttrs |= llvm::Attribute::NoUnwind;
  715. else if (const FunctionDecl *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
  716. const FunctionProtoType *FPT = Fn->getType()->getAs<FunctionProtoType>();
  717. if (FPT && FPT->isNothrow(getContext()))
  718. FuncAttrs |= llvm::Attribute::NoUnwind;
  719. }
  720. if (TargetDecl->hasAttr<NoReturnAttr>())
  721. FuncAttrs |= llvm::Attribute::NoReturn;
  722. if (TargetDecl->hasAttr<ReturnsTwiceAttr>())
  723. FuncAttrs |= llvm::Attribute::ReturnsTwice;
  724. // 'const' and 'pure' attribute functions are also nounwind.
  725. if (TargetDecl->hasAttr<ConstAttr>()) {
  726. FuncAttrs |= llvm::Attribute::ReadNone;
  727. FuncAttrs |= llvm::Attribute::NoUnwind;
  728. } else if (TargetDecl->hasAttr<PureAttr>()) {
  729. FuncAttrs |= llvm::Attribute::ReadOnly;
  730. FuncAttrs |= llvm::Attribute::NoUnwind;
  731. }
  732. if (TargetDecl->hasAttr<MallocAttr>())
  733. RetAttrs |= llvm::Attribute::NoAlias;
  734. }
  735. if (CodeGenOpts.OptimizeSize)
  736. FuncAttrs |= llvm::Attribute::OptimizeForSize;
  737. if (CodeGenOpts.DisableRedZone)
  738. FuncAttrs |= llvm::Attribute::NoRedZone;
  739. if (CodeGenOpts.NoImplicitFloat)
  740. FuncAttrs |= llvm::Attribute::NoImplicitFloat;
  741. QualType RetTy = FI.getReturnType();
  742. unsigned Index = 1;
  743. const ABIArgInfo &RetAI = FI.getReturnInfo();
  744. switch (RetAI.getKind()) {
  745. case ABIArgInfo::Extend:
  746. if (RetTy->hasSignedIntegerRepresentation())
  747. RetAttrs |= llvm::Attribute::SExt;
  748. else if (RetTy->hasUnsignedIntegerRepresentation())
  749. RetAttrs |= llvm::Attribute::ZExt;
  750. break;
  751. case ABIArgInfo::Direct:
  752. case ABIArgInfo::Ignore:
  753. break;
  754. case ABIArgInfo::Indirect:
  755. PAL.push_back(llvm::AttributeWithIndex::get(Index,
  756. llvm::Attribute::StructRet));
  757. ++Index;
  758. // sret disables readnone and readonly
  759. FuncAttrs &= ~(llvm::Attribute::ReadOnly |
  760. llvm::Attribute::ReadNone);
  761. break;
  762. case ABIArgInfo::Expand:
  763. llvm_unreachable("Invalid ABI kind for return argument");
  764. }
  765. if (RetAttrs)
  766. PAL.push_back(llvm::AttributeWithIndex::get(0, RetAttrs));
  767. // FIXME: RegParm should be reduced in case of global register variable.
  768. signed RegParm;
  769. if (FI.getHasRegParm())
  770. RegParm = FI.getRegParm();
  771. else
  772. RegParm = CodeGenOpts.NumRegisterParameters;
  773. unsigned PointerWidth = getContext().getTargetInfo().getPointerWidth(0);
  774. for (CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
  775. ie = FI.arg_end(); it != ie; ++it) {
  776. QualType ParamType = it->type;
  777. const ABIArgInfo &AI = it->info;
  778. llvm::Attributes Attrs;
  779. // 'restrict' -> 'noalias' is done in EmitFunctionProlog when we
  780. // have the corresponding parameter variable. It doesn't make
  781. // sense to do it here because parameters are so messed up.
  782. switch (AI.getKind()) {
  783. case ABIArgInfo::Extend:
  784. if (ParamType->isSignedIntegerOrEnumerationType())
  785. Attrs |= llvm::Attribute::SExt;
  786. else if (ParamType->isUnsignedIntegerOrEnumerationType())
  787. Attrs |= llvm::Attribute::ZExt;
  788. // FALL THROUGH
  789. case ABIArgInfo::Direct:
  790. if (RegParm > 0 &&
  791. (ParamType->isIntegerType() || ParamType->isPointerType() ||
  792. ParamType->isReferenceType())) {
  793. RegParm -=
  794. (Context.getTypeSize(ParamType) + PointerWidth - 1) / PointerWidth;
  795. if (RegParm >= 0)
  796. Attrs |= llvm::Attribute::InReg;
  797. }
  798. // FIXME: handle sseregparm someday...
  799. // Increment Index if there is padding.
  800. Index += (AI.getPaddingType() != 0);
  801. if (llvm::StructType *STy =
  802. dyn_cast<llvm::StructType>(AI.getCoerceToType()))
  803. Index += STy->getNumElements()-1; // 1 will be added below.
  804. break;
  805. case ABIArgInfo::Indirect:
  806. if (AI.getIndirectByVal())
  807. Attrs |= llvm::Attribute::ByVal;
  808. Attrs |=
  809. llvm::Attribute::constructAlignmentFromInt(AI.getIndirectAlign());
  810. // byval disables readnone and readonly.
  811. FuncAttrs &= ~(llvm::Attribute::ReadOnly |
  812. llvm::Attribute::ReadNone);
  813. break;
  814. case ABIArgInfo::Ignore:
  815. // Skip increment, no matching LLVM parameter.
  816. continue;
  817. case ABIArgInfo::Expand: {
  818. SmallVector<llvm::Type*, 8> types;
  819. // FIXME: This is rather inefficient. Do we ever actually need to do
  820. // anything here? The result should be just reconstructed on the other
  821. // side, so extension should be a non-issue.
  822. getTypes().GetExpandedTypes(ParamType, types);
  823. Index += types.size();
  824. continue;
  825. }
  826. }
  827. if (Attrs)
  828. PAL.push_back(llvm::AttributeWithIndex::get(Index, Attrs));
  829. ++Index;
  830. }
  831. if (FuncAttrs)
  832. PAL.push_back(llvm::AttributeWithIndex::get(~0, FuncAttrs));
  833. }
  834. /// An argument came in as a promoted argument; demote it back to its
  835. /// declared type.
  836. static llvm::Value *emitArgumentDemotion(CodeGenFunction &CGF,
  837. const VarDecl *var,
  838. llvm::Value *value) {
  839. llvm::Type *varType = CGF.ConvertType(var->getType());
  840. // This can happen with promotions that actually don't change the
  841. // underlying type, like the enum promotions.
  842. if (value->getType() == varType) return value;
  843. assert((varType->isIntegerTy() || varType->isFloatingPointTy())
  844. && "unexpected promotion type");
  845. if (isa<llvm::IntegerType>(varType))
  846. return CGF.Builder.CreateTrunc(value, varType, "arg.unpromote");
  847. return CGF.Builder.CreateFPCast(value, varType, "arg.unpromote");
  848. }
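/// Emit the function prologue: name the incoming LLVM arguments, load or
/// reconstruct them according to their ABI classification, and bind them to
/// the corresponding parameter declarations.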
  849. void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
  850. llvm::Function *Fn,
  851. const FunctionArgList &Args) {
  852. // If this is an implicit-return-zero function, go ahead and
  853. // initialize the return value. TODO: it might be nice to have
  854. // a more general mechanism for this that didn't require synthesized
  855. // return statements.
  856. if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl)) {
  857. if (FD->hasImplicitReturnZero()) {
  858. QualType RetTy = FD->getResultType().getUnqualifiedType();
  859. llvm::Type* LLVMTy = CGM.getTypes().ConvertType(RetTy);
  860. llvm::Constant* Zero = llvm::Constant::getNullValue(LLVMTy);
  861. Builder.CreateStore(Zero, ReturnValue);
  862. }
  863. }
  864. // FIXME: We no longer need the types from FunctionArgList; lift up and
  865. // simplify.
  866. // Emit allocs for param decls. Give the LLVM Argument nodes names.
  867. llvm::Function::arg_iterator AI = Fn->arg_begin();
  868. // Name the struct return argument.
  869. if (CGM.ReturnTypeUsesSRet(FI)) {
  870. AI->setName("agg.result");
  871. AI->addAttr(llvm::Attribute::NoAlias);
  872. ++AI;
  873. }
  874. assert(FI.arg_size() == Args.size() &&
  875. "Mismatch between function signature & arguments.");
  876. unsigned ArgNo = 1;
  877. CGFunctionInfo::const_arg_iterator info_it = FI.arg_begin();
  878. for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
  879. i != e; ++i, ++info_it, ++ArgNo) {
  880. const VarDecl *Arg = *i;
  881. QualType Ty = info_it->type;
  882. const ABIArgInfo &ArgI = info_it->info;
  883. bool isPromoted =
  884. isa<ParmVarDecl>(Arg) && cast<ParmVarDecl>(Arg)->isKNRPromoted();
  885. switch (ArgI.getKind()) {
  886. case ABIArgInfo::Indirect: {
  887. llvm::Value *V = AI;
  888. if (hasAggregateLLVMType(Ty)) {
  889. // Aggregates and complex variables are accessed by reference. All we
  890. // need to do is realign the value, if requested
  891. if (ArgI.getIndirectRealign()) {
  892. llvm::Value *AlignedTemp = CreateMemTemp(Ty, "coerce");
  893. // Copy from the incoming argument pointer to the temporary with the
  894. // appropriate alignment.
  895. //
  896. // FIXME: We should have a common utility for generating an aggregate
  897. // copy.
  898. llvm::Type *I8PtrTy = Builder.getInt8PtrTy();
  899. CharUnits Size = getContext().getTypeSizeInChars(Ty);
  900. llvm::Value *Dst = Builder.CreateBitCast(AlignedTemp, I8PtrTy);
  901. llvm::Value *Src = Builder.CreateBitCast(V, I8PtrTy);
  902. Builder.CreateMemCpy(Dst,
  903. Src,
  904. llvm::ConstantInt::get(IntPtrTy,
  905. Size.getQuantity()),
  906. ArgI.getIndirectAlign(),
  907. false);
  908. V = AlignedTemp;
  909. }
  910. } else {
  911. // Load scalar value from indirect argument.
  912. CharUnits Alignment = getContext().getTypeAlignInChars(Ty);
  913. V = EmitLoadOfScalar(V, false, Alignment.getQuantity(), Ty);
  914. if (isPromoted)
  915. V = emitArgumentDemotion(*this, Arg, V);
  916. }
  917. EmitParmDecl(*Arg, V, ArgNo);
  918. break;
  919. }
  920. case ABIArgInfo::Extend:
  921. case ABIArgInfo::Direct: {
  922. // Skip the dummy padding argument.
  923. if (ArgI.getPaddingType())
  924. ++AI;
  925. // If we have the trivial case, handle it with no muss and fuss.
  926. if (!isa<llvm::StructType>(ArgI.getCoerceToType()) &&
  927. ArgI.getCoerceToType() == ConvertType(Ty) &&
  928. ArgI.getDirectOffset() == 0) {
  929. assert(AI != Fn->arg_end() && "Argument mismatch!");
  930. llvm::Value *V = AI;
  931. if (Arg->getType().isRestrictQualified())
  932. AI->addAttr(llvm::Attribute::NoAlias);
  933. // Ensure the argument is the correct type.
  934. if (V->getType() != ArgI.getCoerceToType())
  935. V = Builder.CreateBitCast(V, ArgI.getCoerceToType());
  936. if (isPromoted)
  937. V = emitArgumentDemotion(*this, Arg, V);
  938. EmitParmDecl(*Arg, V, ArgNo);
  939. break;
  940. }
  941. llvm::AllocaInst *Alloca = CreateMemTemp(Ty, Arg->getName());
  942. // The alignment we need to use is the max of the requested alignment for
  943. // the argument plus the alignment required by our access code below.
  944. unsigned AlignmentToUse =
  945. CGM.getTargetData().getABITypeAlignment(ArgI.getCoerceToType());
  946. AlignmentToUse = std::max(AlignmentToUse,
  947. (unsigned)getContext().getDeclAlign(Arg).getQuantity());
  948. Alloca->setAlignment(AlignmentToUse);
  949. llvm::Value *V = Alloca;
  950. llvm::Value *Ptr = V; // Pointer to store into.
  951. // If the value is offset in memory, apply the offset now.
  952. if (unsigned Offs = ArgI.getDirectOffset()) {
  953. Ptr = Builder.CreateBitCast(Ptr, Builder.getInt8PtrTy());
  954. Ptr = Builder.CreateConstGEP1_32(Ptr, Offs);
  955. Ptr = Builder.CreateBitCast(Ptr,
  956. llvm::PointerType::getUnqual(ArgI.getCoerceToType()));
  957. }
  958. // If the coerce-to type is a first class aggregate, we flatten it and
  959. // pass the elements. Either way is semantically identical, but fast-isel
  960. // and the optimizer generally like scalar values better than FCAs.
  961. llvm::StructType *STy = dyn_cast<llvm::StructType>(ArgI.getCoerceToType());
  962. if (STy && STy->getNumElements() > 1) {
  963. uint64_t SrcSize = CGM.getTargetData().getTypeAllocSize(STy);
  964. llvm::Type *DstTy =
  965. cast<llvm::PointerType>(Ptr->getType())->getElementType();
  966. uint64_t DstSize = CGM.getTargetData().getTypeAllocSize(DstTy);
  967. if (SrcSize <= DstSize) {
  968. Ptr = Builder.CreateBitCast(Ptr, llvm::PointerType::getUnqual(STy));
  969. for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
  970. assert(AI != Fn->arg_end() && "Argument mismatch!");
  971. AI->setName(Arg->getName() + ".coerce" + Twine(i));
  972. llvm::Value *EltPtr = Builder.CreateConstGEP2_32(Ptr, 0, i);
  973. Builder.CreateStore(AI++, EltPtr);
  974. }
  975. } else {
  976. llvm::AllocaInst *TempAlloca =
  977. CreateTempAlloca(ArgI.getCoerceToType(), "coerce");
  978. TempAlloca->setAlignment(AlignmentToUse);
  979. llvm::Value *TempV = TempAlloca;
  980. for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
  981. assert(AI != Fn->arg_end() && "Argument mismatch!");
  982. AI->setName(Arg->getName() + ".coerce" + Twine(i));
  983. llvm::Value *EltPtr = Builder.CreateConstGEP2_32(TempV, 0, i);
  984. Builder.CreateStore(AI++, EltPtr);
  985. }
  986. Builder.CreateMemCpy(Ptr, TempV, DstSize, AlignmentToUse);
  987. }
  988. } else {
  989. // Simple case, just do a coerced store of the argument into the alloca.
  990. assert(AI != Fn->arg_end() && "Argument mismatch!");
  991. AI->setName(Arg->getName() + ".coerce");
  992. CreateCoercedStore(AI++, Ptr, /*DestIsVolatile=*/false, *this);
  993. }
  994. // Match to what EmitParmDecl is expecting for this type.
  995. if (!CodeGenFunction::hasAggregateLLVMType(Ty)) {
  996. V = EmitLoadOfScalar(V, false, AlignmentToUse, Ty);
  997. if (isPromoted)
  998. V = emitArgumentDemotion(*this, Arg, V);
  999. }
  1000. EmitParmDecl(*Arg, V, ArgNo);
  1001. continue; // Skip ++AI increment, already done.
  1002. }
  1003. case ABIArgInfo::Expand: {
  1004. // If this structure was expanded into multiple arguments then
  1005. // we need to create a temporary and reconstruct it from the
  1006. // arguments.
  1007. llvm::AllocaInst *Alloca = CreateMemTemp(Ty);
  1008. CharUnits Align = getContext().getDeclAlign(Arg);
  1009. Alloca->setAlignment(Align.getQuantity());
  1010. LValue LV = MakeAddrLValue(Alloca, Ty, Align);
  1011. llvm::Function::arg_iterator End = ExpandTypeFromArgs(Ty, LV, AI);
  1012. EmitParmDecl(*Arg, Alloca, ArgNo);
  1013. // Name the arguments used in expansion and increment AI.
  1014. unsigned Index = 0;
  1015. for (; AI != End; ++AI, ++Index)
  1016. AI->setName(Arg->getName() + "." + Twine(Index));
  1017. continue;
  1018. }
  1019. case ABIArgInfo::Ignore:
  1020. // Initialize the local variable appropriately.
  1021. if (hasAggregateLLVMType(Ty))
  1022. EmitParmDecl(*Arg, CreateMemTemp(Ty), ArgNo);
  1023. else
  1024. EmitParmDecl(*Arg, llvm::UndefValue::get(ConvertType(Arg->getType())),
  1025. ArgNo);
  1026. // Skip increment, no matching LLVM parameter.
  1027. continue;
  1028. }
  1029. ++AI;
  1030. }
  1031. assert(AI == Fn->arg_end() && "Argument mismatch!");
  1032. }
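/// Erase a chain of unused bitcasts, starting at the given instruction and
/// walking up through its operand while each instruction is dead.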
  1033. static void eraseUnusedBitCasts(llvm::Instruction *insn) {
  1034. while (insn->use_empty()) {
  1035. llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(insn);
  1036. if (!bitcast) return;
  1037. // This is "safe" because we would have used a ConstantExpr otherwise.
  1038. insn = cast<llvm::Instruction>(bitcast->getOperand(0));
  1039. bitcast->eraseFromParent();
  1040. }
  1041. }
  1042. /// Try to emit a fused autorelease of a return result.
  1043. static llvm::Value *tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF,
  1044. llvm::Value *result) {
  1045. // We must be immediately followed by the cast.
  llvm::BasicBlock *BB = CGF.Builder.GetInsertBlock();
  if (BB->empty()) return 0;
  if (&BB->back() != result) return 0;

  llvm::Type *resultType = result->getType();

  // result is in a BasicBlock and is therefore an Instruction.
  llvm::Instruction *generator = cast<llvm::Instruction>(result);

  SmallVector<llvm::Instruction*,4> insnsToKill;

  // Look for:
  //   %generator = bitcast %type1* %generator2 to %type2*
  while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(generator)) {
    // We would have emitted this as a constant if the operand weren't
    // an Instruction.
    generator = cast<llvm::Instruction>(bitcast->getOperand(0));

    // Require the generator to be immediately followed by the cast.
    if (generator->getNextNode() != bitcast)
      return 0;

    insnsToKill.push_back(bitcast);
  }

  // Look for:
  //   %generator = call i8* @objc_retain(i8* %originalResult)
  // or
  //   %generator = call i8* @objc_retainAutoreleasedReturnValue(i8* %originalResult)
  llvm::CallInst *call = dyn_cast<llvm::CallInst>(generator);
  if (!call) return 0;

  bool doRetainAutorelease;

  if (call->getCalledValue() == CGF.CGM.getARCEntrypoints().objc_retain) {
    doRetainAutorelease = true;
  } else if (call->getCalledValue() == CGF.CGM.getARCEntrypoints()
                                          .objc_retainAutoreleasedReturnValue) {
    doRetainAutorelease = false;

    // Look for an inline asm immediately preceding the call and kill it, too.
    llvm::Instruction *prev = call->getPrevNode();
    if (llvm::CallInst *asmCall = dyn_cast_or_null<llvm::CallInst>(prev))
      if (asmCall->getCalledValue()
            == CGF.CGM.getARCEntrypoints().retainAutoreleasedReturnValueMarker)
        insnsToKill.push_back(prev);
  } else {
    return 0;
  }

  result = call->getArgOperand(0);
  insnsToKill.push_back(call);

  // Keep killing bitcasts, for sanity.  Note that we no longer care
  // about precise ordering as long as there's exactly one use.
  while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(result)) {
    if (!bitcast->hasOneUse()) break;
    insnsToKill.push_back(bitcast);
    result = bitcast->getOperand(0);
  }

  // Delete all the unnecessary instructions, from latest to earliest.
  for (SmallVectorImpl<llvm::Instruction*>::iterator
         i = insnsToKill.begin(), e = insnsToKill.end(); i != e; ++i)
    (*i)->eraseFromParent();

  // Do the fused retain/autorelease if we were asked to.
  if (doRetainAutorelease)
    result = CGF.EmitARCRetainAutoreleaseReturnValue(result);

  // Cast back to the result type.
  return CGF.Builder.CreateBitCast(result, resultType);
}

/// If this is a +1 of the value of an immutable 'self', remove it.
static llvm::Value *tryRemoveRetainOfSelf(CodeGenFunction &CGF,
                                          llvm::Value *result) {
  // This is only applicable to a method with an immutable 'self'.
  const ObjCMethodDecl *method = dyn_cast<ObjCMethodDecl>(CGF.CurCodeDecl);
  if (!method) return 0;
  const VarDecl *self = method->getSelfDecl();
  if (!self->getType().isConstQualified()) return 0;

  // Look for a retain call.
  llvm::CallInst *retainCall =
    dyn_cast<llvm::CallInst>(result->stripPointerCasts());
  if (!retainCall ||
      retainCall->getCalledValue() != CGF.CGM.getARCEntrypoints().objc_retain)
    return 0;

  // Look for an ordinary load of 'self'.
  llvm::Value *retainedValue = retainCall->getArgOperand(0);
  llvm::LoadInst *load =
    dyn_cast<llvm::LoadInst>(retainedValue->stripPointerCasts());
  if (!load || load->isAtomic() || load->isVolatile() ||
      load->getPointerOperand() != CGF.GetAddrOfLocalVar(self))
    return 0;

  // Okay!  Burn it all down.  This relies for correctness on the
  // assumption that the retain is emitted as part of the return and
  // that thereafter everything is used "linearly".
  llvm::Type *resultType = result->getType();
  eraseUnusedBitCasts(cast<llvm::Instruction>(result));
  assert(retainCall->use_empty());
  retainCall->eraseFromParent();
  eraseUnusedBitCasts(cast<llvm::Instruction>(retainedValue));

  return CGF.Builder.CreateBitCast(load, resultType);
}

/// Emit an ARC autorelease of the result of a function.
///
/// \return the value to actually return from the function
static llvm::Value *emitAutoreleaseOfResult(CodeGenFunction &CGF,
                                            llvm::Value *result) {
  // If we're returning 'self', kill the initial retain.  This is a
  // heuristic attempt to "encourage correctness" in the really unfortunate
  // case where we have a return of self during a dealloc and we desperately
  // need to avoid the possible autorelease.
  if (llvm::Value *self = tryRemoveRetainOfSelf(CGF, result))
    return self;

  // At -O0, try to emit a fused retain/autorelease.
  if (CGF.shouldUseFusedARCCalls())
    if (llvm::Value *fused = tryEmitFusedAutoreleaseOfResult(CGF, result))
      return fused;

  return CGF.EmitARCAutoreleaseReturnValue(result);
}

/// Heuristically search for a dominating store to the return-value slot.
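///
/// This is best-effort only: it either inspects the instruction immediately
/// before the insertion point or walks the single-predecessor chain back to
/// the store's block, so it returns the store only when dominance is easy to
/// prove, and null otherwise.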
static llvm::StoreInst *findDominatingStoreToReturnValue(CodeGenFunction &CGF) {
  // If there are multiple uses of the return-value slot, just check
  // for something immediately preceding the IP.  Sometimes this can
  // happen with how we generate implicit-returns; it can also happen
  // with noreturn cleanups.
  if (!CGF.ReturnValue->hasOneUse()) {
    llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
    if (IP->empty()) return 0;
    llvm::StoreInst *store = dyn_cast<llvm::StoreInst>(&IP->back());
    if (!store) return 0;
    if (store->getPointerOperand() != CGF.ReturnValue) return 0;
    assert(!store->isAtomic() && !store->isVolatile()); // see below
    return store;
  }

  llvm::StoreInst *store =
    dyn_cast<llvm::StoreInst>(CGF.ReturnValue->use_back());
  if (!store) return 0;

  // These aren't actually possible for non-coerced returns, and we
  // only care about non-coerced returns on this code path.
  assert(!store->isAtomic() && !store->isVolatile());

  // Now do a first-and-dirty dominance check: just walk up the
  // single-predecessors chain from the current insertion point.
  llvm::BasicBlock *StoreBB = store->getParent();
  llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
  while (IP != StoreBB) {
    if (!(IP = IP->getSinglePredecessor()))
      return 0;
  }

  // Okay, the store's basic block dominates the insertion point; we
  // can do our thing.
  return store;
}
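
/// Emit the function epilogue: materialize the return value (if any)
/// according to the ABI classification of the return type, reusing a
/// dominating store to the return slot when possible, and emit the final
/// return instruction.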
void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI) {
  // Functions with no result always return void.
  if (ReturnValue == 0) {
    Builder.CreateRetVoid();
    return;
  }

  llvm::DebugLoc RetDbgLoc;
  llvm::Value *RV = 0;
  QualType RetTy = FI.getReturnType();
  const ABIArgInfo &RetAI = FI.getReturnInfo();

  switch (RetAI.getKind()) {
  case ABIArgInfo::Indirect: {
    unsigned Alignment = getContext().getTypeAlignInChars(RetTy).getQuantity();
    if (RetTy->isAnyComplexType()) {
      ComplexPairTy RT = LoadComplexFromAddr(ReturnValue, false);
      StoreComplexToAddr(RT, CurFn->arg_begin(), false);
    } else if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
      // Do nothing; aggregates get evaluated directly into the destination.
    } else {
      EmitStoreOfScalar(Builder.CreateLoad(ReturnValue), CurFn->arg_begin(),
                        false, Alignment, RetTy);
    }
    break;
  }

  case ABIArgInfo::Extend:
  case ABIArgInfo::Direct:
    if (RetAI.getCoerceToType() == ConvertType(RetTy) &&
        RetAI.getDirectOffset() == 0) {
      // The internal return value temp always will have pointer-to-return-type
      // type, just do a load.

      // If there is a dominating store to ReturnValue, we can elide
      // the load, zap the store, and usually zap the alloca.
      if (llvm::StoreInst *SI = findDominatingStoreToReturnValue(*this)) {
        // Get the stored value and nuke the now-dead store.
        RetDbgLoc = SI->getDebugLoc();
        RV = SI->getValueOperand();
        SI->eraseFromParent();

        // If that was the only use of the return value, nuke it as well now.
        if (ReturnValue->use_empty() && isa<llvm::AllocaInst>(ReturnValue)) {
          cast<llvm::AllocaInst>(ReturnValue)->eraseFromParent();
          ReturnValue = 0;
        }

      // Otherwise, we have to do a simple load.
      } else {
        RV = Builder.CreateLoad(ReturnValue);
      }
    } else {
      llvm::Value *V = ReturnValue;

      // If the value is offset in memory, apply the offset now.
      if (unsigned Offs = RetAI.getDirectOffset()) {
        V = Builder.CreateBitCast(V, Builder.getInt8PtrTy());
        V = Builder.CreateConstGEP1_32(V, Offs);
        V = Builder.CreateBitCast(V,
                         llvm::PointerType::getUnqual(RetAI.getCoerceToType()));
      }

      RV = CreateCoercedLoad(V, RetAI.getCoerceToType(), *this);
    }

    // In ARC, end functions that return a retainable type with a call
    // to objc_autoreleaseReturnValue.
    if (AutoreleaseResult) {
      assert(getLangOpts().ObjCAutoRefCount &&
             !FI.isReturnsRetained() &&
             RetTy->isObjCRetainableType());
      RV = emitAutoreleaseOfResult(*this, RV);
    }

    break;

  case ABIArgInfo::Ignore:
    break;

  case ABIArgInfo::Expand:
    llvm_unreachable("Invalid ABI kind for return argument");
  }

  llvm::Instruction *Ret = RV ? Builder.CreateRet(RV) : Builder.CreateRetVoid();
  if (!RetDbgLoc.isUnknown())
    Ret->setDebugLoc(RetDbgLoc);
}
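
/// Turn an already-emitted parameter into a call argument so it can be
/// forwarded to another call, e.g. when a constructor delegates to another
/// constructor. The parameter's local alloca is loaded or passed by address
/// as appropriate for its type.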
void CodeGenFunction::EmitDelegateCallArg(CallArgList &args,
                                          const VarDecl *param) {
  // StartFunction converted the ABI-lowered parameter(s) into a
  // local alloca.  We need to turn that into an r-value suitable
  // for EmitCall.
  llvm::Value *local = GetAddrOfLocalVar(param);

  QualType type = param->getType();

  // For the most part, we just need to load the alloca, except:
  // 1) aggregate r-values are actually pointers to temporaries, and
  // 2) references to aggregates are pointers directly to the aggregate.
  // I don't know why references to non-aggregates are different here.
  if (const ReferenceType *ref = type->getAs<ReferenceType>()) {
    if (hasAggregateLLVMType(ref->getPointeeType()))
      return args.add(RValue::getAggregate(local), type);

    // Locals which are references to scalars are represented
    // with allocas holding the pointer.
    return args.add(RValue::get(Builder.CreateLoad(local)), type);
  }

  if (type->isAnyComplexType()) {
    ComplexPairTy complex = LoadComplexFromAddr(local, /*volatile*/ false);
    return args.add(RValue::getComplex(complex), type);
  }

  if (hasAggregateLLVMType(type))
    return args.add(RValue::getAggregate(local), type);

  unsigned alignment = getContext().getDeclAlign(param).getQuantity();
  llvm::Value *value = EmitLoadOfScalar(local, false, alignment, type);
  return args.add(RValue::get(value), type);
}

static bool isProvablyNull(llvm::Value *addr) {
  return isa<llvm::ConstantPointerNull>(addr);
}
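
// An alloca is always a valid, non-null address; any other pointer is
// treated conservatively as possibly null by the writeback code below.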
static bool isProvablyNonNull(llvm::Value *addr) {
  return isa<llvm::AllocaInst>(addr);
}

/// Emit the actual writing-back of a writeback.
static void emitWriteback(CodeGenFunction &CGF,
                          const CallArgList::Writeback &writeback) {
  llvm::Value *srcAddr = writeback.Address;
  assert(!isProvablyNull(srcAddr) &&
         "shouldn't have writeback for provably null argument");

  llvm::BasicBlock *contBB = 0;

  // If the argument wasn't provably non-null, we need to null check
  // before doing the store.
  bool provablyNonNull = isProvablyNonNull(srcAddr);
  if (!provablyNonNull) {
    llvm::BasicBlock *writebackBB = CGF.createBasicBlock("icr.writeback");
    contBB = CGF.createBasicBlock("icr.done");

    llvm::Value *isNull = CGF.Builder.CreateIsNull(srcAddr, "icr.isnull");
    CGF.Builder.CreateCondBr(isNull, contBB, writebackBB);
    CGF.EmitBlock(writebackBB);
  }

  // Load the value to writeback.
  llvm::Value *value = CGF.Builder.CreateLoad(writeback.Temporary);

  // Cast it back, in case we're writing an id to a Foo* or something.
  value = CGF.Builder.CreateBitCast(value,
               cast<llvm::PointerType>(srcAddr->getType())->getElementType(),
                                    "icr.writeback-cast");

  // Perform the writeback.
  QualType srcAddrType = writeback.AddressType;
  CGF.EmitStoreThroughLValue(RValue::get(value),
                             CGF.MakeAddrLValue(srcAddr, srcAddrType));

  // Jump to the continuation block.
  if (!provablyNonNull)
    CGF.EmitBlock(contBB);
}

static void emitWritebacks(CodeGenFunction &CGF,
                           const CallArgList &args) {
  for (CallArgList::writeback_iterator
         i = args.writeback_begin(), e = args.writeback_end(); i != e; ++i)
    emitWriteback(CGF, *i);
}

/// Emit an argument that's being passed call-by-writeback.  That is,
/// we are passing the address of a temporary to the callee; after the
/// call, the value left in that temporary is written back to the
/// original l-value.
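///
/// The typical source pattern is an ARC out-parameter: for example, passing
/// '&err' for a __strong local 'NSError *err' to a parameter declared as
/// 'NSError *__autoreleasing *'. The callee sees the address of a temporary,
/// and whatever the callee leaves in that temporary is copied back into
/// 'err' after the call.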
static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args,
                             const ObjCIndirectCopyRestoreExpr *CRE) {
  llvm::Value *srcAddr = CGF.EmitScalarExpr(CRE->getSubExpr());

  // The dest and src types don't necessarily match in LLVM terms
  // because of the crazy ObjC compatibility rules.
  llvm::PointerType *destType =
    cast<llvm::PointerType>(CGF.ConvertType(CRE->getType()));

  // If the address is a constant null, just pass the appropriate null.
  if (isProvablyNull(srcAddr)) {
    args.add(RValue::get(llvm::ConstantPointerNull::get(destType)),
             CRE->getType());
    return;
  }

  QualType srcAddrType =
    CRE->getSubExpr()->getType()->castAs<PointerType>()->getPointeeType();

  // Create the temporary.
  llvm::Value *temp = CGF.CreateTempAlloca(destType->getElementType(),
                                           "icr.temp");

  // Zero-initialize it if we're not doing a copy-initialization.
  bool shouldCopy = CRE->shouldCopy();
  if (!shouldCopy) {
    llvm::Value *null =
      llvm::ConstantPointerNull::get(
        cast<llvm::PointerType>(destType->getElementType()));
    CGF.Builder.CreateStore(null, temp);
  }

  llvm::BasicBlock *contBB = 0;

  // If the address is *not* known to be non-null, we need to switch.
  llvm::Value *finalArgument;

  bool provablyNonNull = isProvablyNonNull(srcAddr);
  if (provablyNonNull) {
    finalArgument = temp;
  } else {
    llvm::Value *isNull = CGF.Builder.CreateIsNull(srcAddr, "icr.isnull");

    finalArgument = CGF.Builder.CreateSelect(isNull,
                                   llvm::ConstantPointerNull::get(destType),
                                             temp, "icr.argument");

    // If we need to copy, then the load has to be conditional, which
    // means we need control flow.
    if (shouldCopy) {
      contBB = CGF.createBasicBlock("icr.cont");
      llvm::BasicBlock *copyBB = CGF.createBasicBlock("icr.copy");
      CGF.Builder.CreateCondBr(isNull, contBB, copyBB);
      CGF.EmitBlock(copyBB);
    }
  }

  // Perform a copy if necessary.
  if (shouldCopy) {
    LValue srcLV = CGF.MakeAddrLValue(srcAddr, srcAddrType);
    RValue srcRV = CGF.EmitLoadOfLValue(srcLV);
    assert(srcRV.isScalar());

    llvm::Value *src = srcRV.getScalarVal();
    src = CGF.Builder.CreateBitCast(src, destType->getElementType(),
                                    "icr.cast");

    // Use an ordinary store, not a store-to-lvalue.
    CGF.Builder.CreateStore(src, temp);
  }

  // Finish the control flow if we needed it.
  if (shouldCopy && !provablyNonNull)
    CGF.EmitBlock(contBB);

  args.addWriteback(srcAddr, srcAddrType, temp);
  args.add(RValue::get(finalArgument), CRE->getType());
}
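
/// Emit a single call argument: copy/restore (writeback) arguments are
/// routed through emitWritebackArg, gl-values are bound as references, and
/// simple aggregate lvalue-to-rvalue casts are passed by address (with a
/// pending copy) to avoid an extra temporary.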
void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E,
                                  QualType type) {
  if (const ObjCIndirectCopyRestoreExpr *CRE
        = dyn_cast<ObjCIndirectCopyRestoreExpr>(E)) {
    assert(getContext().getLangOpts().ObjCAutoRefCount);
    assert(getContext().hasSameType(E->getType(), type));
    return emitWritebackArg(*this, args, CRE);
  }

  assert(type->isReferenceType() == E->isGLValue() &&
         "reference binding to unmaterialized r-value!");

  if (E->isGLValue()) {
    assert(E->getObjectKind() == OK_Ordinary);
    return args.add(EmitReferenceBindingToExpr(E, /*InitializedDecl=*/0),
                    type);
  }

  if (hasAggregateLLVMType(type) && !E->getType()->isAnyComplexType() &&
      isa<ImplicitCastExpr>(E) &&
      cast<CastExpr>(E)->getCastKind() == CK_LValueToRValue) {
    LValue L = EmitLValue(cast<CastExpr>(E)->getSubExpr());
    assert(L.isSimple());
    args.add(L.asAggregateRValue(), type, /*NeedsCopy*/true);
    return;
  }

  args.add(EmitAnyExprToTemp(E), type);
}

// In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
// optimizer it can aggressively ignore unwind edges.
void
CodeGenFunction::AddObjCARCExceptionMetadata(llvm::Instruction *Inst) {
  if (CGM.getCodeGenOpts().OptimizationLevel != 0 &&
      !CGM.getCodeGenOpts().ObjCAutoRefCountExceptions)
    Inst->setMetadata("clang.arc.no_objc_arc_exceptions",
                      CGM.getNoObjCARCExceptionsMetadata());
}

/// Emits a call or invoke instruction to the given function, depending
/// on the current state of the EH stack.
llvm::CallSite
CodeGenFunction::EmitCallOrInvoke(llvm::Value *Callee,
                                  ArrayRef<llvm::Value *> Args,
                                  const Twine &Name) {
  llvm::BasicBlock *InvokeDest = getInvokeDest();

  llvm::Instruction *Inst;
  if (!InvokeDest)
    Inst = Builder.CreateCall(Callee, Args, Name);
  else {
    llvm::BasicBlock *ContBB = createBasicBlock("invoke.cont");
    Inst = Builder.CreateInvoke(Callee, ContBB, InvokeDest, Args, Name);
    EmitBlock(ContBB);
  }

  // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
  // optimizer it can aggressively ignore unwind edges.
  if (CGM.getLangOpts().ObjCAutoRefCount)
    AddObjCARCExceptionMetadata(Inst);

  return Inst;
}

llvm::CallSite
CodeGenFunction::EmitCallOrInvoke(llvm::Value *Callee,
                                  const Twine &Name) {
  return EmitCallOrInvoke(Callee, ArrayRef<llvm::Value *>(), Name);
}
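
/// Check that the IR value being passed matches the parameter type expected
/// by the callee's IR function type at the current position (or that the
/// extra argument falls under a varargs signature), then advance the
/// argument index. The checks are asserts, so they vanish in NDEBUG builds.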
static void checkArgMatches(llvm::Value *Elt, unsigned &ArgNo,
                            llvm::FunctionType *FTy) {
  if (ArgNo < FTy->getNumParams())
    assert(Elt->getType() == FTy->getParamType(ArgNo));
  else
    assert(FTy->isVarArg());
  ++ArgNo;
}
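
/// Recursively flatten an argument of ABIArgInfo::Expand kind into individual
/// IR arguments: constant arrays are expanded element by element, structs
/// field by field, complex values into their real and imaginary parts, and
/// scalars are passed through (with a bitcast if the IR parameter type
/// differs).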
void CodeGenFunction::ExpandTypeToArgs(QualType Ty, RValue RV,
                                       SmallVector<llvm::Value*,16> &Args,
                                       llvm::FunctionType *IRFuncTy) {
  if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
    unsigned NumElts = AT->getSize().getZExtValue();
    QualType EltTy = AT->getElementType();
    llvm::Value *Addr = RV.getAggregateAddr();
    for (unsigned Elt = 0; Elt < NumElts; ++Elt) {
      llvm::Value *EltAddr = Builder.CreateConstGEP2_32(Addr, 0, Elt);
      LValue LV = MakeAddrLValue(EltAddr, EltTy);
      RValue EltRV;
      if (EltTy->isAnyComplexType())
        // FIXME: Volatile?
        EltRV = RValue::getComplex(LoadComplexFromAddr(LV.getAddress(), false));
      else if (CodeGenFunction::hasAggregateLLVMType(EltTy))
        EltRV = LV.asAggregateRValue();
      else
        EltRV = EmitLoadOfLValue(LV);
      ExpandTypeToArgs(EltTy, EltRV, Args, IRFuncTy);
    }
  } else if (const RecordType *RT = Ty->getAsStructureType()) {
    RecordDecl *RD = RT->getDecl();
    assert(RV.isAggregate() && "Unexpected rvalue during struct expansion");
    llvm::Value *Addr = RV.getAggregateAddr();
    for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
         i != e; ++i) {
      FieldDecl *FD = *i;
      QualType FT = FD->getType();

      // FIXME: What are the right qualifiers here?
      LValue LV = EmitLValueForField(Addr, FD, 0);
      RValue FldRV;
      if (FT->isAnyComplexType())
        // FIXME: Volatile?
        FldRV = RValue::getComplex(LoadComplexFromAddr(LV.getAddress(), false));
      else if (CodeGenFunction::hasAggregateLLVMType(FT))
        FldRV = LV.asAggregateRValue();
      else
        FldRV = EmitLoadOfLValue(LV);
      ExpandTypeToArgs(FT, FldRV, Args, IRFuncTy);
    }
  } else if (Ty->isAnyComplexType()) {
    ComplexPairTy CV = RV.getComplexVal();
    Args.push_back(CV.first);
    Args.push_back(CV.second);
  } else {
    assert(RV.isScalar() &&
           "Unexpected non-scalar rvalue during struct expansion.");

    // Insert a bitcast as needed.
    llvm::Value *V = RV.getScalarVal();
    if (Args.size() < IRFuncTy->getNumParams() &&
        V->getType() != IRFuncTy->getParamType(Args.size()))
      V = Builder.CreateBitCast(V, IRFuncTy->getParamType(Args.size()));

    Args.push_back(V);
  }
}
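
/// Emit a call (or invoke, if required by the current EH state) to the given
/// callee, lowering the arguments and the returned value according to the ABI
/// classification recorded in CallInfo.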
RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
                                 llvm::Value *Callee,
                                 ReturnValueSlot ReturnValue,
                                 const CallArgList &CallArgs,
                                 const Decl *TargetDecl,
                                 llvm::Instruction **callOrInvoke) {
  // FIXME: We no longer need the types from CallArgs; lift up and simplify.
  SmallVector<llvm::Value*, 16> Args;

  // Handle struct-return functions by passing a pointer to the
  // location that we would like to return into.
  QualType RetTy = CallInfo.getReturnType();
  const ABIArgInfo &RetAI = CallInfo.getReturnInfo();

  // IRArgNo - Keep track of the argument number in the callee we're looking at.
  unsigned IRArgNo = 0;
  llvm::FunctionType *IRFuncTy =
    cast<llvm::FunctionType>(
      cast<llvm::PointerType>(Callee->getType())->getElementType());

  // If the call returns a temporary with struct return, create a temporary
  // alloca to hold the result, unless one is given to us.
  if (CGM.ReturnTypeUsesSRet(CallInfo)) {
    llvm::Value *Value = ReturnValue.getValue();
    if (!Value)
      Value = CreateMemTemp(RetTy);
    Args.push_back(Value);
    checkArgMatches(Value, IRArgNo, IRFuncTy);
  }

  assert(CallInfo.arg_size() == CallArgs.size() &&
         "Mismatch between function signature & arguments.");
  CGFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin();
  for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
       I != E; ++I, ++info_it) {
    const ABIArgInfo &ArgInfo = info_it->info;
    RValue RV = I->RV;

    unsigned TypeAlign =
      getContext().getTypeAlignInChars(I->Ty).getQuantity();
    switch (ArgInfo.getKind()) {
    case ABIArgInfo::Indirect: {
      if (RV.isScalar() || RV.isComplex()) {
        // Make a temporary alloca to pass the argument.
        llvm::AllocaInst *AI = CreateMemTemp(I->Ty);
        if (ArgInfo.getIndirectAlign() > AI->getAlignment())
          AI->setAlignment(ArgInfo.getIndirectAlign());
        Args.push_back(AI);

        if (RV.isScalar())
          EmitStoreOfScalar(RV.getScalarVal(), Args.back(), false,
                            TypeAlign, I->Ty);
        else
          StoreComplexToAddr(RV.getComplexVal(), Args.back(), false);

        // Validate argument match.
        checkArgMatches(AI, IRArgNo, IRFuncTy);
      } else {
        // We want to avoid creating an unnecessary temporary+copy here;
        // however, we need one in two cases:
        // 1. If the argument is not byval, and we are required to copy the
        //    source.  (This case doesn't occur on any common architecture.)
        // 2. If the argument is byval, RV is not sufficiently aligned, and
        //    we cannot force it to be sufficiently aligned.
        llvm::Value *Addr = RV.getAggregateAddr();
        unsigned Align = ArgInfo.getIndirectAlign();
        const llvm::TargetData *TD = &CGM.getTargetData();
        if ((!ArgInfo.getIndirectByVal() && I->NeedsCopy) ||
            (ArgInfo.getIndirectByVal() && TypeAlign < Align &&
             llvm::getOrEnforceKnownAlignment(Addr, Align, TD) < Align)) {
          // Create an aligned temporary, and copy to it.
          llvm::AllocaInst *AI = CreateMemTemp(I->Ty);
          if (Align > AI->getAlignment())
            AI->setAlignment(Align);
          Args.push_back(AI);
          EmitAggregateCopy(AI, Addr, I->Ty, RV.isVolatileQualified());

          // Validate argument match.
          checkArgMatches(AI, IRArgNo, IRFuncTy);
        } else {
          // Skip the extra memcpy call.
          Args.push_back(Addr);

          // Validate argument match.
          checkArgMatches(Addr, IRArgNo, IRFuncTy);
        }
      }
      break;
    }

    case ABIArgInfo::Ignore:
      break;

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      // Insert a padding argument to ensure proper alignment.
      if (llvm::Type *PaddingType = ArgInfo.getPaddingType()) {
        Args.push_back(llvm::UndefValue::get(PaddingType));
        ++IRArgNo;
      }

      if (!isa<llvm::StructType>(ArgInfo.getCoerceToType()) &&
          ArgInfo.getCoerceToType() == ConvertType(info_it->type) &&
          ArgInfo.getDirectOffset() == 0) {
        llvm::Value *V;
        if (RV.isScalar())
          V = RV.getScalarVal();
        else
          V = Builder.CreateLoad(RV.getAggregateAddr());

        // If the argument doesn't match, perform a bitcast to coerce it.  This
        // can happen due to trivial type mismatches.
        if (IRArgNo < IRFuncTy->getNumParams() &&
            V->getType() != IRFuncTy->getParamType(IRArgNo))
          V = Builder.CreateBitCast(V, IRFuncTy->getParamType(IRArgNo));
        Args.push_back(V);

        checkArgMatches(V, IRArgNo, IRFuncTy);
        break;
      }

      // FIXME: Avoid the conversion through memory if possible.
      llvm::Value *SrcPtr;
      if (RV.isScalar()) {
        SrcPtr = CreateMemTemp(I->Ty, "coerce");
        EmitStoreOfScalar(RV.getScalarVal(), SrcPtr, false, TypeAlign, I->Ty);
      } else if (RV.isComplex()) {
        SrcPtr = CreateMemTemp(I->Ty, "coerce");
        StoreComplexToAddr(RV.getComplexVal(), SrcPtr, false);
      } else
        SrcPtr = RV.getAggregateAddr();

      // If the value is offset in memory, apply the offset now.
      if (unsigned Offs = ArgInfo.getDirectOffset()) {
        SrcPtr = Builder.CreateBitCast(SrcPtr, Builder.getInt8PtrTy());
        SrcPtr = Builder.CreateConstGEP1_32(SrcPtr, Offs);
        SrcPtr = Builder.CreateBitCast(SrcPtr,
                       llvm::PointerType::getUnqual(ArgInfo.getCoerceToType()));
      }

      // If the coerce-to type is a first class aggregate, we flatten it and
      // pass the elements. Either way is semantically identical, but fast-isel
      // and the optimizer generally likes scalar values better than FCAs.
      if (llvm::StructType *STy =
            dyn_cast<llvm::StructType>(ArgInfo.getCoerceToType())) {
        SrcPtr = Builder.CreateBitCast(SrcPtr,
                                       llvm::PointerType::getUnqual(STy));
        for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
          llvm::Value *EltPtr = Builder.CreateConstGEP2_32(SrcPtr, 0, i);
          llvm::LoadInst *LI = Builder.CreateLoad(EltPtr);
          // We don't know what we're loading from.
          LI->setAlignment(1);
          Args.push_back(LI);

          // Validate argument match.
          checkArgMatches(LI, IRArgNo, IRFuncTy);
        }
      } else {
        // In the simple case, just pass the coerced loaded value.
        Args.push_back(CreateCoercedLoad(SrcPtr, ArgInfo.getCoerceToType(),
                                         *this));

        // Validate argument match.
        checkArgMatches(Args.back(), IRArgNo, IRFuncTy);
      }

      break;
    }

    case ABIArgInfo::Expand:
      ExpandTypeToArgs(I->Ty, RV, Args, IRFuncTy);
      IRArgNo = Args.size();
      break;
    }
  }

  // If the callee is a bitcast of a function to a varargs pointer to function
  // type, check to see if we can remove the bitcast.  This handles some cases
  // with unprototyped functions.
  if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Callee))
    if (llvm::Function *CalleeF = dyn_cast<llvm::Function>(CE->getOperand(0))) {
      llvm::PointerType *CurPT = cast<llvm::PointerType>(Callee->getType());
      llvm::FunctionType *CurFT =
        cast<llvm::FunctionType>(CurPT->getElementType());
      llvm::FunctionType *ActualFT = CalleeF->getFunctionType();

      if (CE->getOpcode() == llvm::Instruction::BitCast &&
          ActualFT->getReturnType() == CurFT->getReturnType() &&
          ActualFT->getNumParams() == CurFT->getNumParams() &&
          ActualFT->getNumParams() == Args.size() &&
          (CurFT->isVarArg() || !ActualFT->isVarArg())) {
        bool ArgsMatch = true;
        for (unsigned i = 0, e = ActualFT->getNumParams(); i != e; ++i)
          if (ActualFT->getParamType(i) != CurFT->getParamType(i)) {
            ArgsMatch = false;
            break;
          }

        // Strip the cast if we can get away with it.  This is a nice cleanup,
        // but also allows us to inline the function at -O0 if it is marked
        // always_inline.
        if (ArgsMatch)
          Callee = CalleeF;
      }
    }

  unsigned CallingConv;
  CodeGen::AttributeListType AttributeList;
  CGM.ConstructAttributeList(CallInfo, TargetDecl, AttributeList, CallingConv);
  llvm::AttrListPtr Attrs = llvm::AttrListPtr::get(AttributeList.begin(),
                                                   AttributeList.end());

  llvm::BasicBlock *InvokeDest = 0;
  if (!(Attrs.getFnAttributes() & llvm::Attribute::NoUnwind))
    InvokeDest = getInvokeDest();

  llvm::CallSite CS;
  if (!InvokeDest) {
    CS = Builder.CreateCall(Callee, Args);
  } else {
    llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
    CS = Builder.CreateInvoke(Callee, Cont, InvokeDest, Args);
    EmitBlock(Cont);
  }
  if (callOrInvoke)
    *callOrInvoke = CS.getInstruction();

  CS.setAttributes(Attrs);
  CS.setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));

  // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
  // optimizer it can aggressively ignore unwind edges.
  if (CGM.getLangOpts().ObjCAutoRefCount)
    AddObjCARCExceptionMetadata(CS.getInstruction());

  // If the call doesn't return, finish the basic block and clear the
  // insertion point; this allows the rest of IRgen to discard
  // unreachable code.
  if (CS.doesNotReturn()) {
    Builder.CreateUnreachable();
    Builder.ClearInsertionPoint();

    // FIXME: For now, emit a dummy basic block because expr emitters in
    // general are not ready to handle emitting expressions at unreachable
    // points.
    EnsureInsertPoint();

    // Return a reasonable RValue.
    return GetUndefRValue(RetTy);
  }

  llvm::Instruction *CI = CS.getInstruction();
  if (Builder.isNamePreserving() && !CI->getType()->isVoidTy())
    CI->setName("call");

  // Emit any writebacks immediately.  Arguably this should happen
  // after any return-value munging.
  if (CallArgs.hasWritebacks())
    emitWritebacks(*this, CallArgs);

  switch (RetAI.getKind()) {
  case ABIArgInfo::Indirect: {
    unsigned Alignment = getContext().getTypeAlignInChars(RetTy).getQuantity();
    if (RetTy->isAnyComplexType())
      return RValue::getComplex(LoadComplexFromAddr(Args[0], false));
    if (CodeGenFunction::hasAggregateLLVMType(RetTy))
      return RValue::getAggregate(Args[0]);
    return RValue::get(EmitLoadOfScalar(Args[0], false, Alignment, RetTy));
  }

  case ABIArgInfo::Ignore:
    // If we are ignoring an argument that had a result, make sure to
    // construct the appropriate return value for our caller.
    return GetUndefRValue(RetTy);

  case ABIArgInfo::Extend:
  case ABIArgInfo::Direct: {
    llvm::Type *RetIRTy = ConvertType(RetTy);
    if (RetAI.getCoerceToType() == RetIRTy && RetAI.getDirectOffset() == 0) {
      if (RetTy->isAnyComplexType()) {
        llvm::Value *Real = Builder.CreateExtractValue(CI, 0);
        llvm::Value *Imag = Builder.CreateExtractValue(CI, 1);
        return RValue::getComplex(std::make_pair(Real, Imag));
      }
      if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
        llvm::Value *DestPtr = ReturnValue.getValue();
        bool DestIsVolatile = ReturnValue.isVolatile();

        if (!DestPtr) {
          DestPtr = CreateMemTemp(RetTy, "agg.tmp");
          DestIsVolatile = false;
        }
        BuildAggStore(*this, CI, DestPtr, DestIsVolatile, false);
        return RValue::getAggregate(DestPtr);
      }

      // If the argument doesn't match, perform a bitcast to coerce it.  This
      // can happen due to trivial type mismatches.
      llvm::Value *V = CI;
      if (V->getType() != RetIRTy)
        V = Builder.CreateBitCast(V, RetIRTy);
      return RValue::get(V);
    }

    llvm::Value *DestPtr = ReturnValue.getValue();
    bool DestIsVolatile = ReturnValue.isVolatile();

    if (!DestPtr) {
      DestPtr = CreateMemTemp(RetTy, "coerce");
      DestIsVolatile = false;
    }

    // If the value is offset in memory, apply the offset now.
    llvm::Value *StorePtr = DestPtr;
    if (unsigned Offs = RetAI.getDirectOffset()) {
      StorePtr = Builder.CreateBitCast(StorePtr, Builder.getInt8PtrTy());
      StorePtr = Builder.CreateConstGEP1_32(StorePtr, Offs);
      StorePtr = Builder.CreateBitCast(StorePtr,
                         llvm::PointerType::getUnqual(RetAI.getCoerceToType()));
    }
    CreateCoercedStore(CI, StorePtr, DestIsVolatile, *this);

    unsigned Alignment = getContext().getTypeAlignInChars(RetTy).getQuantity();
    if (RetTy->isAnyComplexType())
      return RValue::getComplex(LoadComplexFromAddr(DestPtr, false));
    if (CodeGenFunction::hasAggregateLLVMType(RetTy))
      return RValue::getAggregate(DestPtr);
    return RValue::get(EmitLoadOfScalar(DestPtr, false, Alignment, RetTy));
  }

  case ABIArgInfo::Expand:
    llvm_unreachable("Invalid ABI kind for return argument");
  }

  llvm_unreachable("Unhandled ABIArgInfo::Kind");
}

/* VarArg handling */
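
// EmitVAArg simply delegates to the target's ABIInfo, which knows how the
// va_list for this target is laid out and how values of type Ty are read
// from it.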
llvm::Value *CodeGenFunction::EmitVAArg(llvm::Value *VAListAddr, QualType Ty) {
  return CGM.getTypes().getABIInfo().EmitVAArg(VAListAddr, Ty, *this);
}