- //===--- CGExprCXX.cpp - Emit LLVM Code for C++ expressions ---------------===//
- //
- // The LLVM Compiler Infrastructure
- //
- // This file is distributed under the University of Illinois Open Source
- // License. See LICENSE.TXT for details.
- //
- //===----------------------------------------------------------------------===//
- //
- // This contains code dealing with code generation of C++ expressions
- //
- //===----------------------------------------------------------------------===//
- #include "clang/Frontend/CodeGenOptions.h"
- #include "CodeGenFunction.h"
- #include "CGCUDARuntime.h"
- #include "CGCXXABI.h"
- #include "CGObjCRuntime.h"
- #include "CGDebugInfo.h"
- #include "llvm/Intrinsics.h"
- #include "llvm/Support/CallSite.h"
- using namespace clang;
- using namespace CodeGen;
- RValue CodeGenFunction::EmitCXXMemberCall(const CXXMethodDecl *MD,
- llvm::Value *Callee,
- ReturnValueSlot ReturnValue,
- llvm::Value *This,
- llvm::Value *VTT,
- CallExpr::const_arg_iterator ArgBeg,
- CallExpr::const_arg_iterator ArgEnd) {
- assert(MD->isInstance() &&
- "Trying to emit a member call expr on a static method!");
- CallArgList Args;
- // Push the this ptr.
- Args.add(RValue::get(This), MD->getThisType(getContext()));
- // If there is a VTT parameter, emit it.
- if (VTT) {
- QualType T = getContext().getPointerType(getContext().VoidPtrTy);
- Args.add(RValue::get(VTT), T);
- }
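- // (Under the Itanium C++ ABI, base-subobject constructors and destructors
- // of classes with virtual bases receive a VTT, a table of vtable pointers,
- // as a hidden argument after 'this'.)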
- const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>();
- RequiredArgs required = RequiredArgs::forPrototypePlus(FPT, Args.size());
-
- // And the rest of the call args.
- EmitCallArgs(Args, FPT, ArgBeg, ArgEnd);
- return EmitCall(CGM.getTypes().arrangeFunctionCall(FPT->getResultType(), Args,
- FPT->getExtInfo(),
- required),
- Callee, ReturnValue, Args, MD);
- }
- static const CXXRecordDecl *getMostDerivedClassDecl(const Expr *Base) {
- const Expr *E = Base;
-
- while (true) {
- E = E->IgnoreParens();
- if (const CastExpr *CE = dyn_cast<CastExpr>(E)) {
- if (CE->getCastKind() == CK_DerivedToBase ||
- CE->getCastKind() == CK_UncheckedDerivedToBase ||
- CE->getCastKind() == CK_NoOp) {
- E = CE->getSubExpr();
- continue;
- }
- }
- break;
- }
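- // For example, for '((B *)d)->f()' where 'd' has type 'D *' and D derives
- // from B, the loop strips the DerivedToBase cast, leaving 'd' itself.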
- QualType DerivedType = E->getType();
- if (const PointerType *PTy = DerivedType->getAs<PointerType>())
- DerivedType = PTy->getPointeeType();
- return cast<CXXRecordDecl>(DerivedType->castAs<RecordType>()->getDecl());
- }
- // FIXME: Ideally Expr::IgnoreParenNoopCasts should do this, but it doesn't do
- // quite what we want.
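- // For example, this reduces '(__extension__ (b))' to the DeclRefExpr 'b'.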
- static const Expr *skipNoOpCastsAndParens(const Expr *E) {
- while (true) {
- if (const ParenExpr *PE = dyn_cast<ParenExpr>(E)) {
- E = PE->getSubExpr();
- continue;
- }
- if (const CastExpr *CE = dyn_cast<CastExpr>(E)) {
- if (CE->getCastKind() == CK_NoOp) {
- E = CE->getSubExpr();
- continue;
- }
- }
- if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) {
- if (UO->getOpcode() == UO_Extension) {
- E = UO->getSubExpr();
- continue;
- }
- }
- return E;
- }
- }
- /// canDevirtualizeMemberFunctionCalls - Checks whether virtual calls on the
- /// given expr can be devirtualized.
- static bool canDevirtualizeMemberFunctionCalls(ASTContext &Context,
- const Expr *Base,
- const CXXMethodDecl *MD) {
-
- // When building with -fapple-kext, all calls must go through the vtable since
- // the kernel linker can do runtime patching of vtables.
- if (Context.getLangOpts().AppleKext)
- return false;
- // If the most derived class is marked final, we know that no subclass can
- // override this member function and so we can devirtualize it. For example:
- //
- // struct A { virtual void f(); }
- // struct B final : A { };
- //
- // void f(B *b) {
- // b->f();
- // }
- //
- const CXXRecordDecl *MostDerivedClassDecl = getMostDerivedClassDecl(Base);
- if (MostDerivedClassDecl->hasAttr<FinalAttr>())
- return true;
- // If the member function is marked 'final', we know that it can't be
- // overridden and can therefore devirtualize it.
- if (MD->hasAttr<FinalAttr>())
- return true;
- // Similarly, if the class itself is marked 'final' it can't be overridden
- // and we can therefore devirtualize the member function call.
- if (MD->getParent()->hasAttr<FinalAttr>())
- return true;
- Base = skipNoOpCastsAndParens(Base);
- if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Base)) {
- if (const VarDecl *VD = dyn_cast<VarDecl>(DRE->getDecl())) {
- // If the variable has record (non-pointer) type, we know its dynamic
- // type exactly and can devirtualize the call.
- return VD->getType()->isRecordType();
- }
-
- return false;
- }
-
- // We can always devirtualize calls on temporary object expressions.
- if (isa<CXXConstructExpr>(Base))
- return true;
-
- // And calls on bound temporaries.
- if (isa<CXXBindTemporaryExpr>(Base))
- return true;
-
- // Check if this is a call expr that returns a record type.
- if (const CallExpr *CE = dyn_cast<CallExpr>(Base))
- return CE->getCallReturnType()->isRecordType();
- // We can't devirtualize the call.
- return false;
- }
- // Note: This function also emits constructor calls to support the MSVC
- // extension allowing explicit constructor function calls.
- RValue CodeGenFunction::EmitCXXMemberCallExpr(const CXXMemberCallExpr *CE,
- ReturnValueSlot ReturnValue) {
- const Expr *callee = CE->getCallee()->IgnoreParens();
- if (isa<BinaryOperator>(callee))
- return EmitCXXMemberPointerCallExpr(CE, ReturnValue);
- const MemberExpr *ME = cast<MemberExpr>(callee);
- const CXXMethodDecl *MD = cast<CXXMethodDecl>(ME->getMemberDecl());
- CGDebugInfo *DI = getDebugInfo();
- if (DI && CGM.getCodeGenOpts().LimitDebugInfo
- && !isa<CallExpr>(ME->getBase())) {
- QualType PQTy = ME->getBase()->IgnoreParenImpCasts()->getType();
- if (const PointerType * PTy = dyn_cast<PointerType>(PQTy)) {
- DI->getOrCreateRecordType(PTy->getPointeeType(),
- MD->getParent()->getLocation());
- }
- }
- if (MD->isStatic()) {
- // The method is static, emit it as we would a regular call.
- llvm::Value *Callee = CGM.GetAddrOfFunction(MD);
- return EmitCall(getContext().getPointerType(MD->getType()), Callee,
- ReturnValue, CE->arg_begin(), CE->arg_end());
- }
- // Compute the object pointer.
- llvm::Value *This;
- if (ME->isArrow())
- This = EmitScalarExpr(ME->getBase());
- else
- This = EmitLValue(ME->getBase()).getAddress();
- if (MD->isTrivial()) {
- if (isa<CXXDestructorDecl>(MD)) return RValue::get(0);
- if (isa<CXXConstructorDecl>(MD) &&
- cast<CXXConstructorDecl>(MD)->isDefaultConstructor())
- return RValue::get(0);
- if (MD->isCopyAssignmentOperator() || MD->isMoveAssignmentOperator()) {
- // We don't like to generate the trivial copy/move assignment operator
- // when it isn't necessary; just produce the proper effect here.
- llvm::Value *RHS = EmitLValue(*CE->arg_begin()).getAddress();
- EmitAggregateCopy(This, RHS, CE->getType());
- return RValue::get(This);
- }
-
- if (isa<CXXConstructorDecl>(MD) &&
- cast<CXXConstructorDecl>(MD)->isCopyOrMoveConstructor()) {
- // Trivial move and copy ctor are the same.
- llvm::Value *RHS = EmitLValue(*CE->arg_begin()).getAddress();
- EmitSynthesizedCXXCopyCtorCall(cast<CXXConstructorDecl>(MD), This, RHS,
- CE->arg_begin(), CE->arg_end());
- return RValue::get(This);
- }
- llvm_unreachable("unknown trivial member function");
- }
- // Compute the function type we're calling.
- const CGFunctionInfo *FInfo = 0;
- if (isa<CXXDestructorDecl>(MD))
- FInfo = &CGM.getTypes().arrangeCXXDestructor(cast<CXXDestructorDecl>(MD),
- Dtor_Complete);
- else if (isa<CXXConstructorDecl>(MD))
- FInfo = &CGM.getTypes().arrangeCXXConstructorDeclaration(
- cast<CXXConstructorDecl>(MD),
- Ctor_Complete);
- else
- FInfo = &CGM.getTypes().arrangeCXXMethodDeclaration(MD);
- llvm::Type *Ty = CGM.getTypes().GetFunctionType(*FInfo);
- // C++ [class.virtual]p12:
- // Explicit qualification with the scope operator (5.1) suppresses the
- // virtual call mechanism.
- //
- // We also don't emit a virtual call if the base expression has a record type
- // because then we know what the type is.
- bool UseVirtualCall = MD->isVirtual() && !ME->hasQualifier()
- && !canDevirtualizeMemberFunctionCalls(getContext(),
- ME->getBase(), MD);
- llvm::Value *Callee;
- if (const CXXDestructorDecl *Dtor = dyn_cast<CXXDestructorDecl>(MD)) {
- if (UseVirtualCall) {
- Callee = BuildVirtualCall(Dtor, Dtor_Complete, This, Ty);
- } else {
- if (getContext().getLangOpts().AppleKext &&
- MD->isVirtual() &&
- ME->hasQualifier())
- Callee = BuildAppleKextVirtualCall(MD, ME->getQualifier(), Ty);
- else
- Callee = CGM.GetAddrOfFunction(GlobalDecl(Dtor, Dtor_Complete), Ty);
- }
- } else if (const CXXConstructorDecl *Ctor =
- dyn_cast<CXXConstructorDecl>(MD)) {
- Callee = CGM.GetAddrOfFunction(GlobalDecl(Ctor, Ctor_Complete), Ty);
- } else if (UseVirtualCall) {
- Callee = BuildVirtualCall(MD, This, Ty);
- } else {
- if (getContext().getLangOpts().AppleKext &&
- MD->isVirtual() &&
- ME->hasQualifier())
- Callee = BuildAppleKextVirtualCall(MD, ME->getQualifier(), Ty);
- else
- Callee = CGM.GetAddrOfFunction(MD, Ty);
- }
- return EmitCXXMemberCall(MD, Callee, ReturnValue, This, /*VTT=*/0,
- CE->arg_begin(), CE->arg_end());
- }
- RValue
- CodeGenFunction::EmitCXXMemberPointerCallExpr(const CXXMemberCallExpr *E,
- ReturnValueSlot ReturnValue) {
- const BinaryOperator *BO =
- cast<BinaryOperator>(E->getCallee()->IgnoreParens());
- const Expr *BaseExpr = BO->getLHS();
- const Expr *MemFnExpr = BO->getRHS();
-
- const MemberPointerType *MPT =
- MemFnExpr->getType()->castAs<MemberPointerType>();
- const FunctionProtoType *FPT =
- MPT->getPointeeType()->castAs<FunctionProtoType>();
- const CXXRecordDecl *RD =
- cast<CXXRecordDecl>(MPT->getClass()->getAs<RecordType>()->getDecl());
- // Get the member function pointer.
- llvm::Value *MemFnPtr = EmitScalarExpr(MemFnExpr);
- // Emit the 'this' pointer.
- llvm::Value *This;
-
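- // BO_PtrMemI is the '->*' form, whose base is already a pointer;
- // BO_PtrMemD is the '.*' form, whose base is an lvalue.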
- if (BO->getOpcode() == BO_PtrMemI)
- This = EmitScalarExpr(BaseExpr);
- else
- This = EmitLValue(BaseExpr).getAddress();
- // Ask the ABI to load the callee. Note that This is modified.
- llvm::Value *Callee =
- CGM.getCXXABI().EmitLoadOfMemberFunctionPointer(*this, This, MemFnPtr, MPT);
-
- CallArgList Args;
- QualType ThisType =
- getContext().getPointerType(getContext().getTagDeclType(RD));
- // Push the this ptr.
- Args.add(RValue::get(This), ThisType);
-
- // And the rest of the call args
- EmitCallArgs(Args, FPT, E->arg_begin(), E->arg_end());
- return EmitCall(CGM.getTypes().arrangeFunctionCall(Args, FPT), Callee,
- ReturnValue, Args);
- }
- RValue
- CodeGenFunction::EmitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *E,
- const CXXMethodDecl *MD,
- ReturnValueSlot ReturnValue) {
- assert(MD->isInstance() &&
- "Trying to emit a member call expr on a static method!");
- LValue LV = EmitLValue(E->getArg(0));
- llvm::Value *This = LV.getAddress();
- if ((MD->isCopyAssignmentOperator() || MD->isMoveAssignmentOperator()) &&
- MD->isTrivial()) {
- llvm::Value *Src = EmitLValue(E->getArg(1)).getAddress();
- QualType Ty = E->getType();
- EmitAggregateCopy(This, Src, Ty);
- return RValue::get(This);
- }
- llvm::Value *Callee = EmitCXXOperatorMemberCallee(E, MD, This);
- return EmitCXXMemberCall(MD, Callee, ReturnValue, This, /*VTT=*/0,
- E->arg_begin() + 1, E->arg_end());
- }
- RValue CodeGenFunction::EmitCUDAKernelCallExpr(const CUDAKernelCallExpr *E,
- ReturnValueSlot ReturnValue) {
- return CGM.getCUDARuntime().EmitCUDAKernelCallExpr(*this, E, ReturnValue);
- }
- static void EmitNullBaseClassInitialization(CodeGenFunction &CGF,
- llvm::Value *DestPtr,
- const CXXRecordDecl *Base) {
- if (Base->isEmpty())
- return;
- DestPtr = CGF.EmitCastToVoidPtr(DestPtr);
- const ASTRecordLayout &Layout = CGF.getContext().getASTRecordLayout(Base);
- CharUnits Size = Layout.getNonVirtualSize();
- CharUnits Align = Layout.getNonVirtualAlign();
- llvm::Value *SizeVal = CGF.CGM.getSize(Size);
- // If the type contains a pointer to data member we can't memset it to zero.
- // Instead, create a null constant and copy it to the destination.
- // TODO: there are other patterns besides zero that we can usefully memset,
- // like -1, which happens to be the pattern used by member-pointers.
- // TODO: isZeroInitializable can be over-conservative in the case where a
- // virtual base contains a member pointer.
- if (!CGF.CGM.getTypes().isZeroInitializable(Base)) {
- llvm::Constant *NullConstant = CGF.CGM.EmitNullConstantForBase(Base);
- llvm::GlobalVariable *NullVariable =
- new llvm::GlobalVariable(CGF.CGM.getModule(), NullConstant->getType(),
- /*isConstant=*/true,
- llvm::GlobalVariable::PrivateLinkage,
- NullConstant, Twine());
- NullVariable->setAlignment(Align.getQuantity());
- llvm::Value *SrcPtr = CGF.EmitCastToVoidPtr(NullVariable);
- // Get and call the appropriate llvm.memcpy overload.
- CGF.Builder.CreateMemCpy(DestPtr, SrcPtr, SizeVal, Align.getQuantity());
- return;
- }
-
- // Otherwise, just memset the whole thing to zero. This is legal
- // because in LLVM, all default initializers (other than the ones we just
- // handled above) are guaranteed to have a bit pattern of all zeros.
- CGF.Builder.CreateMemSet(DestPtr, CGF.Builder.getInt8(0), SizeVal,
- Align.getQuantity());
- }
- void
- CodeGenFunction::EmitCXXConstructExpr(const CXXConstructExpr *E,
- AggValueSlot Dest) {
- assert(!Dest.isIgnored() && "Must have a destination!");
- const CXXConstructorDecl *CD = E->getConstructor();
-
- // If we require zero initialization before (or instead of) calling the
- // constructor, as can be the case with a non-user-provided default
- // constructor, emit the zero initialization now, unless destination is
- // already zeroed.
- if (E->requiresZeroInitialization() && !Dest.isZeroed()) {
- switch (E->getConstructionKind()) {
- case CXXConstructExpr::CK_Delegating:
- assert(0 && "Delegating constructor should not need zeroing");
- case CXXConstructExpr::CK_Complete:
- EmitNullInitialization(Dest.getAddr(), E->getType());
- break;
- case CXXConstructExpr::CK_VirtualBase:
- case CXXConstructExpr::CK_NonVirtualBase:
- EmitNullBaseClassInitialization(*this, Dest.getAddr(), CD->getParent());
- break;
- }
- }
-
- // If this is a call to a trivial default constructor, do nothing.
- if (CD->isTrivial() && CD->isDefaultConstructor())
- return;
-
- // Elide the constructor if we're constructing from a temporary.
- // The temporary check is required because Sema sets this on NRVO
- // returns.
- if (getContext().getLangOpts().ElideConstructors && E->isElidable()) {
- assert(getContext().hasSameUnqualifiedType(E->getType(),
- E->getArg(0)->getType()));
- if (E->getArg(0)->isTemporaryObject(getContext(), CD->getParent())) {
- EmitAggExpr(E->getArg(0), Dest);
- return;
- }
- }
-
- if (const ConstantArrayType *arrayType
- = getContext().getAsConstantArrayType(E->getType())) {
- EmitCXXAggrConstructorCall(CD, arrayType, Dest.getAddr(),
- E->arg_begin(), E->arg_end());
- } else {
- CXXCtorType Type = Ctor_Complete;
- bool ForVirtualBase = false;
- switch (E->getConstructionKind()) {
- case CXXConstructExpr::CK_Delegating:
- // We should be emitting a constructor; GlobalDecl will assert this
- Type = CurGD.getCtorType();
- break;
- case CXXConstructExpr::CK_Complete:
- Type = Ctor_Complete;
- break;
- case CXXConstructExpr::CK_VirtualBase:
- ForVirtualBase = true;
- // fall-through
- case CXXConstructExpr::CK_NonVirtualBase:
- Type = Ctor_Base;
- }
-
- // Call the constructor.
- EmitCXXConstructorCall(CD, Type, ForVirtualBase, Dest.getAddr(),
- E->arg_begin(), E->arg_end());
- }
- }
- void
- CodeGenFunction::EmitSynthesizedCXXCopyCtor(llvm::Value *Dest,
- llvm::Value *Src,
- const Expr *Exp) {
- if (const ExprWithCleanups *E = dyn_cast<ExprWithCleanups>(Exp))
- Exp = E->getSubExpr();
- assert(isa<CXXConstructExpr>(Exp) &&
- "EmitSynthesizedCXXCopyCtor - unknown copy ctor expr");
- const CXXConstructExpr* E = cast<CXXConstructExpr>(Exp);
- const CXXConstructorDecl *CD = E->getConstructor();
- RunCleanupsScope Scope(*this);
-
- // If we require zero initialization before (or instead of) calling the
- // constructor, as can be the case with a non-user-provided default
- // constructor, emit the zero initialization now.
- // FIXME: Do I still need this for a copy ctor synthesis?
- if (E->requiresZeroInitialization())
- EmitNullInitialization(Dest, E->getType());
-
- assert(!getContext().getAsConstantArrayType(E->getType())
- && "EmitSynthesizedCXXCopyCtor - Copied-in Array");
- EmitSynthesizedCXXCopyCtorCall(CD, Dest, Src,
- E->arg_begin(), E->arg_end());
- }
- static CharUnits CalculateCookiePadding(CodeGenFunction &CGF,
- const CXXNewExpr *E) {
- if (!E->isArray())
- return CharUnits::Zero();
- // No cookie is required if the operator new[] being used is the
- // reserved placement operator new[].
- if (E->getOperatorNew()->isReservedGlobalPlacementOperator())
- return CharUnits::Zero();
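- // Otherwise the ABI decides; e.g. under the Itanium ABI, 'new X[n]' where
- // X has a non-trivial destructor prepends a cookie holding 'n' so that
- // 'delete[]' can recover the element count.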
- return CGF.CGM.getCXXABI().GetArrayCookieSize(E);
- }
- static llvm::Value *EmitCXXNewAllocSize(CodeGenFunction &CGF,
- const CXXNewExpr *e,
- unsigned minElements,
- llvm::Value *&numElements,
- llvm::Value *&sizeWithoutCookie) {
- QualType type = e->getAllocatedType();
- if (!e->isArray()) {
- CharUnits typeSize = CGF.getContext().getTypeSizeInChars(type);
- sizeWithoutCookie
- = llvm::ConstantInt::get(CGF.SizeTy, typeSize.getQuantity());
- return sizeWithoutCookie;
- }
- // The width of size_t.
- unsigned sizeWidth = CGF.SizeTy->getBitWidth();
- // Figure out the cookie size.
- llvm::APInt cookieSize(sizeWidth,
- CalculateCookiePadding(CGF, e).getQuantity());
- // Emit the array size expression.
- // We multiply the sizes of all dimensions together to get NumElements;
- // e.g. for 'int[2][3]', ElemType is 'int' and NumElements is 6.
- numElements = CGF.EmitScalarExpr(e->getArraySize());
- assert(isa<llvm::IntegerType>(numElements->getType()));
- // The number of elements can have an arbitrary integer type;
- // essentially, we need to multiply it by a constant factor, add a
- // cookie size, and verify that the result is representable as a
- // size_t. That's just a gloss, though, and it's wrong in one
- // important way: if the count is negative, it's an error even if
- // the cookie size would bring the total size >= 0.
- bool isSigned
- = e->getArraySize()->getType()->isSignedIntegerOrEnumerationType();
- llvm::IntegerType *numElementsType
- = cast<llvm::IntegerType>(numElements->getType());
- unsigned numElementsWidth = numElementsType->getBitWidth();
- // Compute the constant factor.
- llvm::APInt arraySizeMultiplier(sizeWidth, 1);
- while (const ConstantArrayType *CAT
- = CGF.getContext().getAsConstantArrayType(type)) {
- type = CAT->getElementType();
- arraySizeMultiplier *= CAT->getSize();
- }
- CharUnits typeSize = CGF.getContext().getTypeSizeInChars(type);
- llvm::APInt typeSizeMultiplier(sizeWidth, typeSize.getQuantity());
- typeSizeMultiplier *= arraySizeMultiplier;
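- // For example, for 'new int[n][3][7]' with a 4-byte int,
- // arraySizeMultiplier is 21 and typeSizeMultiplier is 84.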
- // This will be a size_t.
- llvm::Value *size;
-
- // If someone is doing 'new int[42]' there is no need to do a dynamic check.
- // Don't bloat the -O0 code.
- if (llvm::ConstantInt *numElementsC =
- dyn_cast<llvm::ConstantInt>(numElements)) {
- const llvm::APInt &count = numElementsC->getValue();
- bool hasAnyOverflow = false;
- // If 'count' was a negative number, it's an overflow.
- if (isSigned && count.isNegative())
- hasAnyOverflow = true;
- // We want to do all this arithmetic in size_t. If numElements is
- // wider than that, check whether it's already too big, and if so,
- // overflow.
- else if (numElementsWidth > sizeWidth &&
- numElementsWidth - sizeWidth > count.countLeadingZeros())
- hasAnyOverflow = true;
- // Okay, compute a count at the right width.
- llvm::APInt adjustedCount = count.zextOrTrunc(sizeWidth);
- // If there is a brace-initializer, we cannot allocate fewer elements than
- // there are initializers. If we do, that's treated like an overflow.
- if (adjustedCount.ult(minElements))
- hasAnyOverflow = true;
- // Scale numElements by that. This might overflow, but we don't
- // care because it only overflows if allocationSize does, too, and
- // if that overflows then we shouldn't use this.
- numElements = llvm::ConstantInt::get(CGF.SizeTy,
- adjustedCount * arraySizeMultiplier);
- // Compute the size before cookie, and track whether it overflowed.
- bool overflow;
- llvm::APInt allocationSize
- = adjustedCount.umul_ov(typeSizeMultiplier, overflow);
- hasAnyOverflow |= overflow;
- // Add in the cookie, and check whether it's overflowed.
- if (cookieSize != 0) {
- // Save the current size without a cookie. This shouldn't be
- // used if there was overflow.
- sizeWithoutCookie = llvm::ConstantInt::get(CGF.SizeTy, allocationSize);
- allocationSize = allocationSize.uadd_ov(cookieSize, overflow);
- hasAnyOverflow |= overflow;
- }
- // On overflow, produce a -1 so operator new will fail.
- if (hasAnyOverflow) {
- size = llvm::Constant::getAllOnesValue(CGF.SizeTy);
- } else {
- size = llvm::ConstantInt::get(CGF.SizeTy, allocationSize);
- }
- // Otherwise, we might need to use the overflow intrinsics.
- } else {
- // There are up to five conditions we need to test for:
- // 1) if isSigned, we need to check whether numElements is negative;
- // 2) if numElementsWidth > sizeWidth, we need to check whether
- // numElements is larger than something representable in size_t;
- // 3) if minElements > 0, we need to check whether numElements is smaller
- // than that;
- // 4) we need to compute
- // sizeWithoutCookie := numElements * typeSizeMultiplier
- // and check whether it overflows; and
- // 5) if we need a cookie, we need to compute
- // size := sizeWithoutCookie + cookieSize
- // and check whether it overflows.
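- // For example, with a 32-bit size_t and a 4-byte int, 'new int[n]' with
- // n == 0x40000000 requires 0x100000000 bytes; the multiply in (4)
- // overflows, and the select at the end replaces 'size' with -1 so
- // operator new will fail.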
- llvm::Value *hasOverflow = 0;
- // If numElementsWidth > sizeWidth, then one way or another, we're
- // going to have to do a comparison for (2), and this happens to
- // take care of (1), too.
- if (numElementsWidth > sizeWidth) {
- llvm::APInt threshold(numElementsWidth, 1);
- threshold <<= sizeWidth;
- llvm::Value *thresholdV
- = llvm::ConstantInt::get(numElementsType, threshold);
- hasOverflow = CGF.Builder.CreateICmpUGE(numElements, thresholdV);
- numElements = CGF.Builder.CreateTrunc(numElements, CGF.SizeTy);
- // Otherwise, if we're signed, we want to sext up to size_t.
- } else if (isSigned) {
- if (numElementsWidth < sizeWidth)
- numElements = CGF.Builder.CreateSExt(numElements, CGF.SizeTy);
-
- // If there's a non-1 type size multiplier, then we can do the
- // signedness check at the same time as we do the multiply
- // because a negative number times anything will cause an
- // unsigned overflow. Otherwise, we have to do it here. But at least
- // in this case, we can subsume the >= minElements check.
- if (typeSizeMultiplier == 1)
- hasOverflow = CGF.Builder.CreateICmpSLT(numElements,
- llvm::ConstantInt::get(CGF.SizeTy, minElements));
- // Otherwise, zext up to size_t if necessary.
- } else if (numElementsWidth < sizeWidth) {
- numElements = CGF.Builder.CreateZExt(numElements, CGF.SizeTy);
- }
- assert(numElements->getType() == CGF.SizeTy);
- if (minElements) {
- // Don't allow allocation of fewer elements than we have initializers.
- if (!hasOverflow) {
- hasOverflow = CGF.Builder.CreateICmpULT(numElements,
- llvm::ConstantInt::get(CGF.SizeTy, minElements));
- } else if (numElementsWidth > sizeWidth) {
- // The other existing overflow subsumes this check.
- // We do an unsigned comparison, since any signed value < -1 is
- // taken care of either above or below.
- hasOverflow = CGF.Builder.CreateOr(hasOverflow,
- CGF.Builder.CreateICmpULT(numElements,
- llvm::ConstantInt::get(CGF.SizeTy, minElements)));
- }
- }
- size = numElements;
- // Multiply by the type size if necessary. This multiplier
- // includes all the factors for nested arrays.
- //
- // This step also causes numElements to be scaled up by the
- // nested-array factor if necessary. Overflow on this computation
- // can be ignored because the result shouldn't be used if
- // allocation fails.
- if (typeSizeMultiplier != 1) {
- llvm::Value *umul_with_overflow
- = CGF.CGM.getIntrinsic(llvm::Intrinsic::umul_with_overflow, CGF.SizeTy);
- llvm::Value *tsmV =
- llvm::ConstantInt::get(CGF.SizeTy, typeSizeMultiplier);
- llvm::Value *result =
- CGF.Builder.CreateCall2(umul_with_overflow, size, tsmV);
- llvm::Value *overflowed = CGF.Builder.CreateExtractValue(result, 1);
- if (hasOverflow)
- hasOverflow = CGF.Builder.CreateOr(hasOverflow, overflowed);
- else
- hasOverflow = overflowed;
- size = CGF.Builder.CreateExtractValue(result, 0);
- // Also scale up numElements by the array size multiplier.
- if (arraySizeMultiplier != 1) {
- // If the base element type size is 1, then we can re-use the
- // multiply we just did.
- if (typeSize.isOne()) {
- assert(arraySizeMultiplier == typeSizeMultiplier);
- numElements = size;
- // Otherwise we need a separate multiply.
- } else {
- llvm::Value *asmV =
- llvm::ConstantInt::get(CGF.SizeTy, arraySizeMultiplier);
- numElements = CGF.Builder.CreateMul(numElements, asmV);
- }
- }
- } else {
- // numElements doesn't need to be scaled.
- assert(arraySizeMultiplier == 1);
- }
-
- // Add in the cookie size if necessary.
- if (cookieSize != 0) {
- sizeWithoutCookie = size;
- llvm::Value *uadd_with_overflow
- = CGF.CGM.getIntrinsic(llvm::Intrinsic::uadd_with_overflow, CGF.SizeTy);
- llvm::Value *cookieSizeV = llvm::ConstantInt::get(CGF.SizeTy, cookieSize);
- llvm::Value *result =
- CGF.Builder.CreateCall2(uadd_with_overflow, size, cookieSizeV);
- llvm::Value *overflowed = CGF.Builder.CreateExtractValue(result, 1);
- if (hasOverflow)
- hasOverflow = CGF.Builder.CreateOr(hasOverflow, overflowed);
- else
- hasOverflow = overflowed;
- size = CGF.Builder.CreateExtractValue(result, 0);
- }
- // If we had any possibility of dynamic overflow, make a select to
- // overwrite 'size' with an all-ones value, which should cause
- // operator new to throw.
- if (hasOverflow)
- size = CGF.Builder.CreateSelect(hasOverflow,
- llvm::Constant::getAllOnesValue(CGF.SizeTy),
- size);
- }
- if (cookieSize == 0)
- sizeWithoutCookie = size;
- else
- assert(sizeWithoutCookie && "didn't set sizeWithoutCookie?");
- return size;
- }
- static void StoreAnyExprIntoOneUnit(CodeGenFunction &CGF, const Expr *Init,
- QualType AllocType, llvm::Value *NewPtr) {
- CharUnits Alignment = CGF.getContext().getTypeAlignInChars(AllocType);
- if (!CGF.hasAggregateLLVMType(AllocType))
- CGF.EmitScalarInit(Init, 0, CGF.MakeAddrLValue(NewPtr, AllocType,
- Alignment),
- false);
- else if (AllocType->isAnyComplexType())
- CGF.EmitComplexExprIntoAddr(Init, NewPtr,
- AllocType.isVolatileQualified());
- else {
- AggValueSlot Slot
- = AggValueSlot::forAddr(NewPtr, Alignment, AllocType.getQualifiers(),
- AggValueSlot::IsDestructed,
- AggValueSlot::DoesNotNeedGCBarriers,
- AggValueSlot::IsNotAliased);
- CGF.EmitAggExpr(Init, Slot);
- CGF.MaybeEmitStdInitializerListCleanup(NewPtr, Init);
- }
- }
- void
- CodeGenFunction::EmitNewArrayInitializer(const CXXNewExpr *E,
- QualType elementType,
- llvm::Value *beginPtr,
- llvm::Value *numElements) {
- if (!E->hasInitializer())
- return; // We have a POD type.
- llvm::Value *explicitPtr = beginPtr;
- // Find the end of the array, hoisted out of the loop.
- llvm::Value *endPtr =
- Builder.CreateInBoundsGEP(beginPtr, numElements, "array.end");
- unsigned initializerElements = 0;
- const Expr *Init = E->getInitializer();
- llvm::AllocaInst *endOfInit = 0;
- QualType::DestructionKind dtorKind = elementType.isDestructedType();
- EHScopeStack::stable_iterator cleanup;
- llvm::Instruction *cleanupDominator = 0;
- // If the initializer is an initializer list, first do the explicit elements.
- if (const InitListExpr *ILE = dyn_cast<InitListExpr>(Init)) {
- initializerElements = ILE->getNumInits();
- // Enter a partial-destruction cleanup if necessary.
- if (needsEHCleanup(dtorKind)) {
- // In principle we could tell the cleanup where we are more
- // directly, but the control flow can get so varied here that it
- // would actually be quite complex. Therefore we go through an
- // alloca.
- endOfInit = CreateTempAlloca(beginPtr->getType(), "array.endOfInit");
- cleanupDominator = Builder.CreateStore(beginPtr, endOfInit);
- pushIrregularPartialArrayCleanup(beginPtr, endOfInit, elementType,
- getDestroyer(dtorKind));
- cleanup = EHStack.stable_begin();
- }
- for (unsigned i = 0, e = ILE->getNumInits(); i != e; ++i) {
- // Tell the cleanup that it needs to destroy up to this
- // element. TODO: some of these stores can be trivially
- // observed to be unnecessary.
- if (endOfInit) Builder.CreateStore(explicitPtr, endOfInit);
- StoreAnyExprIntoOneUnit(*this, ILE->getInit(i), elementType, explicitPtr);
- explicitPtr = Builder.CreateConstGEP1_32(explicitPtr, 1, "array.exp.next");
- }
- // The remaining elements are filled with the array filler expression.
- Init = ILE->getArrayFiller();
- }
- // Create the continuation block.
- llvm::BasicBlock *contBB = createBasicBlock("new.loop.end");
- // If the number of elements isn't constant, we now have to check whether
- // there is anything left to initialize.
- if (llvm::ConstantInt *constNum = dyn_cast<llvm::ConstantInt>(numElements)) {
- // If all elements have already been initialized, skip the whole loop.
- if (constNum->getZExtValue() <= initializerElements) {
- // If there was a cleanup, deactivate it.
- if (cleanupDominator)
- DeactivateCleanupBlock(cleanup, cleanupDominator);
- return;
- }
- } else {
- llvm::BasicBlock *nonEmptyBB = createBasicBlock("new.loop.nonempty");
- llvm::Value *isEmpty = Builder.CreateICmpEQ(explicitPtr, endPtr,
- "array.isempty");
- Builder.CreateCondBr(isEmpty, contBB, nonEmptyBB);
- EmitBlock(nonEmptyBB);
- }
- // Enter the loop.
- llvm::BasicBlock *entryBB = Builder.GetInsertBlock();
- llvm::BasicBlock *loopBB = createBasicBlock("new.loop");
- EmitBlock(loopBB);
- // Set up the current-element phi.
- llvm::PHINode *curPtr =
- Builder.CreatePHI(explicitPtr->getType(), 2, "array.cur");
- curPtr->addIncoming(explicitPtr, entryBB);
- // Store the new cleanup position for irregular cleanups.
- if (endOfInit) Builder.CreateStore(curPtr, endOfInit);
- // Enter a partial-destruction cleanup if necessary.
- if (!cleanupDominator && needsEHCleanup(dtorKind)) {
- pushRegularPartialArrayCleanup(beginPtr, curPtr, elementType,
- getDestroyer(dtorKind));
- cleanup = EHStack.stable_begin();
- cleanupDominator = Builder.CreateUnreachable();
- }
- // Emit the initializer into this element.
- StoreAnyExprIntoOneUnit(*this, Init, E->getAllocatedType(), curPtr);
- // Leave the cleanup if we entered one.
- if (cleanupDominator) {
- DeactivateCleanupBlock(cleanup, cleanupDominator);
- cleanupDominator->eraseFromParent();
- }
- // Advance to the next element.
- llvm::Value *nextPtr = Builder.CreateConstGEP1_32(curPtr, 1, "array.next");
- // Check whether we've gotten to the end of the array and, if so,
- // exit the loop.
- llvm::Value *isEnd = Builder.CreateICmpEQ(nextPtr, endPtr, "array.atend");
- Builder.CreateCondBr(isEnd, contBB, loopBB);
- curPtr->addIncoming(nextPtr, Builder.GetInsertBlock());
- EmitBlock(contBB);
- }
- static void EmitZeroMemSet(CodeGenFunction &CGF, QualType T,
- llvm::Value *NewPtr, llvm::Value *Size) {
- NewPtr = CGF.EmitCastToVoidPtr(NewPtr);
- CharUnits Alignment = CGF.getContext().getTypeAlignInChars(T);
- CGF.Builder.CreateMemSet(NewPtr, CGF.Builder.getInt8(0), Size,
- Alignment.getQuantity(), false);
- }
-
- static void EmitNewInitializer(CodeGenFunction &CGF, const CXXNewExpr *E,
- QualType ElementType,
- llvm::Value *NewPtr,
- llvm::Value *NumElements,
- llvm::Value *AllocSizeWithoutCookie) {
- const Expr *Init = E->getInitializer();
- if (E->isArray()) {
- if (const CXXConstructExpr *CCE = dyn_cast_or_null<CXXConstructExpr>(Init)){
- CXXConstructorDecl *Ctor = CCE->getConstructor();
- bool RequiresZeroInitialization = false;
- if (Ctor->isTrivial()) {
- // If the new-expression did not specify value-initialization, then there
- // is no initialization.
- if (!CCE->requiresZeroInitialization() || Ctor->getParent()->isEmpty())
- return;
-
- if (CGF.CGM.getTypes().isZeroInitializable(ElementType)) {
- // Optimization: since zero initialization will just set the memory
- // to all zeroes, generate a single memset to do it in one shot.
- EmitZeroMemSet(CGF, ElementType, NewPtr, AllocSizeWithoutCookie);
- return;
- }
- RequiresZeroInitialization = true;
- }
- CGF.EmitCXXAggrConstructorCall(Ctor, NumElements, NewPtr,
- CCE->arg_begin(), CCE->arg_end(),
- RequiresZeroInitialization);
- return;
- } else if (Init && isa<ImplicitValueInitExpr>(Init) &&
- CGF.CGM.getTypes().isZeroInitializable(ElementType)) {
- // Optimization: since zero initialization will just set the memory
- // to all zeroes, generate a single memset to do it in one shot.
- EmitZeroMemSet(CGF, ElementType, NewPtr, AllocSizeWithoutCookie);
- return;
- }
- CGF.EmitNewArrayInitializer(E, ElementType, NewPtr, NumElements);
- return;
- }
- if (!Init)
- return;
- StoreAnyExprIntoOneUnit(CGF, Init, E->getAllocatedType(), NewPtr);
- }
- namespace {
- /// A cleanup to call the given 'operator delete' function upon
- /// abnormal exit from a new expression.
- class CallDeleteDuringNew : public EHScopeStack::Cleanup {
- size_t NumPlacementArgs;
- const FunctionDecl *OperatorDelete;
- llvm::Value *Ptr;
- llvm::Value *AllocSize;
- RValue *getPlacementArgs() { return reinterpret_cast<RValue*>(this+1); }
- public:
- static size_t getExtraSize(size_t NumPlacementArgs) {
- return NumPlacementArgs * sizeof(RValue);
- }
- CallDeleteDuringNew(size_t NumPlacementArgs,
- const FunctionDecl *OperatorDelete,
- llvm::Value *Ptr,
- llvm::Value *AllocSize)
- : NumPlacementArgs(NumPlacementArgs), OperatorDelete(OperatorDelete),
- Ptr(Ptr), AllocSize(AllocSize) {}
- void setPlacementArg(unsigned I, RValue Arg) {
- assert(I < NumPlacementArgs && "index out of range");
- getPlacementArgs()[I] = Arg;
- }
- void Emit(CodeGenFunction &CGF, Flags flags) {
- const FunctionProtoType *FPT
- = OperatorDelete->getType()->getAs<FunctionProtoType>();
- assert(FPT->getNumArgs() == NumPlacementArgs + 1 ||
- (FPT->getNumArgs() == 2 && NumPlacementArgs == 0));
- CallArgList DeleteArgs;
- // The first argument is always a void*.
- FunctionProtoType::arg_type_iterator AI = FPT->arg_type_begin();
- DeleteArgs.add(RValue::get(Ptr), *AI++);
- // A member 'operator delete' can take an extra 'size_t' argument.
- if (FPT->getNumArgs() == NumPlacementArgs + 2)
- DeleteArgs.add(RValue::get(AllocSize), *AI++);
- // Pass the rest of the arguments, which must match exactly.
- for (unsigned I = 0; I != NumPlacementArgs; ++I)
- DeleteArgs.add(getPlacementArgs()[I], *AI++);
- // Call 'operator delete'.
- CGF.EmitCall(CGF.CGM.getTypes().arrangeFunctionCall(DeleteArgs, FPT),
- CGF.CGM.GetAddrOfFunction(OperatorDelete),
- ReturnValueSlot(), DeleteArgs, OperatorDelete);
- }
- };
- /// A cleanup to call the given 'operator delete' function upon
- /// abnormal exit from a new expression when the new expression is
- /// conditional.
- class CallDeleteDuringConditionalNew : public EHScopeStack::Cleanup {
- size_t NumPlacementArgs;
- const FunctionDecl *OperatorDelete;
- DominatingValue<RValue>::saved_type Ptr;
- DominatingValue<RValue>::saved_type AllocSize;
- DominatingValue<RValue>::saved_type *getPlacementArgs() {
- return reinterpret_cast<DominatingValue<RValue>::saved_type*>(this+1);
- }
- public:
- static size_t getExtraSize(size_t NumPlacementArgs) {
- return NumPlacementArgs * sizeof(DominatingValue<RValue>::saved_type);
- }
- CallDeleteDuringConditionalNew(size_t NumPlacementArgs,
- const FunctionDecl *OperatorDelete,
- DominatingValue<RValue>::saved_type Ptr,
- DominatingValue<RValue>::saved_type AllocSize)
- : NumPlacementArgs(NumPlacementArgs), OperatorDelete(OperatorDelete),
- Ptr(Ptr), AllocSize(AllocSize) {}
- void setPlacementArg(unsigned I, DominatingValue<RValue>::saved_type Arg) {
- assert(I < NumPlacementArgs && "index out of range");
- getPlacementArgs()[I] = Arg;
- }
- void Emit(CodeGenFunction &CGF, Flags flags) {
- const FunctionProtoType *FPT
- = OperatorDelete->getType()->getAs<FunctionProtoType>();
- assert(FPT->getNumArgs() == NumPlacementArgs + 1 ||
- (FPT->getNumArgs() == 2 && NumPlacementArgs == 0));
- CallArgList DeleteArgs;
- // The first argument is always a void*.
- FunctionProtoType::arg_type_iterator AI = FPT->arg_type_begin();
- DeleteArgs.add(Ptr.restore(CGF), *AI++);
- // A member 'operator delete' can take an extra 'size_t' argument.
- if (FPT->getNumArgs() == NumPlacementArgs + 2) {
- RValue RV = AllocSize.restore(CGF);
- DeleteArgs.add(RV, *AI++);
- }
- // Pass the rest of the arguments, which must match exactly.
- for (unsigned I = 0; I != NumPlacementArgs; ++I) {
- RValue RV = getPlacementArgs()[I].restore(CGF);
- DeleteArgs.add(RV, *AI++);
- }
- // Call 'operator delete'.
- CGF.EmitCall(CGF.CGM.getTypes().arrangeFunctionCall(DeleteArgs, FPT),
- CGF.CGM.GetAddrOfFunction(OperatorDelete),
- ReturnValueSlot(), DeleteArgs, OperatorDelete);
- }
- };
- }
- /// Enter a cleanup to call 'operator delete' if the initializer in a
- /// new-expression throws.
- static void EnterNewDeleteCleanup(CodeGenFunction &CGF,
- const CXXNewExpr *E,
- llvm::Value *NewPtr,
- llvm::Value *AllocSize,
- const CallArgList &NewArgs) {
- // If we're not inside a conditional branch, then the cleanup will
- // dominate and we can do the easier (and more efficient) thing.
- if (!CGF.isInConditionalBranch()) {
- CallDeleteDuringNew *Cleanup = CGF.EHStack
- .pushCleanupWithExtra<CallDeleteDuringNew>(EHCleanup,
- E->getNumPlacementArgs(),
- E->getOperatorDelete(),
- NewPtr, AllocSize);
- for (unsigned I = 0, N = E->getNumPlacementArgs(); I != N; ++I)
- Cleanup->setPlacementArg(I, NewArgs[I+1].RV);
- return;
- }
- // Otherwise, we need to save all this stuff.
- DominatingValue<RValue>::saved_type SavedNewPtr =
- DominatingValue<RValue>::save(CGF, RValue::get(NewPtr));
- DominatingValue<RValue>::saved_type SavedAllocSize =
- DominatingValue<RValue>::save(CGF, RValue::get(AllocSize));
- CallDeleteDuringConditionalNew *Cleanup = CGF.EHStack
- .pushCleanupWithExtra<CallDeleteDuringConditionalNew>(EHCleanup,
- E->getNumPlacementArgs(),
- E->getOperatorDelete(),
- SavedNewPtr,
- SavedAllocSize);
- for (unsigned I = 0, N = E->getNumPlacementArgs(); I != N; ++I)
- Cleanup->setPlacementArg(I,
- DominatingValue<RValue>::save(CGF, NewArgs[I+1].RV));
- CGF.initFullExprCleanup();
- }
- llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
- // The element type being allocated.
- QualType allocType = getContext().getBaseElementType(E->getAllocatedType());
- // 1. Build a call to the allocation function.
- FunctionDecl *allocator = E->getOperatorNew();
- const FunctionProtoType *allocatorType =
- allocator->getType()->castAs<FunctionProtoType>();
- CallArgList allocatorArgs;
- // The allocation size is the first argument.
- QualType sizeType = getContext().getSizeType();
- // If there is a brace-initializer, we cannot allocate fewer elements than
- // there are initializers.
- unsigned minElements = 0;
- if (E->isArray() && E->hasInitializer()) {
- if (const InitListExpr *ILE = dyn_cast<InitListExpr>(E->getInitializer()))
- minElements = ILE->getNumInits();
- }
- llvm::Value *numElements = 0;
- llvm::Value *allocSizeWithoutCookie = 0;
- llvm::Value *allocSize =
- EmitCXXNewAllocSize(*this, E, minElements, numElements,
- allocSizeWithoutCookie);
-
- allocatorArgs.add(RValue::get(allocSize), sizeType);
- // Emit the rest of the arguments.
- // FIXME: Ideally, this should just use EmitCallArgs.
- CXXNewExpr::const_arg_iterator placementArg = E->placement_arg_begin();
- // First, use the types from the function type.
- // We start at 1 here because the first argument (the allocation size)
- // has already been emitted.
- for (unsigned i = 1, e = allocatorType->getNumArgs(); i != e;
- ++i, ++placementArg) {
- QualType argType = allocatorType->getArgType(i);
- assert(getContext().hasSameUnqualifiedType(argType.getNonReferenceType(),
- placementArg->getType()) &&
- "type mismatch in call argument!");
- EmitCallArg(allocatorArgs, *placementArg, argType);
- }
- // Either we've emitted all the call args, or we have a call to a
- // variadic function.
- assert((placementArg == E->placement_arg_end() ||
- allocatorType->isVariadic()) &&
- "Extra arguments to non-variadic function!");
- // If we still have any arguments, emit them using the type of the argument.
- for (CXXNewExpr::const_arg_iterator placementArgsEnd = E->placement_arg_end();
- placementArg != placementArgsEnd; ++placementArg) {
- EmitCallArg(allocatorArgs, *placementArg, placementArg->getType());
- }
- // Emit the allocation call. If the allocator is a global placement
- // operator, just "inline" it directly.
- RValue RV;
- if (allocator->isReservedGlobalPlacementOperator()) {
- assert(allocatorArgs.size() == 2);
- RV = allocatorArgs[1].RV;
- // TODO: kill any unnecessary computations done for the size
- // argument.
- } else {
- RV = EmitCall(CGM.getTypes().arrangeFunctionCall(allocatorArgs,
- allocatorType),
- CGM.GetAddrOfFunction(allocator), ReturnValueSlot(),
- allocatorArgs, allocator);
- }
- // Emit a null check on the allocation result if the allocation
- // function is allowed to return null (because it has a non-throwing
- // exception spec; for this part, we inline
- // CXXNewExpr::shouldNullCheckAllocation()) and we have an
- // interesting initializer.
- bool nullCheck = allocatorType->isNothrow(getContext()) &&
- (!allocType.isPODType(getContext()) || E->hasInitializer());
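- // For example, 'new (std::nothrow) X()' may yield a null pointer, and X's
- // constructor must not run on it, so the initialization below is emitted
- // under a null check.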
- llvm::BasicBlock *nullCheckBB = 0;
- llvm::BasicBlock *contBB = 0;
- llvm::Value *allocation = RV.getScalarVal();
- unsigned AS =
- cast<llvm::PointerType>(allocation->getType())->getAddressSpace();
- // The null-check means that the initializer is conditionally
- // evaluated.
- ConditionalEvaluation conditional(*this);
- if (nullCheck) {
- conditional.begin(*this);
- nullCheckBB = Builder.GetInsertBlock();
- llvm::BasicBlock *notNullBB = createBasicBlock("new.notnull");
- contBB = createBasicBlock("new.cont");
- llvm::Value *isNull = Builder.CreateIsNull(allocation, "new.isnull");
- Builder.CreateCondBr(isNull, contBB, notNullBB);
- EmitBlock(notNullBB);
- }
- // If there's an operator delete, enter a cleanup to call it if an
- // exception is thrown.
- EHScopeStack::stable_iterator operatorDeleteCleanup;
- llvm::Instruction *cleanupDominator = 0;
- if (E->getOperatorDelete() &&
- !E->getOperatorDelete()->isReservedGlobalPlacementOperator()) {
- EnterNewDeleteCleanup(*this, E, allocation, allocSize, allocatorArgs);
- operatorDeleteCleanup = EHStack.stable_begin();
- cleanupDominator = Builder.CreateUnreachable();
- }
- assert((allocSize == allocSizeWithoutCookie) ==
- CalculateCookiePadding(*this, E).isZero());
- if (allocSize != allocSizeWithoutCookie) {
- assert(E->isArray());
- allocation = CGM.getCXXABI().InitializeArrayCookie(*this, allocation,
- numElements,
- E, allocType);
- }
- llvm::Type *elementPtrTy
- = ConvertTypeForMem(allocType)->getPointerTo(AS);
- llvm::Value *result = Builder.CreateBitCast(allocation, elementPtrTy);
- EmitNewInitializer(*this, E, allocType, result, numElements,
- allocSizeWithoutCookie);
- if (E->isArray()) {
- // NewPtr is a pointer to the base element type. If we're
- // allocating an array of arrays, we'll need to cast back to the
- // array pointer type.
- llvm::Type *resultType = ConvertTypeForMem(E->getType());
- if (result->getType() != resultType)
- result = Builder.CreateBitCast(result, resultType);
- }
- // Deactivate the 'operator delete' cleanup if we finished
- // initialization.
- if (operatorDeleteCleanup.isValid()) {
- DeactivateCleanupBlock(operatorDeleteCleanup, cleanupDominator);
- cleanupDominator->eraseFromParent();
- }
- if (nullCheck) {
- conditional.end(*this);
- llvm::BasicBlock *notNullBB = Builder.GetInsertBlock();
- EmitBlock(contBB);
- llvm::PHINode *PHI = Builder.CreatePHI(result->getType(), 2);
- PHI->addIncoming(result, notNullBB);
- PHI->addIncoming(llvm::Constant::getNullValue(result->getType()),
- nullCheckBB);
- result = PHI;
- }
-
- return result;
- }
- void CodeGenFunction::EmitDeleteCall(const FunctionDecl *DeleteFD,
- llvm::Value *Ptr,
- QualType DeleteTy) {
- assert(DeleteFD->getOverloadedOperator() == OO_Delete);
- const FunctionProtoType *DeleteFTy =
- DeleteFD->getType()->getAs<FunctionProtoType>();
- CallArgList DeleteArgs;
- // Check if we need to pass the size to the delete operator.
- llvm::Value *Size = 0;
- QualType SizeTy;
- if (DeleteFTy->getNumArgs() == 2) {
- SizeTy = DeleteFTy->getArgType(1);
- CharUnits DeleteTypeSize = getContext().getTypeSizeInChars(DeleteTy);
- Size = llvm::ConstantInt::get(ConvertType(SizeTy),
- DeleteTypeSize.getQuantity());
- }
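- // (A two-argument 'operator delete(void *, size_t)' thus receives
- // sizeof(DeleteTy) here; the array case is handled by CallArrayDelete,
- // which scales by the element count and adds any cookie size.)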
-
- QualType ArgTy = DeleteFTy->getArgType(0);
- llvm::Value *DeletePtr = Builder.CreateBitCast(Ptr, ConvertType(ArgTy));
- DeleteArgs.add(RValue::get(DeletePtr), ArgTy);
- if (Size)
- DeleteArgs.add(RValue::get(Size), SizeTy);
- // Emit the call to delete.
- EmitCall(CGM.getTypes().arrangeFunctionCall(DeleteArgs, DeleteFTy),
- CGM.GetAddrOfFunction(DeleteFD), ReturnValueSlot(),
- DeleteArgs, DeleteFD);
- }
- namespace {
- /// Calls the given 'operator delete' on a single object.
- struct CallObjectDelete : EHScopeStack::Cleanup {
- llvm::Value *Ptr;
- const FunctionDecl *OperatorDelete;
- QualType ElementType;
- CallObjectDelete(llvm::Value *Ptr,
- const FunctionDecl *OperatorDelete,
- QualType ElementType)
- : Ptr(Ptr), OperatorDelete(OperatorDelete), ElementType(ElementType) {}
- void Emit(CodeGenFunction &CGF, Flags flags) {
- CGF.EmitDeleteCall(OperatorDelete, Ptr, ElementType);
- }
- };
- }
- /// Emit the code for deleting a single object.
- static void EmitObjectDelete(CodeGenFunction &CGF,
- const FunctionDecl *OperatorDelete,
- llvm::Value *Ptr,
- QualType ElementType,
- bool UseGlobalDelete) {
- // Find the destructor for the type, if applicable. If the
- // destructor is virtual, we'll just emit the vcall and return.
- const CXXDestructorDecl *Dtor = 0;
- if (const RecordType *RT = ElementType->getAs<RecordType>()) {
- CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
- if (RD->hasDefinition() && !RD->hasTrivialDestructor()) {
- Dtor = RD->getDestructor();
- if (Dtor->isVirtual()) {
- if (UseGlobalDelete) {
- // If we're supposed to call the global delete, make sure we do so
- // even if the destructor throws.
- CGF.EHStack.pushCleanup<CallObjectDelete>(NormalAndEHCleanup,
- Ptr, OperatorDelete,
- ElementType);
- }
-
- llvm::Type *Ty =
- CGF.getTypes().GetFunctionType(
- CGF.getTypes().arrangeCXXDestructor(Dtor, Dtor_Complete));
-
- llvm::Value *Callee
- = CGF.BuildVirtualCall(Dtor,
- UseGlobalDelete? Dtor_Complete : Dtor_Deleting,
- Ptr, Ty);
- CGF.EmitCXXMemberCall(Dtor, Callee, ReturnValueSlot(), Ptr, /*VTT=*/0,
- 0, 0);
- if (UseGlobalDelete) {
- CGF.PopCleanupBlock();
- }
-
- return;
- }
- }
- }
- // Make sure that we call delete even if the dtor throws.
- // This doesn't have to be a conditional cleanup because we're going
- // to pop it off in a second.
- CGF.EHStack.pushCleanup<CallObjectDelete>(NormalAndEHCleanup,
- Ptr, OperatorDelete, ElementType);
- if (Dtor)
- CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete,
- /*ForVirtualBase=*/false, Ptr);
- else if (CGF.getLangOpts().ObjCAutoRefCount &&
- ElementType->isObjCLifetimeType()) {
- switch (ElementType.getObjCLifetime()) {
- case Qualifiers::OCL_None:
- case Qualifiers::OCL_ExplicitNone:
- case Qualifiers::OCL_Autoreleasing:
- break;
- case Qualifiers::OCL_Strong: {
- // Load the pointer value.
- llvm::Value *PtrValue = CGF.Builder.CreateLoad(Ptr,
- ElementType.isVolatileQualified());
-
- CGF.EmitARCRelease(PtrValue, /*precise*/ true);
- break;
- }
-
- case Qualifiers::OCL_Weak:
- CGF.EmitARCDestroyWeak(Ptr);
- break;
- }
- }
-
- CGF.PopCleanupBlock();
- }
- namespace {
- /// Calls the given 'operator delete' on an array of objects.
- struct CallArrayDelete : EHScopeStack::Cleanup {
- llvm::Value *Ptr;
- const FunctionDecl *OperatorDelete;
- llvm::Value *NumElements;
- QualType ElementType;
- CharUnits CookieSize;
- CallArrayDelete(llvm::Value *Ptr,
- const FunctionDecl *OperatorDelete,
- llvm::Value *NumElements,
- QualType ElementType,
- CharUnits CookieSize)
- : Ptr(Ptr), OperatorDelete(OperatorDelete), NumElements(NumElements),
- ElementType(ElementType), CookieSize(CookieSize) {}
- void Emit(CodeGenFunction &CGF, Flags flags) {
- const FunctionProtoType *DeleteFTy =
- OperatorDelete->getType()->getAs<FunctionProtoType>();
- assert(DeleteFTy->getNumArgs() == 1 || DeleteFTy->getNumArgs() == 2);
- CallArgList Args;
-
- // Pass the pointer as the first argument.
- QualType VoidPtrTy = DeleteFTy->getArgType(0);
- llvm::Value *DeletePtr
- = CGF.Builder.CreateBitCast(Ptr, CGF.ConvertType(VoidPtrTy));
- Args.add(RValue::get(DeletePtr), VoidPtrTy);
- // Pass the original requested size as the second argument.
- if (DeleteFTy->getNumArgs() == 2) {
- QualType SizeType = DeleteFTy->getArgType(1);
- llvm::IntegerType *SizeTy
- = cast<llvm::IntegerType>(CGF.ConvertType(SizeType));
-
- CharUnits ElementTypeSize =
- CGF.CGM.getContext().getTypeSizeInChars(ElementType);
- // The size of an element, multiplied by the number of elements.
- llvm::Value *Size
- = llvm::ConstantInt::get(SizeTy, ElementTypeSize.getQuantity());
- Size = CGF.Builder.CreateMul(Size, NumElements);
- // Plus the size of the cookie if applicable.
- if (!CookieSize.isZero()) {
- llvm::Value *CookieSizeV
- = llvm::ConstantInt::get(SizeTy, CookieSize.getQuantity());
- Size = CGF.Builder.CreateAdd(Size, CookieSizeV);
- }
- Args.add(RValue::get(Size), SizeType);
- }
- // Emit the call to delete.
- CGF.EmitCall(CGF.getTypes().arrangeFunctionCall(Args, DeleteFTy),
- CGF.CGM.GetAddrOfFunction(OperatorDelete),
- ReturnValueSlot(), Args, OperatorDelete);
- }
- };
- }

/// Emit the code for deleting an array of objects.
static void EmitArrayDelete(CodeGenFunction &CGF,
                            const CXXDeleteExpr *E,
                            llvm::Value *deletedPtr,
                            QualType elementType) {
  llvm::Value *numElements = 0;
  llvm::Value *allocatedPtr = 0;
  CharUnits cookieSize;
  CGF.CGM.getCXXABI().ReadArrayCookie(CGF, deletedPtr, E, elementType,
                                      numElements, allocatedPtr, cookieSize);

  assert(allocatedPtr && "ReadArrayCookie didn't set allocated pointer");

  // Make sure that we call delete even if one of the dtors throws.
  const FunctionDecl *operatorDelete = E->getOperatorDelete();
  CGF.EHStack.pushCleanup<CallArrayDelete>(NormalAndEHCleanup,
                                           allocatedPtr, operatorDelete,
                                           numElements, elementType,
                                           cookieSize);

  // Destroy the elements.
  if (QualType::DestructionKind dtorKind = elementType.isDestructedType()) {
    assert(numElements && "no element count for a type with a destructor!");

    llvm::Value *arrayEnd =
      CGF.Builder.CreateInBoundsGEP(deletedPtr, numElements, "delete.end");

    // Note that it is legal to allocate a zero-length array, and we
    // can never fold the check away because the length should always
    // come from a cookie.
    CGF.emitArrayDestroy(deletedPtr, arrayEnd, elementType,
                         CGF.getDestroyer(dtorKind),
                         /*checkZeroLength*/ true,
                         CGF.needsEHCleanup(dtorKind));
  }

  // Pop the cleanup block.
  CGF.PopCleanupBlock();
}
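
// [Editor's note] Sketch of the sequence above for 'delete [] p' where the
// element type has a non-trivial destructor:
//
//   1. ReadArrayCookie recovers the element count and the true allocation
//      start from the cookie that array new stored before the first element.
//   2. CallArrayDelete is pushed as NormalAndEHCleanup, so the memory is
//      freed even if an element destructor throws.
//   3. Elements in [deletedPtr, deletedPtr + numElements) are destroyed; the
//      zero-length check cannot be folded because the count is only known at
//      run time, via the cookie.
//   4. PopCleanupBlock finally emits the call to 'operator delete[]'.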

void CodeGenFunction::EmitCXXDeleteExpr(const CXXDeleteExpr *E) {
  // Get at the argument before we performed the implicit conversion
  // to void*.
  const Expr *Arg = E->getArgument();
  while (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(Arg)) {
    if (ICE->getCastKind() != CK_UserDefinedConversion &&
        ICE->getType()->isVoidPointerType())
      Arg = ICE->getSubExpr();
    else
      break;
  }

  llvm::Value *Ptr = EmitScalarExpr(Arg);

  // Null check the pointer.
  llvm::BasicBlock *DeleteNotNull = createBasicBlock("delete.notnull");
  llvm::BasicBlock *DeleteEnd = createBasicBlock("delete.end");

  llvm::Value *IsNull = Builder.CreateIsNull(Ptr, "isnull");

  Builder.CreateCondBr(IsNull, DeleteEnd, DeleteNotNull);
  EmitBlock(DeleteNotNull);

  // We might be deleting a pointer to array.  If so, GEP down to the
  // first non-array element.
  // (this assumes that A(*)[3][7] is converted to [3 x [7 x %A]]*)
  QualType DeleteTy = Arg->getType()->getAs<PointerType>()->getPointeeType();
  if (DeleteTy->isConstantArrayType()) {
    llvm::Value *Zero = Builder.getInt32(0);
    SmallVector<llvm::Value*,8> GEP;

    GEP.push_back(Zero); // point at the outermost array

    // For each layer of array type we're pointing at:
    while (const ConstantArrayType *Arr
             = getContext().getAsConstantArrayType(DeleteTy)) {
      // 1. Unpeel the array type.
      DeleteTy = Arr->getElementType();
      // 2. GEP to the first element of the array.
      GEP.push_back(Zero);
    }

    Ptr = Builder.CreateInBoundsGEP(Ptr, GEP, "del.first");
  }

  assert(ConvertTypeForMem(DeleteTy) ==
         cast<llvm::PointerType>(Ptr->getType())->getElementType());

  if (E->isArrayForm()) {
    EmitArrayDelete(*this, E, Ptr, DeleteTy);
  } else {
    EmitObjectDelete(*this, E->getOperatorDelete(), Ptr, DeleteTy,
                     E->isGlobalDelete());
  }

  EmitBlock(DeleteEnd);
}
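
// [Editor's note] Hedged illustrations of the two fix-ups above. Every delete
// is null-checked first:
//
//   delete p;   // br (p == null) ? delete.end : delete.notnull
//
// And for a pointer-to-array operand such as
//
//   A (*p)[3][7] = ...;
//   delete [] p;
//
// the pointee lowers to [3 x [7 x %A]]*, so a GEP with indices {0, 0, 0}
// peels both constant-array layers and yields the %A* ("del.first") that the
// destructor loop and cookie logic expect.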

static llvm::Constant *getBadTypeidFn(CodeGenFunction &CGF) {
  // void __cxa_bad_typeid();

  llvm::FunctionType *FTy = llvm::FunctionType::get(CGF.VoidTy, false);

  return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_bad_typeid");
}

static void EmitBadTypeidCall(CodeGenFunction &CGF) {
  llvm::Value *Fn = getBadTypeidFn(CGF);
  CGF.EmitCallOrInvoke(Fn).setDoesNotReturn();
  CGF.Builder.CreateUnreachable();
}
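
// [Editor's note] __cxa_bad_typeid (and __cxa_bad_cast below) only ever
// throws, so the call is marked noreturn and the block is terminated,
// conceptually:
//
//   call void @__cxa_bad_typeid()   ; noreturn
//   unreachable
//
// which tells LLVM the fall-through path is dead.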

static llvm::Value *EmitTypeidFromVTable(CodeGenFunction &CGF,
                                         const Expr *E,
                                         llvm::Type *StdTypeInfoPtrTy) {
  // Get the vtable pointer.
  llvm::Value *ThisPtr = CGF.EmitLValue(E).getAddress();

  // C++ [expr.typeid]p2:
  //   If the glvalue expression is obtained by applying the unary * operator
  //   to a pointer and the pointer is a null pointer value, the typeid
  //   expression throws the std::bad_typeid exception.
  if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(E->IgnoreParens())) {
    if (UO->getOpcode() == UO_Deref) {
      llvm::BasicBlock *BadTypeidBlock =
        CGF.createBasicBlock("typeid.bad_typeid");
      llvm::BasicBlock *EndBlock =
        CGF.createBasicBlock("typeid.end");

      llvm::Value *IsNull = CGF.Builder.CreateIsNull(ThisPtr);
      CGF.Builder.CreateCondBr(IsNull, BadTypeidBlock, EndBlock);

      CGF.EmitBlock(BadTypeidBlock);
      EmitBadTypeidCall(CGF);
      CGF.EmitBlock(EndBlock);
    }
  }

  llvm::Value *Value = CGF.GetVTablePtr(ThisPtr,
                                        StdTypeInfoPtrTy->getPointerTo());

  // Load the type info.
  Value = CGF.Builder.CreateConstInBoundsGEP1_64(Value, -1ULL);
  return CGF.Builder.CreateLoad(Value);
}
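
// [Editor's note] In the Itanium ABI the std::type_info pointer sits one slot
// before the vtable address point, hence the GEP by -1 above. A rough sketch
// (pseudo-IR, not verbatim compiler output):
//
//   struct A { virtual ~A(); };
//   const std::type_info &f(A &a) { return typeid(a); }
//
//   %vtable = load vptr of %a
//   %slot   = getelementptr %vtable, -1
//   %ti     = load %slot        ; type_info of the dynamic type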

llvm::Value *CodeGenFunction::EmitCXXTypeidExpr(const CXXTypeidExpr *E) {
  llvm::Type *StdTypeInfoPtrTy =
    ConvertType(E->getType())->getPointerTo();

  if (E->isTypeOperand()) {
    llvm::Constant *TypeInfo =
      CGM.GetAddrOfRTTIDescriptor(E->getTypeOperand());
    return Builder.CreateBitCast(TypeInfo, StdTypeInfoPtrTy);
  }

  // C++ [expr.typeid]p2:
  //   When typeid is applied to a glvalue expression whose type is a
  //   polymorphic class type, the result refers to a std::type_info object
  //   representing the type of the most derived object (that is, the dynamic
  //   type) to which the glvalue refers.
  if (E->getExprOperand()->isGLValue()) {
    if (const RecordType *RT =
          E->getExprOperand()->getType()->getAs<RecordType>()) {
      const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
      if (RD->isPolymorphic())
        return EmitTypeidFromVTable(*this, E->getExprOperand(),
                                    StdTypeInfoPtrTy);
    }
  }

  QualType OperandTy = E->getExprOperand()->getType();
  return Builder.CreateBitCast(CGM.GetAddrOfRTTIDescriptor(OperandTy),
                               StdTypeInfoPtrTy);
}
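
// [Editor's note] The three cases above, illustrated:
//
//   typeid(int)      // type operand: constant RTTI descriptor, no code
//   typeid(*p)       // glvalue of polymorphic type: read through the vtable,
//                    // with a null check that can raise std::bad_typeid
//   typeid(x)        // non-polymorphic operand: static descriptor for the
//                    // operand's declared type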

static llvm::Constant *getDynamicCastFn(CodeGenFunction &CGF) {
  // void *__dynamic_cast(const void *sub,
  //                      const abi::__class_type_info *src,
  //                      const abi::__class_type_info *dst,
  //                      std::ptrdiff_t src2dst_offset);

  llvm::Type *Int8PtrTy = CGF.Int8PtrTy;
  llvm::Type *PtrDiffTy =
    CGF.ConvertType(CGF.getContext().getPointerDiffType());

  llvm::Type *Args[4] = { Int8PtrTy, Int8PtrTy, Int8PtrTy, PtrDiffTy };

  llvm::FunctionType *FTy =
    llvm::FunctionType::get(Int8PtrTy, Args, false);

  return CGF.CGM.CreateRuntimeFunction(FTy, "__dynamic_cast");
}
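
// [Editor's note] Per the Itanium C++ ABI, src2dst_offset is a hint to the
// runtime:
//   >= 0  src is a unique public non-virtual base of dst at that offset
//   -1    no hint provided
//   -2    src is not a public base of dst
//   -3    src is a multiple public non-virtual base of dst
// The FIXME in EmitDynamicCastCall below notes that this code always passes
// -1, which is correct but gives up the runtime's fast paths.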

static llvm::Constant *getBadCastFn(CodeGenFunction &CGF) {
  // void __cxa_bad_cast();
  llvm::FunctionType *FTy = llvm::FunctionType::get(CGF.VoidTy, false);
  return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_bad_cast");
}

static void EmitBadCastCall(CodeGenFunction &CGF) {
  llvm::Value *Fn = getBadCastFn(CGF);
  CGF.EmitCallOrInvoke(Fn).setDoesNotReturn();
  CGF.Builder.CreateUnreachable();
}

static llvm::Value *
EmitDynamicCastCall(CodeGenFunction &CGF, llvm::Value *Value,
                    QualType SrcTy, QualType DestTy,
                    llvm::BasicBlock *CastEnd) {
  llvm::Type *PtrDiffLTy =
    CGF.ConvertType(CGF.getContext().getPointerDiffType());
  llvm::Type *DestLTy = CGF.ConvertType(DestTy);

  if (const PointerType *PTy = DestTy->getAs<PointerType>()) {
    if (PTy->getPointeeType()->isVoidType()) {
      // C++ [expr.dynamic.cast]p7:
      //   If T is "pointer to cv void," then the result is a pointer to the
      //   most derived object pointed to by v.

      // Get the vtable pointer.
      llvm::Value *VTable = CGF.GetVTablePtr(Value, PtrDiffLTy->getPointerTo());

      // Get the offset-to-top from the vtable.
      llvm::Value *OffsetToTop =
        CGF.Builder.CreateConstInBoundsGEP1_64(VTable, -2ULL);
      OffsetToTop = CGF.Builder.CreateLoad(OffsetToTop, "offset.to.top");

      // Finally, add the offset to the pointer.
      Value = CGF.EmitCastToVoidPtr(Value);
      Value = CGF.Builder.CreateInBoundsGEP(Value, OffsetToTop);

      return CGF.Builder.CreateBitCast(Value, DestLTy);
    }
  }

  QualType SrcRecordTy;
  QualType DestRecordTy;

  if (const PointerType *DestPTy = DestTy->getAs<PointerType>()) {
    SrcRecordTy = SrcTy->castAs<PointerType>()->getPointeeType();
    DestRecordTy = DestPTy->getPointeeType();
  } else {
    SrcRecordTy = SrcTy;
    DestRecordTy = DestTy->castAs<ReferenceType>()->getPointeeType();
  }

  assert(SrcRecordTy->isRecordType() && "source type must be a record type!");
  assert(DestRecordTy->isRecordType() && "dest type must be a record type!");

  llvm::Value *SrcRTTI =
    CGF.CGM.GetAddrOfRTTIDescriptor(SrcRecordTy.getUnqualifiedType());
  llvm::Value *DestRTTI =
    CGF.CGM.GetAddrOfRTTIDescriptor(DestRecordTy.getUnqualifiedType());

  // FIXME: Actually compute a hint here.
  llvm::Value *OffsetHint = llvm::ConstantInt::get(PtrDiffLTy, -1ULL);

  // Emit the call to __dynamic_cast.
  Value = CGF.EmitCastToVoidPtr(Value);
  Value = CGF.Builder.CreateCall4(getDynamicCastFn(CGF), Value,
                                  SrcRTTI, DestRTTI, OffsetHint);
  Value = CGF.Builder.CreateBitCast(Value, DestLTy);

  // C++ [expr.dynamic.cast]p9:
  //   A failed cast to reference type throws std::bad_cast.
  if (DestTy->isReferenceType()) {
    llvm::BasicBlock *BadCastBlock =
      CGF.createBasicBlock("dynamic_cast.bad_cast");

    llvm::Value *IsNull = CGF.Builder.CreateIsNull(Value);
    CGF.Builder.CreateCondBr(IsNull, BadCastBlock, CastEnd);

    CGF.EmitBlock(BadCastBlock);
    EmitBadCastCall(CGF);
  }

  return Value;
}
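
// [Editor's note] Illustrative lowerings of the two paths above (sketches,
// not verbatim output):
//
//   void *v = dynamic_cast<void*>(p);
//       // no runtime call: load the offset-to-top stored at vtable slot -2
//       // and add it to p, giving the most derived object
//
//   Derived &d = dynamic_cast<Derived&>(base);
//       // %r = call @__dynamic_cast(base, @srcRTTI, @dstRTTI, -1)
//       // if %r is null: branch to dynamic_cast.bad_cast, __cxa_bad_cast()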

static llvm::Value *EmitDynamicCastToNull(CodeGenFunction &CGF,
                                          QualType DestTy) {
  llvm::Type *DestLTy = CGF.ConvertType(DestTy);
  if (DestTy->isPointerType())
    return llvm::Constant::getNullValue(DestLTy);

  // C++ [expr.dynamic.cast]p9:
  //   A failed cast to reference type throws std::bad_cast.
  EmitBadCastCall(CGF);

  // __cxa_bad_cast never returns, so the block emitted here is unreachable.
  CGF.EmitBlock(CGF.createBasicBlock("dynamic_cast.end"));
  return llvm::UndefValue::get(DestLTy);
}
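
// [Editor's note] This path is taken when Sema has already proved the cast
// must fail (DCE->isAlwaysNull() below). For a pointer target the result
// simply folds to a constant null; a reference target has no null state, so
// the throwing call is emitted unconditionally and the undef result is never
// observed, the fresh block after __cxa_bad_cast being unreachable.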

llvm::Value *CodeGenFunction::EmitDynamicCast(llvm::Value *Value,
                                              const CXXDynamicCastExpr *DCE) {
  QualType DestTy = DCE->getTypeAsWritten();

  if (DCE->isAlwaysNull())
    return EmitDynamicCastToNull(*this, DestTy);

  QualType SrcTy = DCE->getSubExpr()->getType();

  // C++ [expr.dynamic.cast]p4:
  //   If the value of v is a null pointer value in the pointer case, the
  //   result is the null pointer value of type T.
  bool ShouldNullCheckSrcValue = SrcTy->isPointerType();

  llvm::BasicBlock *CastNull = 0;
  llvm::BasicBlock *CastNotNull = 0;
  llvm::BasicBlock *CastEnd = createBasicBlock("dynamic_cast.end");

  if (ShouldNullCheckSrcValue) {
    CastNull = createBasicBlock("dynamic_cast.null");
    CastNotNull = createBasicBlock("dynamic_cast.notnull");

    llvm::Value *IsNull = Builder.CreateIsNull(Value);
    Builder.CreateCondBr(IsNull, CastNull, CastNotNull);
    EmitBlock(CastNotNull);
  }

  Value = EmitDynamicCastCall(*this, Value, SrcTy, DestTy, CastEnd);

  if (ShouldNullCheckSrcValue) {
    EmitBranch(CastEnd);
    EmitBlock(CastNull);
    EmitBranch(CastEnd);
  }

  EmitBlock(CastEnd);

  if (ShouldNullCheckSrcValue) {
    llvm::PHINode *PHI = Builder.CreatePHI(Value->getType(), 2);
    PHI->addIncoming(Value, CastNotNull);
    PHI->addIncoming(llvm::Constant::getNullValue(Value->getType()), CastNull);

    Value = PHI;
  }

  return Value;
}
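
// [Editor's note] Sketch of the CFG for a pointer dynamic_cast, using the
// block names created above:
//
//   entry:                br (v == null), dynamic_cast.null, dynamic_cast.notnull
//   dynamic_cast.notnull: %r = ...EmitDynamicCastCall...; br dynamic_cast.end
//   dynamic_cast.null:    br dynamic_cast.end
//   dynamic_cast.end:     phi [%r, notnull], [null, null-block]
//
// Reference casts skip the null check (ShouldNullCheckSrcValue is false,
// since the source is not a pointer) and rely on the bad_cast path instead.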

void CodeGenFunction::EmitLambdaExpr(const LambdaExpr *E, AggValueSlot Slot) {
  RunCleanupsScope Scope(*this);

  CXXRecordDecl::field_iterator CurField = E->getLambdaClass()->field_begin();
  for (LambdaExpr::capture_init_iterator i = E->capture_init_begin(),
                                         e = E->capture_init_end();
       i != e; ++i, ++CurField) {
    // Emit the initializer for this capture into its field of the closure.
    LValue LV = EmitLValueForFieldInitialization(Slot.getAddr(), *CurField, 0);
    ArrayRef<VarDecl *> ArrayIndexes;
    if (CurField->getType()->isArrayType())
      ArrayIndexes = E->getCaptureInitIndexVars(i);
    EmitInitializerForField(*CurField, LV, *i, ArrayIndexes);
  }
}
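
// [Editor's note] An assumed example of what the loop above walks:
//
//   int x = 0; std::string s;
//   auto l = [x, s] { /* ... */ };
//
// The lambda's closure class has one field per capture, visited in lockstep
// with the capture initializers; each initializer is emitted directly into
// the matching field of the destination slot, and array captures carry index
// variables so the element-wise copy can be emitted as a loop.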