//===--- CGExprAgg.cpp - Emit LLVM Code from Aggregate Expressions --------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Aggregate Expr nodes as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "CGObjCRuntime.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/StmtVisitor.h"
#include "llvm/Constants.h"
#include "llvm/Function.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Intrinsics.h"
using namespace clang;
using namespace CodeGen;

//===----------------------------------------------------------------------===//
//                        Aggregate Expression Emitter
//===----------------------------------------------------------------------===//

namespace {
class AggExprEmitter : public StmtVisitor<AggExprEmitter> {
  CodeGenFunction &CGF;
  CGBuilderTy &Builder;
  AggValueSlot Dest;
  bool IgnoreResult;

  /// We want to use 'dest' as the return slot except under two
  /// conditions:
  ///   - The destination slot requires garbage collection, so we
  ///     need to use the GC API.
  ///   - The destination slot is potentially aliased.
  bool shouldUseDestForReturnSlot() const {
    return !(Dest.requiresGCollection() || Dest.isPotentiallyAliased());
  }

  ReturnValueSlot getReturnValueSlot() const {
    if (!shouldUseDestForReturnSlot())
      return ReturnValueSlot();

    return ReturnValueSlot(Dest.getAddr(), Dest.isVolatile());
  }
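
  /// EnsureSlot - Return the destination slot, materializing a temporary
  /// when the destination is being ignored so the visited expression still
  /// has somewhere to emit into.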
  AggValueSlot EnsureSlot(QualType T) {
    if (!Dest.isIgnored()) return Dest;
    return CGF.CreateAggTemp(T, "agg.tmp.ensured");
  }

public:
  AggExprEmitter(CodeGenFunction &cgf, AggValueSlot Dest,
                 bool ignore)
    : CGF(cgf), Builder(CGF.Builder), Dest(Dest),
      IgnoreResult(ignore) {
  }

  //===--------------------------------------------------------------------===//
  //                               Utilities
  //===--------------------------------------------------------------------===//

  /// EmitAggLoadOfLValue - Given an expression with aggregate type that
  /// represents an lvalue, this method emits the address of the lvalue,
  /// then loads the result into DestPtr.
  void EmitAggLoadOfLValue(const Expr *E);

  /// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
  void EmitFinalDestCopy(const Expr *E, LValue Src, bool Ignore = false);
  void EmitFinalDestCopy(const Expr *E, RValue Src, bool Ignore = false,
                         unsigned Alignment = 0);

  void EmitMoveFromReturnSlot(const Expr *E, RValue Src);

  AggValueSlot::NeedsGCBarriers_t needsGC(QualType T) {
    if (CGF.getLangOptions().getGC() && TypeRequiresGCollection(T))
      return AggValueSlot::NeedsGCBarriers;
    return AggValueSlot::DoesNotNeedGCBarriers;
  }

  bool TypeRequiresGCollection(QualType T);

  //===--------------------------------------------------------------------===//
  //                            Visitor Methods
  //===--------------------------------------------------------------------===//

  void VisitStmt(Stmt *S) {
    CGF.ErrorUnsupported(S, "aggregate expression");
  }
  void VisitParenExpr(ParenExpr *PE) { Visit(PE->getSubExpr()); }
  void VisitGenericSelectionExpr(GenericSelectionExpr *GE) {
    Visit(GE->getResultExpr());
  }
  void VisitUnaryExtension(UnaryOperator *E) { Visit(E->getSubExpr()); }
  void VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *E) {
    return Visit(E->getReplacement());
  }

  // l-values.
  void VisitDeclRefExpr(DeclRefExpr *DRE) { EmitAggLoadOfLValue(DRE); }
  void VisitMemberExpr(MemberExpr *ME) { EmitAggLoadOfLValue(ME); }
  void VisitUnaryDeref(UnaryOperator *E) { EmitAggLoadOfLValue(E); }
  void VisitStringLiteral(StringLiteral *E) { EmitAggLoadOfLValue(E); }
  void VisitCompoundLiteralExpr(CompoundLiteralExpr *E);
  void VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
    EmitAggLoadOfLValue(E);
  }
  void VisitBlockDeclRefExpr(const BlockDeclRefExpr *E) {
    EmitAggLoadOfLValue(E);
  }
  void VisitPredefinedExpr(const PredefinedExpr *E) {
    EmitAggLoadOfLValue(E);
  }

  // Operators.
  void VisitCastExpr(CastExpr *E);
  void VisitCallExpr(const CallExpr *E);
  void VisitStmtExpr(const StmtExpr *E);
  void VisitBinaryOperator(const BinaryOperator *BO);
  void VisitPointerToDataMemberBinaryOperator(const BinaryOperator *BO);
  void VisitBinAssign(const BinaryOperator *E);
  void VisitBinComma(const BinaryOperator *E);

  void VisitObjCMessageExpr(ObjCMessageExpr *E);
  void VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
    EmitAggLoadOfLValue(E);
  }

  void VisitAbstractConditionalOperator(const AbstractConditionalOperator *CO);
  void VisitChooseExpr(const ChooseExpr *CE);
  void VisitInitListExpr(InitListExpr *E);
  void VisitImplicitValueInitExpr(ImplicitValueInitExpr *E);
  void VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
    Visit(DAE->getExpr());
  }
  void VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E);
  void VisitCXXConstructExpr(const CXXConstructExpr *E);
  void VisitLambdaExpr(LambdaExpr *E);
  void VisitExprWithCleanups(ExprWithCleanups *E);
  void VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E);
  void VisitCXXTypeidExpr(CXXTypeidExpr *E) { EmitAggLoadOfLValue(E); }
  void VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E);
  void VisitOpaqueValueExpr(OpaqueValueExpr *E);

  void VisitPseudoObjectExpr(PseudoObjectExpr *E) {
    if (E->isGLValue()) {
      LValue LV = CGF.EmitPseudoObjectLValue(E);
      return EmitFinalDestCopy(E, LV);
    }

    CGF.EmitPseudoObjectRValue(E, EnsureSlot(E->getType()));
  }

  void VisitVAArgExpr(VAArgExpr *E);

  void EmitInitializationToLValue(Expr *E, LValue Address);
  void EmitNullInitializationToLValue(LValue Address);
  //  case Expr::ChooseExprClass:
  void VisitCXXThrowExpr(const CXXThrowExpr *E) { CGF.EmitCXXThrowExpr(E); }
  void VisitAtomicExpr(AtomicExpr *E) {
    CGF.EmitAtomicExpr(E, EnsureSlot(E->getType()).getAddr());
  }
};
}  // end anonymous namespace.

//===----------------------------------------------------------------------===//
//                                Utilities
//===----------------------------------------------------------------------===//

/// EmitAggLoadOfLValue - Given an expression with aggregate type that
/// represents an lvalue, this method emits the address of the lvalue,
/// then loads the result into DestPtr.
void AggExprEmitter::EmitAggLoadOfLValue(const Expr *E) {
  LValue LV = CGF.EmitLValue(E);
  EmitFinalDestCopy(E, LV);
}

/// \brief True if the given aggregate type requires special GC API calls.
bool AggExprEmitter::TypeRequiresGCollection(QualType T) {
  // Only record types have members that might require garbage collection.
  const RecordType *RecordTy = T->getAs<RecordType>();
  if (!RecordTy) return false;

  // Don't mess with non-trivial C++ types.
  RecordDecl *Record = RecordTy->getDecl();
  if (isa<CXXRecordDecl>(Record) &&
      (!cast<CXXRecordDecl>(Record)->hasTrivialCopyConstructor() ||
       !cast<CXXRecordDecl>(Record)->hasTrivialDestructor()))
    return false;

  // Check whether the type has an object member.
  return Record->hasObjectMember();
}

/// \brief Perform the final move to DestPtr if for some reason
/// getReturnValueSlot() didn't use it directly.
///
/// The idea is that you do something like this:
///   RValue Result = EmitSomething(..., getReturnValueSlot());
///   EmitMoveFromReturnSlot(E, Result);
///
/// If nothing interferes, this will cause the result to be emitted
/// directly into the return value slot.  Otherwise, a final move
/// will be performed.
void AggExprEmitter::EmitMoveFromReturnSlot(const Expr *E, RValue Src) {
  if (shouldUseDestForReturnSlot()) {
    // Logically, Dest.getAddr() should equal Src.getAggregateAddr().
    // The possibility of undef rvalues complicates that a lot,
    // though, so we can't really assert.
    return;
  }

  // Otherwise, do a final copy.
  assert(Dest.getAddr() != Src.getAggregateAddr());
  EmitFinalDestCopy(E, Src, /*Ignore*/ true);
}

/// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
void AggExprEmitter::EmitFinalDestCopy(const Expr *E, RValue Src, bool Ignore,
                                       unsigned Alignment) {
  assert(Src.isAggregate() && "value must be aggregate value!");

  // If Dest is ignored, then we're evaluating an aggregate expression
  // in a context (like an expression statement) that doesn't care
  // about the result.  C says that an lvalue-to-rvalue conversion is
  // performed in these cases; C++ says that it is not.  In either
  // case, we don't actually need to do anything unless the value is
  // volatile.
  if (Dest.isIgnored()) {
    if (!Src.isVolatileQualified() ||
        CGF.CGM.getLangOptions().CPlusPlus ||
        (IgnoreResult && Ignore))
      return;

    // If the source is volatile, we must read from it; to do that, we need
    // some place to put it.
    Dest = CGF.CreateAggTemp(E->getType(), "agg.tmp");
  }

  if (Dest.requiresGCollection()) {
    CharUnits size = CGF.getContext().getTypeSizeInChars(E->getType());
    llvm::Type *SizeTy = CGF.ConvertType(CGF.getContext().getSizeType());
    llvm::Value *SizeVal = llvm::ConstantInt::get(SizeTy, size.getQuantity());
    CGF.CGM.getObjCRuntime().EmitGCMemmoveCollectable(CGF,
                                                      Dest.getAddr(),
                                                      Src.getAggregateAddr(),
                                                      SizeVal);
    return;
  }

  // If the result of the assignment is used, copy the LHS there also.
  // FIXME: Pass VolatileDest as well.  I think we also need to merge volatile
  // from the source as well, as we can't eliminate it if either operand
  // is volatile, unless copy has volatile for both source and destination.
  CGF.EmitAggregateCopy(Dest.getAddr(), Src.getAggregateAddr(), E->getType(),
                        Dest.isVolatile() | Src.isVolatileQualified(),
                        Alignment);
}

/// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
void AggExprEmitter::EmitFinalDestCopy(const Expr *E, LValue Src, bool Ignore) {
  assert(Src.isSimple() && "Can't have aggregate bitfield, vector, etc");

  CharUnits Alignment = std::min(Src.getAlignment(), Dest.getAlignment());
  EmitFinalDestCopy(E, Src.asAggregateRValue(), Ignore,
                    Alignment.getQuantity());
}

//===----------------------------------------------------------------------===//
//                            Visitor Methods
//===----------------------------------------------------------------------===//

void AggExprEmitter::VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E){
  Visit(E->GetTemporaryExpr());
}

void AggExprEmitter::VisitOpaqueValueExpr(OpaqueValueExpr *e) {
  EmitFinalDestCopy(e, CGF.getOpaqueLValueMapping(e));
}

void
AggExprEmitter::VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
  if (E->getType().isPODType(CGF.getContext())) {
    // For a POD type, just emit a load of the lvalue + a copy, because our
    // compound literal might alias the destination.
    // FIXME: This is a band-aid; the real problem appears to be in our handling
    // of assignments, where we store directly into the LHS without checking
    // whether anything in the RHS aliases.
    EmitAggLoadOfLValue(E);
    return;
  }

  AggValueSlot Slot = EnsureSlot(E->getType());
  CGF.EmitAggExpr(E->getInitializer(), Slot);
}

void AggExprEmitter::VisitCastExpr(CastExpr *E) {
  switch (E->getCastKind()) {
  case CK_Dynamic: {
    assert(isa<CXXDynamicCastExpr>(E) && "CK_Dynamic without a dynamic_cast?");
    LValue LV = CGF.EmitCheckedLValue(E->getSubExpr());
    // FIXME: Do we also need to handle property references here?
    if (LV.isSimple())
      CGF.EmitDynamicCast(LV.getAddress(), cast<CXXDynamicCastExpr>(E));
    else
      CGF.CGM.ErrorUnsupported(E, "non-simple lvalue dynamic_cast");

    if (!Dest.isIgnored())
      CGF.CGM.ErrorUnsupported(E, "lvalue dynamic_cast with a destination");
    break;
  }

  case CK_ToUnion: {
    if (Dest.isIgnored()) break;

    // GCC union extension
    QualType Ty = E->getSubExpr()->getType();
    QualType PtrTy = CGF.getContext().getPointerType(Ty);
    llvm::Value *CastPtr = Builder.CreateBitCast(Dest.getAddr(),
                                                 CGF.ConvertType(PtrTy));
    EmitInitializationToLValue(E->getSubExpr(),
                               CGF.MakeAddrLValue(CastPtr, Ty));
    break;
  }

  case CK_DerivedToBase:
  case CK_BaseToDerived:
  case CK_UncheckedDerivedToBase: {
    llvm_unreachable("cannot perform hierarchy conversion in EmitAggExpr: "
                     "should have been unpacked before we got here");
  }

  case CK_LValueToRValue: // hope for downstream optimization
  case CK_NoOp:
  case CK_AtomicToNonAtomic:
  case CK_NonAtomicToAtomic:
  case CK_UserDefinedConversion:
  case CK_ConstructorConversion:
    assert(CGF.getContext().hasSameUnqualifiedType(E->getSubExpr()->getType(),
                                                   E->getType()) &&
           "Implicit cast types must be compatible");
    Visit(E->getSubExpr());
    break;

  case CK_LValueBitCast:
    llvm_unreachable("should not be emitting lvalue bitcast as rvalue");

  case CK_Dependent:
  case CK_BitCast:
  case CK_ArrayToPointerDecay:
  case CK_FunctionToPointerDecay:
  case CK_NullToPointer:
  case CK_NullToMemberPointer:
  case CK_BaseToDerivedMemberPointer:
  case CK_DerivedToBaseMemberPointer:
  case CK_MemberPointerToBoolean:
  case CK_ReinterpretMemberPointer:
  case CK_IntegralToPointer:
  case CK_PointerToIntegral:
  case CK_PointerToBoolean:
  case CK_ToVoid:
  case CK_VectorSplat:
  case CK_IntegralCast:
  case CK_IntegralToBoolean:
  case CK_IntegralToFloating:
  case CK_FloatingToIntegral:
  case CK_FloatingToBoolean:
  case CK_FloatingCast:
  case CK_CPointerToObjCPointerCast:
  case CK_BlockPointerToObjCPointerCast:
  case CK_AnyPointerToBlockPointerCast:
  case CK_ObjCObjectLValueCast:
  case CK_FloatingRealToComplex:
  case CK_FloatingComplexToReal:
  case CK_FloatingComplexToBoolean:
  case CK_FloatingComplexCast:
  case CK_FloatingComplexToIntegralComplex:
  case CK_IntegralRealToComplex:
  case CK_IntegralComplexToReal:
  case CK_IntegralComplexToBoolean:
  case CK_IntegralComplexCast:
  case CK_IntegralComplexToFloatingComplex:
  case CK_ARCProduceObject:
  case CK_ARCConsumeObject:
  case CK_ARCReclaimReturnedObject:
  case CK_ARCExtendBlockObject:
    llvm_unreachable("cast kind invalid for aggregate types");
  }
}
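
// A call that returns an aggregate by value can usually emit its result
// straight into our destination via the return value slot; otherwise
// EmitMoveFromReturnSlot performs the final copy.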
void AggExprEmitter::VisitCallExpr(const CallExpr *E) {
  if (E->getCallReturnType()->isReferenceType()) {
    EmitAggLoadOfLValue(E);
    return;
  }

  RValue RV = CGF.EmitCallExpr(E, getReturnValueSlot());
  EmitMoveFromReturnSlot(E, RV);
}

void AggExprEmitter::VisitObjCMessageExpr(ObjCMessageExpr *E) {
  RValue RV = CGF.EmitObjCMessageExpr(E, getReturnValueSlot());
  EmitMoveFromReturnSlot(E, RV);
}

void AggExprEmitter::VisitBinComma(const BinaryOperator *E) {
  CGF.EmitIgnoredExpr(E->getLHS());
  Visit(E->getRHS());
}

void AggExprEmitter::VisitStmtExpr(const StmtExpr *E) {
  CodeGenFunction::StmtExprEvaluation eval(CGF);
  CGF.EmitCompoundStmt(*E->getSubStmt(), true, Dest);
}

void AggExprEmitter::VisitBinaryOperator(const BinaryOperator *E) {
  if (E->getOpcode() == BO_PtrMemD || E->getOpcode() == BO_PtrMemI)
    VisitPointerToDataMemberBinaryOperator(E);
  else
    CGF.ErrorUnsupported(E, "aggregate binary expression");
}

void AggExprEmitter::VisitPointerToDataMemberBinaryOperator(
    const BinaryOperator *E) {
  LValue LV = CGF.EmitPointerToDataMemberBinaryExpr(E);
  EmitFinalDestCopy(E, LV);
}
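
// Aggregate assignment: emit the LHS as an lvalue and then emit the RHS
// directly into that slot, so the common case needs no intermediate copy.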
void AggExprEmitter::VisitBinAssign(const BinaryOperator *E) {
  // For an assignment to work, the value on the right has
  // to be compatible with the value on the left.
  assert(CGF.getContext().hasSameUnqualifiedType(E->getLHS()->getType(),
                                                 E->getRHS()->getType())
         && "Invalid assignment");

  if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E->getLHS()))
    if (const VarDecl *VD = dyn_cast<VarDecl>(DRE->getDecl()))
      if (VD->hasAttr<BlocksAttr>() &&
          E->getRHS()->HasSideEffects(CGF.getContext())) {
        // When __block variable on LHS, the RHS must be evaluated first
        // as it may change the 'forwarding' field via call to Block_copy.
        LValue RHS = CGF.EmitLValue(E->getRHS());
        LValue LHS = CGF.EmitLValue(E->getLHS());
        Dest = AggValueSlot::forLValue(LHS, AggValueSlot::IsDestructed,
                                       needsGC(E->getLHS()->getType()),
                                       AggValueSlot::IsAliased);
        EmitFinalDestCopy(E, RHS, true);
        return;
      }

  LValue LHS = CGF.EmitLValue(E->getLHS());

  // Codegen the RHS so that it stores directly into the LHS.
  AggValueSlot LHSSlot =
    AggValueSlot::forLValue(LHS, AggValueSlot::IsDestructed,
                            needsGC(E->getLHS()->getType()),
                            AggValueSlot::IsAliased);
  CGF.EmitAggExpr(E->getRHS(), LHSSlot, false);
  EmitFinalDestCopy(E, LHS, true);
}
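
// A conditional with an aggregate result evaluates exactly one arm; both
// arms are emitted into the same destination slot, one per basic block.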
void AggExprEmitter::
VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
  llvm::BasicBlock *LHSBlock = CGF.createBasicBlock("cond.true");
  llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("cond.false");
  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("cond.end");

  // Bind the common expression if necessary.
  CodeGenFunction::OpaqueValueMapping binding(CGF, E);

  CodeGenFunction::ConditionalEvaluation eval(CGF);
  CGF.EmitBranchOnBoolExpr(E->getCond(), LHSBlock, RHSBlock);

  // Save whether the destination's lifetime is externally managed.
  bool isExternallyDestructed = Dest.isExternallyDestructed();

  eval.begin(CGF);
  CGF.EmitBlock(LHSBlock);
  Visit(E->getTrueExpr());
  eval.end(CGF);

  assert(CGF.HaveInsertPoint() && "expression evaluation ended with no IP!");
  CGF.Builder.CreateBr(ContBlock);

  // If the result of an agg expression is unused, then the emission
  // of the LHS might need to create a destination slot.  That's fine
  // with us, and we can safely emit the RHS into the same slot, but
  // we shouldn't claim that it's already being destructed.
  Dest.setExternallyDestructed(isExternallyDestructed);

  eval.begin(CGF);
  CGF.EmitBlock(RHSBlock);
  Visit(E->getFalseExpr());
  eval.end(CGF);

  CGF.EmitBlock(ContBlock);
}

void AggExprEmitter::VisitChooseExpr(const ChooseExpr *CE) {
  Visit(CE->getChosenSubExpr(CGF.getContext()));
}

void AggExprEmitter::VisitVAArgExpr(VAArgExpr *VE) {
  llvm::Value *ArgValue = CGF.EmitVAListRef(VE->getSubExpr());
  llvm::Value *ArgPtr = CGF.EmitVAArg(ArgValue, VE->getType());

  if (!ArgPtr) {
    CGF.ErrorUnsupported(VE, "aggregate va_arg expression");
    return;
  }

  EmitFinalDestCopy(VE, CGF.MakeAddrLValue(ArgPtr, VE->getType()));
}

void AggExprEmitter::VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E) {
  // Ensure that we have a slot, but if we already do, remember
  // whether it was externally destructed.
  bool wasExternallyDestructed = Dest.isExternallyDestructed();
  Dest = EnsureSlot(E->getType());

  // We're going to push a destructor if there isn't already one.
  Dest.setExternallyDestructed();

  Visit(E->getSubExpr());

  // Push that destructor we promised.
  if (!wasExternallyDestructed)
    CGF.EmitCXXTemporary(E->getTemporary(), E->getType(), Dest.getAddr());
}

void
AggExprEmitter::VisitCXXConstructExpr(const CXXConstructExpr *E) {
  AggValueSlot Slot = EnsureSlot(E->getType());
  CGF.EmitCXXConstructExpr(E, Slot);
}

void
AggExprEmitter::VisitLambdaExpr(LambdaExpr *E) {
  AggValueSlot Slot = EnsureSlot(E->getType());
  CGF.EmitLambdaExpr(E, Slot);
}

void AggExprEmitter::VisitExprWithCleanups(ExprWithCleanups *E) {
  CGF.enterFullExpression(E);
  CodeGenFunction::RunCleanupsScope cleanups(CGF);
  Visit(E->getSubExpr());
}

void AggExprEmitter::VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E) {
  QualType T = E->getType();
  AggValueSlot Slot = EnsureSlot(T);
  EmitNullInitializationToLValue(CGF.MakeAddrLValue(Slot.getAddr(), T));
}

void AggExprEmitter::VisitImplicitValueInitExpr(ImplicitValueInitExpr *E) {
  QualType T = E->getType();
  AggValueSlot Slot = EnsureSlot(T);
  EmitNullInitializationToLValue(CGF.MakeAddrLValue(Slot.getAddr(), T));
}

/// isSimpleZero - If emitting this value will obviously just cause a store of
/// zero to memory, return true.  This can return false if uncertain, so it
/// just handles simple cases.
static bool isSimpleZero(const Expr *E, CodeGenFunction &CGF) {
  E = E->IgnoreParens();

  // 0
  if (const IntegerLiteral *IL = dyn_cast<IntegerLiteral>(E))
    return IL->getValue() == 0;
  // +0.0
  if (const FloatingLiteral *FL = dyn_cast<FloatingLiteral>(E))
    return FL->getValue().isPosZero();
  // int()
  if ((isa<ImplicitValueInitExpr>(E) || isa<CXXScalarValueInitExpr>(E)) &&
      CGF.getTypes().isZeroInitializable(E->getType()))
    return true;
  // (int*)0 - Null pointer expressions.
  if (const CastExpr *ICE = dyn_cast<CastExpr>(E))
    return ICE->getCastKind() == CK_NullToPointer;
  // '\0'
  if (const CharacterLiteral *CL = dyn_cast<CharacterLiteral>(E))
    return CL->getValue() == 0;

  // Otherwise, hard case: conservatively return false.
  return false;
}
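
// EmitInitializationToLValue dispatches on the kind of value being
// initialized: references are bound, complex and aggregate values are
// emitted into the address, and everything else gets a scalar store.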
void
AggExprEmitter::EmitInitializationToLValue(Expr* E, LValue LV) {
  QualType type = LV.getType();
  // FIXME: Ignore result?
  // FIXME: Are initializers affected by volatile?
  if (Dest.isZeroed() && isSimpleZero(E, CGF)) {
    // Storing "i32 0" to a zeroed memory location is a no-op.
  } else if (isa<ImplicitValueInitExpr>(E)) {
    EmitNullInitializationToLValue(LV);
  } else if (type->isReferenceType()) {
    RValue RV = CGF.EmitReferenceBindingToExpr(E, /*InitializedDecl=*/0);
    CGF.EmitStoreThroughLValue(RV, LV);
  } else if (type->isAnyComplexType()) {
    CGF.EmitComplexExprIntoAddr(E, LV.getAddress(), false);
  } else if (CGF.hasAggregateLLVMType(type)) {
    CGF.EmitAggExpr(E, AggValueSlot::forLValue(LV,
                                        AggValueSlot::IsDestructed,
                                        AggValueSlot::DoesNotNeedGCBarriers,
                                        AggValueSlot::IsNotAliased,
                                        Dest.isZeroed()));
  } else if (LV.isSimple()) {
    CGF.EmitScalarInit(E, /*D=*/0, LV, /*Captured=*/false);
  } else {
    CGF.EmitStoreThroughLValue(RValue::get(CGF.EmitScalarExpr(E)), LV);
  }
}
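
// Null-initialization stores an explicit zero for scalars; for aggregates
// it defers to EmitNullInitialization, which may use a memset.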
void AggExprEmitter::EmitNullInitializationToLValue(LValue lv) {
  QualType type = lv.getType();

  // If the destination slot is already zeroed out before the aggregate is
  // copied into it, we don't have to emit any zeros here.
  if (Dest.isZeroed() && CGF.getTypes().isZeroInitializable(type))
    return;

  if (!CGF.hasAggregateLLVMType(type)) {
    // For non-aggregates, we can store zero
    llvm::Value *null = llvm::Constant::getNullValue(CGF.ConvertType(type));
    CGF.EmitStoreThroughLValue(RValue::get(null), lv);
  } else {
    // There's a potential optimization opportunity in combining
    // memsets; that would be easy for arrays, but relatively
    // difficult for structures with the current code.
    CGF.EmitNullInitialization(lv.getAddress(), lv.getType());
  }
}
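
// Init lists are emitted element by element.  Arrays get an explicit
// initialization loop with an EH cleanup for partially constructed
// elements; records are walked field by field.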
void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
#if 0
  // FIXME: Assess perf here?  Figure out what cases are worth optimizing here
  // (Length of globals? Chunks of zeroed-out space?).
  //
  // If we can, prefer a copy from a global; this is a lot less code for long
  // globals, and it's easier for the current optimizers to analyze.
  if (llvm::Constant* C = CGF.CGM.EmitConstantExpr(E, E->getType(), &CGF)) {
    llvm::GlobalVariable* GV =
      new llvm::GlobalVariable(CGF.CGM.getModule(), C->getType(), true,
                               llvm::GlobalValue::InternalLinkage, C, "");
    EmitFinalDestCopy(E, CGF.MakeAddrLValue(GV, E->getType()));
    return;
  }
#endif
  if (E->hadArrayRangeDesignator())
    CGF.ErrorUnsupported(E, "GNU array range designator extension");

  llvm::Value *DestPtr = Dest.getAddr();

  // Handle initialization of an array.
  if (E->getType()->isArrayType()) {
    llvm::PointerType *APType =
      cast<llvm::PointerType>(DestPtr->getType());
    llvm::ArrayType *AType =
      cast<llvm::ArrayType>(APType->getElementType());

    uint64_t NumInitElements = E->getNumInits();

    // If the sole initializer is itself an expression of this array type
    // (e.g. a string literal initializing a char array), just copy it.
    if (E->getNumInits() > 0) {
      QualType T1 = E->getType();
      QualType T2 = E->getInit(0)->getType();
      if (CGF.getContext().hasSameUnqualifiedType(T1, T2)) {
        EmitAggLoadOfLValue(E->getInit(0));
        return;
      }
    }

    uint64_t NumArrayElements = AType->getNumElements();
    assert(NumInitElements <= NumArrayElements);

    QualType elementType = E->getType().getCanonicalType();
    elementType = CGF.getContext().getQualifiedType(
                    cast<ArrayType>(elementType)->getElementType(),
                    elementType.getQualifiers() + Dest.getQualifiers());

    // DestPtr is an array*.  Construct an elementType* by drilling
    // down a level.
    llvm::Value *zero = llvm::ConstantInt::get(CGF.SizeTy, 0);
    llvm::Value *indices[] = { zero, zero };
    llvm::Value *begin =
      Builder.CreateInBoundsGEP(DestPtr, indices, "arrayinit.begin");

    // Exception safety requires us to destroy all the
    // already-constructed members if an initializer throws.
    // For that, we'll need an EH cleanup.
    QualType::DestructionKind dtorKind = elementType.isDestructedType();
    llvm::AllocaInst *endOfInit = 0;
    EHScopeStack::stable_iterator cleanup;
    llvm::Instruction *cleanupDominator = 0;
    if (CGF.needsEHCleanup(dtorKind)) {
      // In principle we could tell the cleanup where we are more
      // directly, but the control flow can get so varied here that it
      // would actually be quite complex.  Therefore we go through an
      // alloca.
      endOfInit = CGF.CreateTempAlloca(begin->getType(),
                                       "arrayinit.endOfInit");
      cleanupDominator = Builder.CreateStore(begin, endOfInit);
      CGF.pushIrregularPartialArrayCleanup(begin, endOfInit, elementType,
                                           CGF.getDestroyer(dtorKind));
      cleanup = CGF.EHStack.stable_begin();

    // Otherwise, remember that we didn't need a cleanup.
    } else {
      dtorKind = QualType::DK_none;
    }

    llvm::Value *one = llvm::ConstantInt::get(CGF.SizeTy, 1);

    // The 'current element to initialize'.  The invariants on this
    // variable are complicated.  Essentially, after each iteration of
    // the loop, it points to the last initialized element, except
    // that it points to the beginning of the array before any
    // elements have been initialized.
    llvm::Value *element = begin;

    // Emit the explicit initializers.
    for (uint64_t i = 0; i != NumInitElements; ++i) {
      // Advance to the next element.
      if (i > 0) {
        element = Builder.CreateInBoundsGEP(element, one, "arrayinit.element");

        // Tell the cleanup that it needs to destroy up to this
        // element.  TODO: some of these stores can be trivially
        // observed to be unnecessary.
        if (endOfInit) Builder.CreateStore(element, endOfInit);
      }

      LValue elementLV = CGF.MakeAddrLValue(element, elementType);
      EmitInitializationToLValue(E->getInit(i), elementLV);
    }

    // Check whether there's a non-trivial array-fill expression.
    // Note that this will be a CXXConstructExpr even if the element
    // type is an array (or array of array, etc.) of class type.
    Expr *filler = E->getArrayFiller();
    bool hasTrivialFiller = true;
    if (CXXConstructExpr *cons = dyn_cast_or_null<CXXConstructExpr>(filler)) {
      assert(cons->getConstructor()->isDefaultConstructor());
      hasTrivialFiller = cons->getConstructor()->isTrivial();
    }

    // Any remaining elements need to be zero-initialized, possibly
    // using the filler expression.  We can skip this if we're
    // emitting to zeroed memory.
    if (NumInitElements != NumArrayElements &&
        !(Dest.isZeroed() && hasTrivialFiller &&
          CGF.getTypes().isZeroInitializable(elementType))) {

      // Use an actual loop.  This is basically
      //   do { *array++ = filler; } while (array != end);

      // Advance to the start of the rest of the array.
      if (NumInitElements) {
        element = Builder.CreateInBoundsGEP(element, one, "arrayinit.start");
        if (endOfInit) Builder.CreateStore(element, endOfInit);
      }

      // Compute the end of the array.
      llvm::Value *end = Builder.CreateInBoundsGEP(begin,
                           llvm::ConstantInt::get(CGF.SizeTy, NumArrayElements),
                           "arrayinit.end");

      llvm::BasicBlock *entryBB = Builder.GetInsertBlock();
      llvm::BasicBlock *bodyBB = CGF.createBasicBlock("arrayinit.body");

      // Jump into the body.
      CGF.EmitBlock(bodyBB);
      llvm::PHINode *currentElement =
        Builder.CreatePHI(element->getType(), 2, "arrayinit.cur");
      currentElement->addIncoming(element, entryBB);

      // Emit the actual filler expression.
      LValue elementLV = CGF.MakeAddrLValue(currentElement, elementType);
      if (filler)
        EmitInitializationToLValue(filler, elementLV);
      else
        EmitNullInitializationToLValue(elementLV);

      // Move on to the next element.
      llvm::Value *nextElement =
        Builder.CreateInBoundsGEP(currentElement, one, "arrayinit.next");

      // Tell the EH cleanup that we finished with the last element.
      if (endOfInit) Builder.CreateStore(nextElement, endOfInit);

      // Leave the loop if we're done.
      llvm::Value *done = Builder.CreateICmpEQ(nextElement, end,
                                               "arrayinit.done");
      llvm::BasicBlock *endBB = CGF.createBasicBlock("arrayinit.end");
      Builder.CreateCondBr(done, endBB, bodyBB);
      currentElement->addIncoming(nextElement, Builder.GetInsertBlock());

      CGF.EmitBlock(endBB);
    }

    // Leave the partial-array cleanup if we entered one.
    if (dtorKind) CGF.DeactivateCleanupBlock(cleanup, cleanupDominator);

    return;
  }

  assert(E->getType()->isRecordType() && "Only support structs/unions here!");

  // Do struct initialization; this code just sets each individual member
  // to the appropriate value.  This makes bitfield support automatic;
  // the disadvantage is that the generated code is more difficult for
  // the optimizer, especially with bitfields.
  unsigned NumInitElements = E->getNumInits();
  RecordDecl *record = E->getType()->castAs<RecordType>()->getDecl();

  if (record->isUnion()) {
    // Only initialize one field of a union.  The field itself is
    // specified by the initializer list.
    if (!E->getInitializedFieldInUnion()) {
      // Empty union; we have nothing to do.

#ifndef NDEBUG
      // Make sure that it's really empty and not a failure of
      // semantic analysis.
      for (RecordDecl::field_iterator Field = record->field_begin(),
                                   FieldEnd = record->field_end();
           Field != FieldEnd; ++Field)
        assert(Field->isUnnamedBitfield() && "Only unnamed bitfields allowed");
#endif
      return;
    }

    // FIXME: volatility
    FieldDecl *Field = E->getInitializedFieldInUnion();

    LValue FieldLoc = CGF.EmitLValueForFieldInitialization(DestPtr, Field, 0);
    if (NumInitElements) {
      // Store the initializer into the field
      EmitInitializationToLValue(E->getInit(0), FieldLoc);
    } else {
      // Default-initialize to null.
      EmitNullInitializationToLValue(FieldLoc);
    }

    return;
  }

  // We'll need to enter cleanup scopes in case any of the member
  // initializers throw an exception.
  SmallVector<EHScopeStack::stable_iterator, 16> cleanups;
  llvm::Instruction *cleanupDominator = 0;

  // Here we iterate over the fields; this makes it simpler to both
  // default-initialize fields and skip over unnamed fields.
  unsigned curInitIndex = 0;
  for (RecordDecl::field_iterator field = record->field_begin(),
                                fieldEnd = record->field_end();
       field != fieldEnd; ++field) {
    // We're done once we hit the flexible array member.
    if (field->getType()->isIncompleteArrayType())
      break;

    // Always skip anonymous bitfields.
    if (field->isUnnamedBitfield())
      continue;

    // We're done if we reach the end of the explicit initializers, we
    // have a zeroed object, and the rest of the fields are
    // zero-initializable.
    if (curInitIndex == NumInitElements && Dest.isZeroed() &&
        CGF.getTypes().isZeroInitializable(E->getType()))
      break;

    // FIXME: volatility
    LValue LV = CGF.EmitLValueForFieldInitialization(DestPtr, *field, 0);
    // We never generate write-barriers for initialized fields.
    LV.setNonGC(true);

    if (curInitIndex < NumInitElements) {
      // Store the initializer into the field.
      EmitInitializationToLValue(E->getInit(curInitIndex++), LV);
    } else {
      // We're out of initializers; default-initialize to null.
      EmitNullInitializationToLValue(LV);
    }

    // Push a destructor if necessary.
    // FIXME: if we have an array of structures, all explicitly
    // initialized, we can end up pushing a linear number of cleanups.
    bool pushedCleanup = false;
    if (QualType::DestructionKind dtorKind
          = field->getType().isDestructedType()) {
      assert(LV.isSimple());
      if (CGF.needsEHCleanup(dtorKind)) {
        if (!cleanupDominator)
          cleanupDominator = CGF.Builder.CreateUnreachable(); // placeholder

        CGF.pushDestroy(EHCleanup, LV.getAddress(), field->getType(),
                        CGF.getDestroyer(dtorKind), false);
        cleanups.push_back(CGF.EHStack.stable_begin());
        pushedCleanup = true;
      }
    }

    // If the GEP didn't get used because of a dead zero init or something
    // else, clean it up for -O0 builds and general tidiness.
    if (!pushedCleanup && LV.isSimple())
      if (llvm::GetElementPtrInst *GEP =
            dyn_cast<llvm::GetElementPtrInst>(LV.getAddress()))
        if (GEP->use_empty())
          GEP->eraseFromParent();
  }

  // Deactivate all the partial cleanups in reverse order, which
  // generally means popping them.
  for (unsigned i = cleanups.size(); i != 0; --i)
    CGF.DeactivateCleanupBlock(cleanups[i-1], cleanupDominator);

  // Destroy the placeholder if we made one.
  if (cleanupDominator)
    cleanupDominator->eraseFromParent();
}

//===----------------------------------------------------------------------===//
//                        Entry Points into this File
//===----------------------------------------------------------------------===//

/// GetNumNonZeroBytesInInit - Get an approximate count of the number of
/// non-zero bytes that will be stored when outputting the initializer for the
/// specified initializer expression.
static CharUnits GetNumNonZeroBytesInInit(const Expr *E, CodeGenFunction &CGF) {
  E = E->IgnoreParens();

  // 0 and 0.0 won't require any non-zero stores!
  if (isSimpleZero(E, CGF)) return CharUnits::Zero();

  // If this is an initlist expr, sum up the sizes of the (present)
  // elements.  If this is something weird, assume the whole thing is non-zero.
  const InitListExpr *ILE = dyn_cast<InitListExpr>(E);
  if (ILE == 0 || !CGF.getTypes().isZeroInitializable(ILE->getType()))
    return CGF.getContext().getTypeSizeInChars(E->getType());

  // InitListExprs for structs have to be handled carefully.  If there are
  // reference members, we need to consider the size of the reference, not the
  // referencee.  InitListExprs for unions and arrays can't have references.
  if (const RecordType *RT = E->getType()->getAs<RecordType>()) {
    if (!RT->isUnionType()) {
      RecordDecl *SD = E->getType()->getAs<RecordType>()->getDecl();
      CharUnits NumNonZeroBytes = CharUnits::Zero();

      unsigned ILEElement = 0;
      for (RecordDecl::field_iterator Field = SD->field_begin(),
           FieldEnd = SD->field_end(); Field != FieldEnd; ++Field) {
        // We're done once we hit the flexible array member or run out of
        // InitListExpr elements.
        if (Field->getType()->isIncompleteArrayType() ||
            ILEElement == ILE->getNumInits())
          break;
        if (Field->isUnnamedBitfield())
          continue;

        const Expr *E = ILE->getInit(ILEElement++);

        // Reference values are always non-null and have the width of a pointer.
        if (Field->getType()->isReferenceType())
          NumNonZeroBytes += CGF.getContext().toCharUnitsFromBits(
              CGF.getContext().getTargetInfo().getPointerWidth(0));
        else
          NumNonZeroBytes += GetNumNonZeroBytesInInit(E, CGF);
      }

      return NumNonZeroBytes;
    }
  }

  CharUnits NumNonZeroBytes = CharUnits::Zero();
  for (unsigned i = 0, e = ILE->getNumInits(); i != e; ++i)
    NumNonZeroBytes += GetNumNonZeroBytesInInit(ILE->getInit(i), CGF);
  return NumNonZeroBytes;
}

/// CheckAggExprForMemSetUse - If the initializer is large and has a lot of
/// zeros in it, emit a memset and avoid storing the individual zeros.
///
static void CheckAggExprForMemSetUse(AggValueSlot &Slot, const Expr *E,
                                     CodeGenFunction &CGF) {
  // If the slot is already known to be zeroed, nothing to do.  Don't mess with
  // volatile stores.
  if (Slot.isZeroed() || Slot.isVolatile() || Slot.getAddr() == 0) return;

  // C++ objects with a user-declared constructor don't need zeroing.
  if (CGF.getContext().getLangOptions().CPlusPlus)
    if (const RecordType *RT = CGF.getContext()
                   .getBaseElementType(E->getType())->getAs<RecordType>()) {
      const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
      if (RD->hasUserDeclaredConstructor())
        return;
    }

  // If the type is 16 bytes or smaller, prefer individual stores over memset.
  std::pair<CharUnits, CharUnits> TypeInfo =
    CGF.getContext().getTypeInfoInChars(E->getType());
  if (TypeInfo.first <= CharUnits::fromQuantity(16))
    return;

  // Check to see if over 3/4 of the initializer are known to be zero.  If so,
  // we prefer to emit memset + individual stores for the rest.
  CharUnits NumNonZeroBytes = GetNumNonZeroBytesInInit(E, CGF);
  if (NumNonZeroBytes*4 > TypeInfo.first)
    return;

  // Okay, it seems like a good idea to use an initial memset, emit the call.
  llvm::Constant *SizeVal = CGF.Builder.getInt64(TypeInfo.first.getQuantity());
  CharUnits Align = TypeInfo.second;

  llvm::Value *Loc = Slot.getAddr();
  Loc = CGF.Builder.CreateBitCast(Loc, CGF.Int8PtrTy);
  CGF.Builder.CreateMemSet(Loc, CGF.Builder.getInt8(0), SizeVal,
                           Align.getQuantity(), false);

  // Tell the AggExprEmitter that the slot is known zero.
  Slot.setZeroed();
}

/// EmitAggExpr - Emit the computation of the specified expression of aggregate
/// type.  The result is computed into the given slot, which may be ignored to
/// indicate that the value of the aggregate expression is not needed.
///
/// \param IgnoreResult - true if the result of the expression is not used;
/// this allows the final copy of an otherwise-ignored volatile value to be
/// skipped.
void CodeGenFunction::EmitAggExpr(const Expr *E, AggValueSlot Slot,
                                  bool IgnoreResult) {
  assert(E && hasAggregateLLVMType(E->getType()) &&
         "Invalid aggregate expression to emit");
  assert((Slot.getAddr() != 0 || Slot.isIgnored()) &&
         "slot has bits but no address");

  // Optimize the slot if possible.
  CheckAggExprForMemSetUse(Slot, E, *this);

  AggExprEmitter(*this, Slot, IgnoreResult).Visit(const_cast<Expr*>(E));
}
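
// EmitAggExprToLValue materializes the aggregate into a fresh temporary and
// hands it back as an lvalue, for callers that need an address.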
LValue CodeGenFunction::EmitAggExprToLValue(const Expr *E) {
  assert(hasAggregateLLVMType(E->getType()) && "Invalid argument!");
  llvm::Value *Temp = CreateMemTemp(E->getType());
  LValue LV = MakeAddrLValue(Temp, E->getType());
  EmitAggExpr(E, AggValueSlot::forLValue(LV, AggValueSlot::IsNotDestructed,
                                         AggValueSlot::DoesNotNeedGCBarriers,
                                         AggValueSlot::IsNotAliased));
  return LV;
}
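
// Aggregate copies are lowered to llvm.memcpy, or to the Objective-C
// runtime's GC memmove when GC is enabled and the type has object members;
// the triviality assumptions are asserted below.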
void CodeGenFunction::EmitAggregateCopy(llvm::Value *DestPtr,
                                        llvm::Value *SrcPtr, QualType Ty,
                                        bool isVolatile, unsigned Alignment) {
  assert(!Ty->isAnyComplexType() && "Shouldn't happen for complex");

  if (getContext().getLangOptions().CPlusPlus) {
    if (const RecordType *RT = Ty->getAs<RecordType>()) {
      CXXRecordDecl *Record = cast<CXXRecordDecl>(RT->getDecl());
      assert((Record->hasTrivialCopyConstructor() ||
              Record->hasTrivialCopyAssignment() ||
              Record->hasTrivialMoveConstructor() ||
              Record->hasTrivialMoveAssignment()) &&
             "Trying to aggregate-copy a type without a trivial copy "
             "constructor or assignment operator");
      // Ignore empty classes in C++.
      if (Record->isEmpty())
        return;
    }
  }

  // Aggregate assignment turns into llvm.memcpy.  This is almost valid per
  // C99 6.5.16.1p3, which states "If the value being stored in an object is
  // read from another object that overlaps in any way the storage of the
  // first object, then the overlap shall be exact and the two objects shall
  // have qualified or unqualified versions of a compatible type."
  //
  // memcpy is not defined if the source and destination pointers are exactly
  // equal, but other compilers do this optimization, and almost every memcpy
  // implementation handles this case safely.  If there is a libc that does not
  // safely handle this, we can add a target hook.

  // Get size and alignment info for this aggregate.
  std::pair<CharUnits, CharUnits> TypeInfo =
    getContext().getTypeInfoInChars(Ty);
  if (!Alignment)
    Alignment = TypeInfo.second.getQuantity();

  // FIXME: Handle variable sized types.

  // FIXME: If we have a volatile struct, the optimizer can remove what might
  // appear to be `extra' memory ops:
  //
  // volatile struct { int i; } a, b;
  //
  // int main() {
  //   a = b;
  //   a = b;
  // }
  //
  // we need to use a different call here.  We use isVolatile to indicate when
  // either the source or the destination is volatile.

  llvm::PointerType *DPT = cast<llvm::PointerType>(DestPtr->getType());
  llvm::Type *DBP =
    llvm::Type::getInt8PtrTy(getLLVMContext(), DPT->getAddressSpace());
  DestPtr = Builder.CreateBitCast(DestPtr, DBP);

  llvm::PointerType *SPT = cast<llvm::PointerType>(SrcPtr->getType());
  llvm::Type *SBP =
    llvm::Type::getInt8PtrTy(getLLVMContext(), SPT->getAddressSpace());
  SrcPtr = Builder.CreateBitCast(SrcPtr, SBP);

  // Don't do any of the memmove_collectable tests if GC isn't set.
  if (CGM.getLangOptions().getGC() == LangOptions::NonGC) {
    // fall through
  } else if (const RecordType *RecordTy = Ty->getAs<RecordType>()) {
    RecordDecl *Record = RecordTy->getDecl();
    if (Record->hasObjectMember()) {
      CharUnits size = TypeInfo.first;
      llvm::Type *SizeTy = ConvertType(getContext().getSizeType());
      llvm::Value *SizeVal = llvm::ConstantInt::get(SizeTy, size.getQuantity());
      CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this, DestPtr, SrcPtr,
                                                    SizeVal);
      return;
    }
  } else if (Ty->isArrayType()) {
    QualType BaseType = getContext().getBaseElementType(Ty);
    if (const RecordType *RecordTy = BaseType->getAs<RecordType>()) {
      if (RecordTy->getDecl()->hasObjectMember()) {
        CharUnits size = TypeInfo.first;
        llvm::Type *SizeTy = ConvertType(getContext().getSizeType());
        llvm::Value *SizeVal =
          llvm::ConstantInt::get(SizeTy, size.getQuantity());
        CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this, DestPtr, SrcPtr,
                                                      SizeVal);
        return;
      }
    }
  }

  Builder.CreateMemCpy(DestPtr, SrcPtr,
                       llvm::ConstantInt::get(IntPtrTy,
                                              TypeInfo.first.getQuantity()),
                       Alignment, isVolatile);
}