// CGRecordLayoutBuilder.cpp
  1. //===--- CGRecordLayoutBuilder.cpp - CGRecordLayout builder ----*- C++ -*-===//
  2. //
  3. // The LLVM Compiler Infrastructure
  4. //
  5. // This file is distributed under the University of Illinois Open Source
  6. // License. See LICENSE.TXT for details.
  7. //
  8. //===----------------------------------------------------------------------===//
  9. //
  10. // Builder implementation for CGRecordLayout objects.
  11. //
  12. //===----------------------------------------------------------------------===//
  13. #include "CGRecordLayout.h"
  14. #include "clang/AST/ASTContext.h"
  15. #include "clang/AST/Attr.h"
  16. #include "clang/AST/CXXInheritance.h"
  17. #include "clang/AST/DeclCXX.h"
  18. #include "clang/AST/Expr.h"
  19. #include "clang/AST/RecordLayout.h"
  20. #include "clang/Frontend/CodeGenOptions.h"
  21. #include "CodeGenTypes.h"
  22. #include "CGCXXABI.h"
  23. #include "llvm/DerivedTypes.h"
  24. #include "llvm/Type.h"
  25. #include "llvm/Support/Debug.h"
  26. #include "llvm/Support/raw_ostream.h"
  27. #include "llvm/Target/TargetData.h"
  28. using namespace clang;
  29. using namespace CodeGen;
namespace {

/// CGRecordLayoutBuilder - Builds the LLVM struct type (and associated
/// field/bit-field/base maps) for a single RecordDecl.
class CGRecordLayoutBuilder {
public:
  /// FieldTypes - Holds the LLVM types that the struct is created from.
  ///
  SmallVector<llvm::Type *, 16> FieldTypes;

  /// BaseSubobjectType - Holds the LLVM type for the non-virtual part
  /// of the struct. For example, consider:
  ///
  /// struct A { int i; };
  /// struct B { void *v; };
  /// struct C : virtual A, B { };
  ///
  /// The LLVM type of C will be
  /// %struct.C = type { i32 (...)**, %struct.A, i32, %struct.B }
  ///
  /// And the LLVM type of the non-virtual base struct will be
  /// %struct.C.base = type { i32 (...)**, %struct.A, i32 }
  ///
  /// This only gets initialized if the base subobject type is
  /// different from the complete-object type.
  llvm::StructType *BaseSubobjectType;

  /// Fields - Maps each laid-out field declaration to its LLVM field number.
  llvm::DenseMap<const FieldDecl *, unsigned> Fields;

  /// BitFields - Holds location and size information about a bit field.
  llvm::DenseMap<const FieldDecl *, CGBitFieldInfo> BitFields;

  /// NonVirtualBases/VirtualBases - Map each laid-out base class to the
  /// LLVM field number of its subobject.
  llvm::DenseMap<const CXXRecordDecl *, unsigned> NonVirtualBases;
  llvm::DenseMap<const CXXRecordDecl *, unsigned> VirtualBases;

  /// IndirectPrimaryBases - Virtual base classes, direct or indirect, that are
  /// primary base classes for some other direct or indirect base class.
  CXXIndirectPrimaryBaseSet IndirectPrimaryBases;

  /// LaidOutVirtualBases - A set of all laid out virtual bases, used to
  /// avoid laying out virtual bases more than once.
  llvm::SmallPtrSet<const CXXRecordDecl *, 4> LaidOutVirtualBases;

  /// IsZeroInitializable - Whether this struct can be C++
  /// zero-initialized with an LLVM zeroinitializer.
  bool IsZeroInitializable;
  /// IsZeroInitializableAsBase - Same, when used as a base subobject.
  bool IsZeroInitializableAsBase;

  /// Packed - Whether the resulting LLVM struct will be packed or not.
  bool Packed;

  /// IsMsStruct - Whether ms_struct is in effect or not.
  bool IsMsStruct;

private:
  CodeGenTypes &Types;

  /// LastLaidOutBaseInfo - Contains the offset and non-virtual size of the
  /// last base laid out. Used so that we can replace the last laid out base
  /// type with an i8 array if needed.
  struct LastLaidOutBaseInfo {
    CharUnits Offset;
    CharUnits NonVirtualSize;

    // A zero non-virtual size marks "no resizable base pending".
    bool isValid() const { return !NonVirtualSize.isZero(); }
    void invalidate() { NonVirtualSize = CharUnits::Zero(); }

  } LastLaidOutBase;

  /// Alignment - Contains the alignment of the RecordDecl.
  CharUnits Alignment;

  /// BitsAvailableInLastField - If a bit field spans only part of a LLVM field,
  /// this will have the number of bits still available in the field.
  char BitsAvailableInLastField;

  /// NextFieldOffset - Holds the next field offset.
  CharUnits NextFieldOffset;

  /// LayoutUnionField - Will layout a field in an union and return the type
  /// that the field will have.
  llvm::Type *LayoutUnionField(const FieldDecl *Field,
                               const ASTRecordLayout &Layout);

  /// LayoutUnion - Will layout a union RecordDecl.
  void LayoutUnion(const RecordDecl *D);

  /// LayoutFields - try to layout all fields in the record decl.
  /// Returns false if the operation failed because the struct is not packed.
  bool LayoutFields(const RecordDecl *D);

  /// LayoutBase - Layout a single base, virtual or non-virtual.
  void LayoutBase(const CXXRecordDecl *base,
                  const CGRecordLayout &baseLayout,
                  CharUnits baseOffset);

  /// LayoutVirtualBase - layout a single virtual base.
  void LayoutVirtualBase(const CXXRecordDecl *base,
                         CharUnits baseOffset);

  /// LayoutVirtualBases - layout the virtual bases of a record decl.
  void LayoutVirtualBases(const CXXRecordDecl *RD,
                          const ASTRecordLayout &Layout);

  /// LayoutNonVirtualBase - layout a single non-virtual base.
  void LayoutNonVirtualBase(const CXXRecordDecl *base,
                            CharUnits baseOffset);

  /// LayoutNonVirtualBases - layout the non-virtual bases of a record decl.
  void LayoutNonVirtualBases(const CXXRecordDecl *RD,
                             const ASTRecordLayout &Layout);

  /// ComputeNonVirtualBaseType - Compute the non-virtual base field types.
  bool ComputeNonVirtualBaseType(const CXXRecordDecl *RD);

  /// LayoutField - layout a single field. Returns false if the operation failed
  /// because the current struct is not packed.
  bool LayoutField(const FieldDecl *D, uint64_t FieldOffset);

  /// LayoutBitField - layout a single bit field.
  void LayoutBitField(const FieldDecl *D, uint64_t FieldOffset);

  /// AppendField - Appends a field with the given offset and type.
  void AppendField(CharUnits fieldOffset, llvm::Type *FieldTy);

  /// AppendPadding - Appends enough padding bytes so that the total
  /// struct size is a multiple of the field alignment.
  void AppendPadding(CharUnits fieldOffset, CharUnits fieldAlignment);

  /// ResizeLastBaseFieldIfNecessary - Fields and bases can be laid out in the
  /// tail padding of a previous base. If this happens, the type of the previous
  /// base needs to be changed to an array of i8. Returns true if the last
  /// laid out base was resized.
  bool ResizeLastBaseFieldIfNecessary(CharUnits offset);

  /// getByteArrayType - Returns a byte array type with the given number of
  /// elements.
  llvm::Type *getByteArrayType(CharUnits NumBytes);

  /// AppendBytes - Append a given number of bytes to the record.
  void AppendBytes(CharUnits numBytes);

  /// AppendTailPadding - Append enough tail padding so that the type will have
  /// the passed size.
  void AppendTailPadding(CharUnits RecordSize);

  CharUnits getTypeAlignment(llvm::Type *Ty) const;

  /// getAlignmentAsLLVMStruct - Returns the maximum alignment of all the
  /// LLVM element types.
  CharUnits getAlignmentAsLLVMStruct() const;

  /// CheckZeroInitializable - Check if the given type contains a pointer
  /// to data member.
  void CheckZeroInitializable(QualType T);

public:
  CGRecordLayoutBuilder(CodeGenTypes &Types)
    : BaseSubobjectType(0),
      IsZeroInitializable(true), IsZeroInitializableAsBase(true),
      Packed(false), IsMsStruct(false),
      Types(Types), BitsAvailableInLastField(0) { }

  /// Layout - Will layout a RecordDecl.
  void Layout(const RecordDecl *D);
};

}
  157. void CGRecordLayoutBuilder::Layout(const RecordDecl *D) {
  158. Alignment = Types.getContext().getASTRecordLayout(D).getAlignment();
  159. Packed = D->hasAttr<PackedAttr>();
  160. IsMsStruct = D->hasAttr<MsStructAttr>();
  161. if (D->isUnion()) {
  162. LayoutUnion(D);
  163. return;
  164. }
  165. if (LayoutFields(D))
  166. return;
  167. // We weren't able to layout the struct. Try again with a packed struct
  168. Packed = true;
  169. LastLaidOutBase.invalidate();
  170. NextFieldOffset = CharUnits::Zero();
  171. FieldTypes.clear();
  172. Fields.clear();
  173. BitFields.clear();
  174. NonVirtualBases.clear();
  175. VirtualBases.clear();
  176. LayoutFields(D);
  177. }
/// MakeInfo - Compute the access components for a bit-field at the given bit
/// offset/size within a container of the given size and alignment. Each
/// component describes one aligned load/store used to read or write the field.
CGBitFieldInfo CGBitFieldInfo::MakeInfo(CodeGenTypes &Types,
                                        const FieldDecl *FD,
                                        uint64_t FieldOffset,
                                        uint64_t FieldSize,
                                        uint64_t ContainingTypeSizeInBits,
                                        unsigned ContainingTypeAlign) {
  // Accesses are planned in units of the declared type's storage size
  // (halved as needed below).
  llvm::Type *Ty = Types.ConvertTypeForMem(FD->getType());
  CharUnits TypeSizeInBytes =
    CharUnits::fromQuantity(Types.getTargetData().getTypeAllocSize(Ty));
  uint64_t TypeSizeInBits = Types.getContext().toBits(TypeSizeInBytes);

  bool IsSigned = FD->getType()->isSignedIntegerOrEnumerationType();

  if (FieldSize > TypeSizeInBits) {
    // We have a wide bit-field. The extra bits are only used for padding, so
    // if we have a bitfield of type T, with size N:
    //
    // T t : N;
    //
    // We can just assume that it's:
    //
    // T t : sizeof(T);
    //
    FieldSize = TypeSizeInBits;
  }

  // in big-endian machines the first fields are in higher bit positions,
  // so revert the offset. The byte offsets are reversed(back) later.
  if (Types.getTargetData().isBigEndian()) {
    FieldOffset = ((ContainingTypeSizeInBits)-FieldOffset-FieldSize);
  }

  // Compute the access components. The policy we use is to start by attempting
  // to access using the width of the bit-field type itself and to always access
  // at aligned indices of that type. If such an access would fail because it
  // extends past the bound of the type, then we reduce size to the next smaller
  // power of two and retry. The current algorithm assumes pow2 sized types,
  // although this is easy to fix.
  //
  assert(llvm::isPowerOf2_32(TypeSizeInBits) && "Unexpected type size!");
  CGBitFieldInfo::AccessInfo Components[3];
  unsigned NumComponents = 0;
  unsigned AccessedTargetBits = 0;       // The number of target bits accessed.
  unsigned AccessWidth = TypeSizeInBits; // The current access width to attempt.

  // If requested, widen the initial bit-field access to be register sized. The
  // theory is that this is most likely to allow multiple accesses into the same
  // structure to be coalesced, and that the backend should be smart enough to
  // narrow the store if no coalescing is ever done.
  //
  // The subsequent code will handle align these access to common boundaries and
  // guaranteeing that we do not access past the end of the structure.
  if (Types.getCodeGenOpts().UseRegisterSizedBitfieldAccess) {
    if (AccessWidth < Types.getTarget().getRegisterWidth())
      AccessWidth = Types.getTarget().getRegisterWidth();
  }

  // Round down from the field offset to find the first access position that is
  // at an aligned offset of the initial access type.
  uint64_t AccessStart = FieldOffset - (FieldOffset % AccessWidth);

  // Adjust initial access size to fit within record.
  while (AccessWidth > Types.getTarget().getCharWidth() &&
         AccessStart + AccessWidth > ContainingTypeSizeInBits) {
    AccessWidth >>= 1;
    AccessStart = FieldOffset - (FieldOffset % AccessWidth);
  }

  // Emit access components until the whole field is covered.
  while (AccessedTargetBits < FieldSize) {
    // Check that we can access using a type of this size, without reading off
    // the end of the structure. This can occur with packed structures and
    // -fno-bitfield-type-align, for example.
    if (AccessStart + AccessWidth > ContainingTypeSizeInBits) {
      // If so, reduce access size to the next smaller power-of-two and retry.
      AccessWidth >>= 1;
      assert(AccessWidth >= Types.getTarget().getCharWidth()
             && "Cannot access under byte size!");
      continue;
    }

    // Otherwise, add an access component.

    // First, compute the bits inside this access which are part of the
    // target. We are reading bits [AccessStart, AccessStart + AccessWidth); the
    // intersection with [FieldOffset, FieldOffset + FieldSize) gives the bits
    // in the target that we are reading.
    assert(FieldOffset < AccessStart + AccessWidth && "Invalid access start!");
    assert(AccessStart < FieldOffset + FieldSize && "Invalid access start!");
    uint64_t AccessBitsInFieldStart = std::max(AccessStart, FieldOffset);
    uint64_t AccessBitsInFieldSize =
      std::min(AccessWidth + AccessStart,
               FieldOffset + FieldSize) - AccessBitsInFieldStart;

    assert(NumComponents < 3 && "Unexpected number of components!");
    CGBitFieldInfo::AccessInfo &AI = Components[NumComponents++];
    AI.FieldIndex = 0;
    // FIXME: We still follow the old access pattern of only using the field
    // byte offset. We should switch this once we fix the struct layout to be
    // pretty.

    // on big-endian machines we reverted the bit offset because first fields are
    // in higher bits. But this also reverts the bytes, so fix this here by reverting
    // the byte offset on big-endian machines.
    if (Types.getTargetData().isBigEndian()) {
      AI.FieldByteOffset = Types.getContext().toCharUnitsFromBits(
          ContainingTypeSizeInBits - AccessStart - AccessWidth);
    } else {
      AI.FieldByteOffset = Types.getContext().toCharUnitsFromBits(AccessStart);
    }
    AI.FieldBitStart = AccessBitsInFieldStart - AccessStart;
    AI.AccessWidth = AccessWidth;
    AI.AccessAlignment = Types.getContext().toCharUnitsFromBits(
        llvm::MinAlign(ContainingTypeAlign, AccessStart));
    AI.TargetBitOffset = AccessedTargetBits;
    AI.TargetBitWidth = AccessBitsInFieldSize;

    AccessStart += AccessWidth;
    AccessedTargetBits += AI.TargetBitWidth;
  }

  assert(AccessedTargetBits == FieldSize && "Invalid bit-field access!");
  return CGBitFieldInfo(FieldSize, NumComponents, Components, IsSigned);
}
  287. CGBitFieldInfo CGBitFieldInfo::MakeInfo(CodeGenTypes &Types,
  288. const FieldDecl *FD,
  289. uint64_t FieldOffset,
  290. uint64_t FieldSize) {
  291. const RecordDecl *RD = FD->getParent();
  292. const ASTRecordLayout &RL = Types.getContext().getASTRecordLayout(RD);
  293. uint64_t ContainingTypeSizeInBits = Types.getContext().toBits(RL.getSize());
  294. unsigned ContainingTypeAlign = Types.getContext().toBits(RL.getAlignment());
  295. return MakeInfo(Types, FD, FieldOffset, FieldSize, ContainingTypeSizeInBits,
  296. ContainingTypeAlign);
  297. }
/// LayoutBitField - Lay out a single bit-field at the given bit offset,
/// appending storage bytes and recording its CGBitFieldInfo.
void CGRecordLayoutBuilder::LayoutBitField(const FieldDecl *D,
                                           uint64_t fieldOffset) {
  uint64_t fieldSize =
    D->getBitWidth()->EvaluateAsInt(Types.getContext()).getZExtValue();

  // Zero-width bit-fields occupy no storage; nothing to lay out.
  if (fieldSize == 0)
    return;

  uint64_t nextFieldOffsetInBits = Types.getContext().toBits(NextFieldOffset);
  CharUnits numBytesToAppend;
  unsigned charAlign = Types.getContext().Target.getCharAlign();

  // The field starts before our current end but no bits are left over from a
  // previous bit-field: it must land in the tail padding of the last base.
  if (fieldOffset < nextFieldOffsetInBits && !BitsAvailableInLastField) {
    assert(fieldOffset % charAlign == 0 &&
           "Field offset not aligned correctly");

    CharUnits fieldOffsetInCharUnits =
      Types.getContext().toCharUnitsFromBits(fieldOffset);

    // Try to resize the last base field.
    if (ResizeLastBaseFieldIfNecessary(fieldOffsetInCharUnits))
      nextFieldOffsetInBits = Types.getContext().toBits(NextFieldOffset);
  }

  if (fieldOffset < nextFieldOffsetInBits) {
    assert(BitsAvailableInLastField && "Bitfield size mismatch!");
    assert(!NextFieldOffset.isZero() && "Must have laid out at least one byte");

    // The bitfield begins in the previous bit-field. Only append storage for
    // the bits that don't fit in what's already available.
    numBytesToAppend = Types.getContext().toCharUnitsFromBits(
      llvm::RoundUpToAlignment(fieldSize - BitsAvailableInLastField,
                               charAlign));
  } else {
    assert(fieldOffset % charAlign == 0 &&
           "Field offset not aligned correctly");

    // Append padding if necessary.
    AppendPadding(Types.getContext().toCharUnitsFromBits(fieldOffset),
                  CharUnits::One());

    numBytesToAppend = Types.getContext().toCharUnitsFromBits(
        llvm::RoundUpToAlignment(fieldSize, charAlign));

    assert(!numBytesToAppend.isZero() && "No bytes to append!");
  }

  // Add the bit field info.
  BitFields.insert(std::make_pair(D,
                   CGBitFieldInfo::MakeInfo(Types, D, fieldOffset, fieldSize)));

  AppendBytes(numBytesToAppend);

  // Bits remaining in the appended storage after this field's last bit;
  // a following bit-field may pack into them.
  BitsAvailableInLastField =
    Types.getContext().toBits(NextFieldOffset) - (fieldOffset + fieldSize);
}
/// LayoutField - Lay out a single field at the given bit offset. Returns
/// false if the field cannot be placed without a packed struct.
bool CGRecordLayoutBuilder::LayoutField(const FieldDecl *D,
                                        uint64_t fieldOffset) {
  // If the field is packed, then we need a packed struct.
  if (!Packed && D->hasAttr<PackedAttr>())
    return false;

  if (D->isBitField()) {
    // We must use packed structs for unnamed bit fields since they
    // don't affect the struct alignment.
    if (!Packed && !D->getDeclName())
      return false;

    LayoutBitField(D, fieldOffset);
    return true;
  }

  // Non-bit-field: may affect whether the record is zero-initializable.
  CheckZeroInitializable(D->getType());

  assert(fieldOffset % Types.getTarget().getCharWidth() == 0
         && "field offset is not on a byte boundary!");
  CharUnits fieldOffsetInBytes
    = Types.getContext().toCharUnitsFromBits(fieldOffset);

  llvm::Type *Ty = Types.ConvertTypeForMem(D->getType());
  CharUnits typeAlignment = getTypeAlignment(Ty);

  // If the type alignment is larger then the struct alignment, we must use
  // a packed struct.
  if (typeAlignment > Alignment) {
    assert(!Packed && "Alignment is wrong even with packed struct!");
    return false;
  }

  // A record field carrying #pragma pack (MaxFieldAlignmentAttr) whose LLVM
  // alignment disagrees with the pragma also forces a packed struct.
  if (!Packed) {
    if (const RecordType *RT = D->getType()->getAs<RecordType>()) {
      const RecordDecl *RD = cast<RecordDecl>(RT->getDecl());
      if (const MaxFieldAlignmentAttr *MFAA =
            RD->getAttr<MaxFieldAlignmentAttr>()) {
        if (MFAA->getAlignment() != Types.getContext().toBits(typeAlignment))
          return false;
      }
    }
  }

  // Round up the field offset to the alignment of the field type.
  CharUnits alignedNextFieldOffsetInBytes =
    NextFieldOffset.RoundUpToAlignment(typeAlignment);

  // The field would start before our aligned end: it may fit in the tail
  // padding of the last laid-out base.
  if (fieldOffsetInBytes < alignedNextFieldOffsetInBytes) {
    // Try to resize the last base field.
    if (ResizeLastBaseFieldIfNecessary(fieldOffsetInBytes)) {
      alignedNextFieldOffsetInBytes =
        NextFieldOffset.RoundUpToAlignment(typeAlignment);
    }
  }

  // Still overlapping after the resize attempt: only packing can place it.
  if (fieldOffsetInBytes < alignedNextFieldOffsetInBytes) {
    assert(!Packed && "Could not place field even with packed struct!");
    return false;
  }

  AppendPadding(fieldOffsetInBytes, typeAlignment);

  // Now append the field.
  Fields[D] = FieldTypes.size();
  AppendField(fieldOffsetInBytes, Ty);

  LastLaidOutBase.invalidate();
  return true;
}
/// LayoutUnionField - Compute the LLVM type for a union member (all union
/// members live at offset 0). Returns null for zero-sized bit-fields.
llvm::Type *
CGRecordLayoutBuilder::LayoutUnionField(const FieldDecl *Field,
                                        const ASTRecordLayout &Layout) {
  if (Field->isBitField()) {
    uint64_t FieldSize =
      Field->getBitWidth()->EvaluateAsInt(Types.getContext()).getZExtValue();

    // Ignore zero sized bit fields.
    if (FieldSize == 0)
      return 0;

    // Bit-fields in a union are stored as an i8 (or [N x i8]) blob large
    // enough to hold the field.
    llvm::Type *FieldTy = llvm::Type::getInt8Ty(Types.getLLVMContext());
    CharUnits NumBytesToAppend = Types.getContext().toCharUnitsFromBits(
      llvm::RoundUpToAlignment(FieldSize,
                               Types.getContext().Target.getCharAlign()));

    if (NumBytesToAppend > CharUnits::One())
      FieldTy = llvm::ArrayType::get(FieldTy, NumBytesToAppend.getQuantity());

    // Add the bit field info.
    BitFields.insert(std::make_pair(Field,
                     CGBitFieldInfo::MakeInfo(Types, Field, 0, FieldSize)));
    return FieldTy;
  }

  // This is a regular union field.
  Fields[Field] = 0;
  return Types.ConvertTypeForMem(Field->getType());
}
/// LayoutUnion - Lay out a union as a struct containing the member with the
/// strictest alignment (ties broken by size), plus trailing padding.
void CGRecordLayoutBuilder::LayoutUnion(const RecordDecl *D) {
  assert(D->isUnion() && "Can't call LayoutUnion on a non-union record!");

  const ASTRecordLayout &layout = Types.getContext().getASTRecordLayout(D);

  llvm::Type *unionType = 0;
  CharUnits unionSize = CharUnits::Zero();
  CharUnits unionAlign = CharUnits::Zero();

  bool hasOnlyZeroSizedBitFields = true;

  // Scan all members, keeping the candidate type with the largest alignment
  // (preferring the larger size on equal alignment).
  unsigned fieldNo = 0;
  for (RecordDecl::field_iterator field = D->field_begin(),
       fieldEnd = D->field_end(); field != fieldEnd; ++field, ++fieldNo) {
    assert(layout.getFieldOffset(fieldNo) == 0 &&
           "Union field offset did not start at the beginning of record!");
    llvm::Type *fieldType = LayoutUnionField(*field, layout);

    // Null means a zero-sized bit-field; it contributes nothing.
    if (!fieldType)
      continue;

    hasOnlyZeroSizedBitFields = false;

    CharUnits fieldAlign = CharUnits::fromQuantity(
                          Types.getTargetData().getABITypeAlignment(fieldType));
    CharUnits fieldSize = CharUnits::fromQuantity(
                             Types.getTargetData().getTypeAllocSize(fieldType));

    if (fieldAlign < unionAlign)
      continue;

    if (fieldAlign > unionAlign || fieldSize > unionSize) {
      unionType = fieldType;
      unionAlign = fieldAlign;
      unionSize = fieldSize;
    }
  }

  // Now add our field.
  if (unionType) {
    AppendField(CharUnits::Zero(), unionType);

    if (getTypeAlignment(unionType) > layout.getAlignment()) {
      // We need a packed struct.
      Packed = true;
      unionAlign = CharUnits::One();
    }
  }
  if (unionAlign.isZero()) {
    assert(hasOnlyZeroSizedBitFields &&
           "0-align record did not have all zero-sized bit-fields!");
    unionAlign = CharUnits::One();
  }

  // Append tail padding.
  CharUnits recordSize = layout.getSize();
  if (recordSize > unionSize)
    AppendPadding(recordSize, unionAlign);
}
/// LayoutBase - Lay out a single base subobject (virtual or non-virtual) at
/// the given offset, recording it so its tail padding can be reclaimed later.
void CGRecordLayoutBuilder::LayoutBase(const CXXRecordDecl *base,
                                       const CGRecordLayout &baseLayout,
                                       CharUnits baseOffset) {
  // If this base starts inside the previous base's tail padding, shrink that
  // base's field to an i8 array first.
  ResizeLastBaseFieldIfNecessary(baseOffset);

  AppendPadding(baseOffset, CharUnits::One());

  const ASTRecordLayout &baseASTLayout
    = Types.getContext().getASTRecordLayout(base);

  // Remember where this base went so a later field/base can resize it.
  LastLaidOutBase.Offset = NextFieldOffset;
  LastLaidOutBase.NonVirtualSize = baseASTLayout.getNonVirtualSize();

  // Fields and bases can be laid out in the tail padding of previous
  // bases.  If this happens, we need to allocate the base as an i8
  // array; otherwise, we can use the subobject type.  However,
  // actually doing that would require knowledge of what immediately
  // follows this base in the layout, so instead we do a conservative
  // approximation, which is to use the base subobject type if it
  // has the same LLVM storage size as the nvsize.
  llvm::StructType *subobjectType = baseLayout.getBaseSubobjectLLVMType();
  AppendField(baseOffset, subobjectType);
}
  487. void CGRecordLayoutBuilder::LayoutNonVirtualBase(const CXXRecordDecl *base,
  488. CharUnits baseOffset) {
  489. // Ignore empty bases.
  490. if (base->isEmpty()) return;
  491. const CGRecordLayout &baseLayout = Types.getCGRecordLayout(base);
  492. if (IsZeroInitializableAsBase) {
  493. assert(IsZeroInitializable &&
  494. "class zero-initializable as base but not as complete object");
  495. IsZeroInitializable = IsZeroInitializableAsBase =
  496. baseLayout.isZeroInitializableAsBase();
  497. }
  498. LayoutBase(base, baseLayout, baseOffset);
  499. NonVirtualBases[base] = (FieldTypes.size() - 1);
  500. }
  501. void
  502. CGRecordLayoutBuilder::LayoutVirtualBase(const CXXRecordDecl *base,
  503. CharUnits baseOffset) {
  504. // Ignore empty bases.
  505. if (base->isEmpty()) return;
  506. const CGRecordLayout &baseLayout = Types.getCGRecordLayout(base);
  507. if (IsZeroInitializable)
  508. IsZeroInitializable = baseLayout.isZeroInitializableAsBase();
  509. LayoutBase(base, baseLayout, baseOffset);
  510. VirtualBases[base] = (FieldTypes.size() - 1);
  511. }
/// LayoutVirtualBases - layout the virtual bases of a record decl, walking
/// the whole inheritance graph. Note: the most-derived record's Layout is
/// passed through the recursion unchanged; vbase offsets are read from it.
void
CGRecordLayoutBuilder::LayoutVirtualBases(const CXXRecordDecl *RD,
                                          const ASTRecordLayout &Layout) {
  for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
       E = RD->bases_end(); I != E; ++I) {
    const CXXRecordDecl *BaseDecl =
      cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());

    // We only want to lay out virtual bases that aren't indirect primary bases
    // of some other base.
    if (I->isVirtual() && !IndirectPrimaryBases.count(BaseDecl)) {
      // Only lay out the base once.
      if (!LaidOutVirtualBases.insert(BaseDecl))
        continue;

      CharUnits vbaseOffset = Layout.getVBaseClassOffset(BaseDecl);
      LayoutVirtualBase(BaseDecl, vbaseOffset);
    }

    if (!BaseDecl->getNumVBases()) {
      // This base isn't interesting since it doesn't have any virtual bases.
      continue;
    }

    // Recurse to find virtual bases further up the hierarchy.
    LayoutVirtualBases(BaseDecl, Layout);
  }
}
/// LayoutNonVirtualBases - Lay out the vtable pointer (if any) and all direct
/// non-virtual bases of a record decl, primary base first.
void
CGRecordLayoutBuilder::LayoutNonVirtualBases(const CXXRecordDecl *RD,
                                             const ASTRecordLayout &Layout) {
  const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase();

  // Check if we need to add a vtable pointer.
  if (RD->isDynamicClass()) {
    if (!PrimaryBase) {
      // No primary base to share a vtable pointer with: emit our own, typed
      // as i32 (...)** to match the documented layout of dynamic classes.
      llvm::Type *FunctionType =
        llvm::FunctionType::get(llvm::Type::getInt32Ty(Types.getLLVMContext()),
                                /*isVarArg=*/true);
      llvm::Type *VTableTy = FunctionType->getPointerTo();

      assert(NextFieldOffset.isZero() &&
             "VTable pointer must come first!");
      AppendField(CharUnits::Zero(), VTableTy->getPointerTo());
    } else {
      // The primary base provides the vtable pointer; lay it out at offset 0.
      if (!Layout.isPrimaryBaseVirtual())
        LayoutNonVirtualBase(PrimaryBase, CharUnits::Zero());
      else
        LayoutVirtualBase(PrimaryBase, CharUnits::Zero());
    }
  }

  // Layout the non-virtual bases.
  for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
       E = RD->bases_end(); I != E; ++I) {
    if (I->isVirtual())
      continue;

    const CXXRecordDecl *BaseDecl =
      cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());

    // We've already laid out the primary base.
    if (BaseDecl == PrimaryBase && !Layout.isPrimaryBaseVirtual())
      continue;

    LayoutNonVirtualBase(BaseDecl, Layout.getBaseClassOffset(BaseDecl));
  }
}
/// ComputeNonVirtualBaseType - Build BaseSubobjectType from the fields laid
/// out so far, when it differs from the complete-object type. Returns false
/// if the current (non-packed) layout already overflows the nvsize.
bool
CGRecordLayoutBuilder::ComputeNonVirtualBaseType(const CXXRecordDecl *RD) {
  const ASTRecordLayout &Layout = Types.getContext().getASTRecordLayout(RD);

  CharUnits NonVirtualSize = Layout.getNonVirtualSize();
  CharUnits NonVirtualAlign = Layout.getNonVirtualAlign();

  CharUnits AlignedNonVirtualTypeSize =
    NonVirtualSize.RoundUpToAlignment(NonVirtualAlign);

  // First check if we can use the same fields as for the complete class.
  CharUnits RecordSize = Layout.getSize();
  if (AlignedNonVirtualTypeSize == RecordSize)
    return true;

  // Check if we need padding.
  CharUnits AlignedNextFieldOffset =
    NextFieldOffset.RoundUpToAlignment(getAlignmentAsLLVMStruct());

  if (AlignedNextFieldOffset > AlignedNonVirtualTypeSize) {
    assert(!Packed && "cannot layout even as packed struct");
    return false; // Needs packing.
  }

  bool needsPadding = (AlignedNonVirtualTypeSize != AlignedNextFieldOffset);
  if (needsPadding) {
    // Temporarily append padding so the .base type reaches the nvsize.
    CharUnits NumBytes = AlignedNonVirtualTypeSize - AlignedNextFieldOffset;
    FieldTypes.push_back(getByteArrayType(NumBytes));
  }

  BaseSubobjectType = llvm::StructType::create(Types.getLLVMContext(),
                                               FieldTypes, "", Packed);
  Types.addRecordTypeName(RD, BaseSubobjectType, ".base");

  // Pull the padding back off.
  if (needsPadding)
    FieldTypes.pop_back();

  return true;
}
/// LayoutFields - Lay out the record's bases, fields, virtual bases and tail
/// padding. Returns false if the layout requires a packed struct (the caller
/// then retries with Packed set).
bool CGRecordLayoutBuilder::LayoutFields(const RecordDecl *D) {
  assert(!D->isUnion() && "Can't call LayoutFields on a union!");
  assert(!Alignment.isZero() && "Did not set alignment!");

  const ASTRecordLayout &Layout = Types.getContext().getASTRecordLayout(D);

  // For C++ records, vtable pointer and non-virtual bases come first.
  const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(D);
  if (RD)
    LayoutNonVirtualBases(RD, Layout);

  unsigned FieldNo = 0;
  const FieldDecl *LastFD = 0;

  for (RecordDecl::field_iterator Field = D->field_begin(),
       FieldEnd = D->field_end(); Field != FieldEnd; ++Field, ++FieldNo) {
    if (IsMsStruct) {
      // Zero-length bitfields following non-bitfield members are
      // ignored:
      const FieldDecl *FD = (*Field);
      if (Types.getContext().ZeroBitfieldFollowsNonBitfield(FD, LastFD)) {
        // Keep FieldNo in sync with the AST layout's field numbering.
        --FieldNo;
        continue;
      }
      LastFD = FD;
    }

    if (!LayoutField(*Field, Layout.getFieldOffset(FieldNo))) {
      assert(!Packed &&
             "Could not layout fields even with a packed LLVM struct!");
      return false;
    }
  }

  if (RD) {
    // We've laid out the non-virtual bases and the fields, now compute the
    // non-virtual base field types.
    if (!ComputeNonVirtualBaseType(RD)) {
      assert(!Packed && "Could not layout even with a packed LLVM struct!");
      return false;
    }

    // And lay out the virtual bases.
    RD->getIndirectPrimaryBases(IndirectPrimaryBases);
    if (Layout.isPrimaryBaseVirtual())
      IndirectPrimaryBases.insert(Layout.getPrimaryBase());
    LayoutVirtualBases(RD, Layout);
  }

  // Append tail padding if necessary.
  AppendTailPadding(Layout.getSize());

  return true;
}
  645. void CGRecordLayoutBuilder::AppendTailPadding(CharUnits RecordSize) {
  646. ResizeLastBaseFieldIfNecessary(RecordSize);
  647. assert(NextFieldOffset <= RecordSize && "Size mismatch!");
  648. CharUnits AlignedNextFieldOffset =
  649. NextFieldOffset.RoundUpToAlignment(getAlignmentAsLLVMStruct());
  650. if (AlignedNextFieldOffset == RecordSize) {
  651. // We don't need any padding.
  652. return;
  653. }
  654. CharUnits NumPadBytes = RecordSize - NextFieldOffset;
  655. AppendBytes(NumPadBytes);
  656. }
  657. void CGRecordLayoutBuilder::AppendField(CharUnits fieldOffset,
  658. llvm::Type *fieldType) {
  659. CharUnits fieldSize =
  660. CharUnits::fromQuantity(Types.getTargetData().getTypeAllocSize(fieldType));
  661. FieldTypes.push_back(fieldType);
  662. NextFieldOffset = fieldOffset + fieldSize;
  663. BitsAvailableInLastField = 0;
  664. }
  665. void CGRecordLayoutBuilder::AppendPadding(CharUnits fieldOffset,
  666. CharUnits fieldAlignment) {
  667. assert(NextFieldOffset <= fieldOffset &&
  668. "Incorrect field layout!");
  669. // Round up the field offset to the alignment of the field type.
  670. CharUnits alignedNextFieldOffset =
  671. NextFieldOffset.RoundUpToAlignment(fieldAlignment);
  672. if (alignedNextFieldOffset < fieldOffset) {
  673. // Even with alignment, the field offset is not at the right place,
  674. // insert padding.
  675. CharUnits padding = fieldOffset - NextFieldOffset;
  676. AppendBytes(padding);
  677. }
  678. }
  679. bool CGRecordLayoutBuilder::ResizeLastBaseFieldIfNecessary(CharUnits offset) {
  680. // Check if we have a base to resize.
  681. if (!LastLaidOutBase.isValid())
  682. return false;
  683. // This offset does not overlap with the tail padding.
  684. if (offset >= NextFieldOffset)
  685. return false;
  686. // Restore the field offset and append an i8 array instead.
  687. FieldTypes.pop_back();
  688. NextFieldOffset = LastLaidOutBase.Offset;
  689. AppendBytes(LastLaidOutBase.NonVirtualSize);
  690. LastLaidOutBase.invalidate();
  691. return true;
  692. }
  693. llvm::Type *CGRecordLayoutBuilder::getByteArrayType(CharUnits numBytes) {
  694. assert(!numBytes.isZero() && "Empty byte arrays aren't allowed.");
  695. llvm::Type *Ty = llvm::Type::getInt8Ty(Types.getLLVMContext());
  696. if (numBytes > CharUnits::One())
  697. Ty = llvm::ArrayType::get(Ty, numBytes.getQuantity());
  698. return Ty;
  699. }
  700. void CGRecordLayoutBuilder::AppendBytes(CharUnits numBytes) {
  701. if (numBytes.isZero())
  702. return;
  703. // Append the padding field
  704. AppendField(NextFieldOffset, getByteArrayType(numBytes));
  705. }
  706. CharUnits CGRecordLayoutBuilder::getTypeAlignment(llvm::Type *Ty) const {
  707. if (Packed)
  708. return CharUnits::One();
  709. return CharUnits::fromQuantity(Types.getTargetData().getABITypeAlignment(Ty));
  710. }
  711. CharUnits CGRecordLayoutBuilder::getAlignmentAsLLVMStruct() const {
  712. if (Packed)
  713. return CharUnits::One();
  714. CharUnits maxAlignment = CharUnits::One();
  715. for (size_t i = 0; i != FieldTypes.size(); ++i)
  716. maxAlignment = std::max(maxAlignment, getTypeAlignment(FieldTypes[i]));
  717. return maxAlignment;
  718. }
  719. /// Merge in whether a field of the given type is zero-initializable.
  720. void CGRecordLayoutBuilder::CheckZeroInitializable(QualType T) {
  721. // This record already contains a member pointer.
  722. if (!IsZeroInitializableAsBase)
  723. return;
  724. // Can only have member pointers if we're compiling C++.
  725. if (!Types.getContext().getLangOptions().CPlusPlus)
  726. return;
  727. const Type *elementType = T->getBaseElementTypeUnsafe();
  728. if (const MemberPointerType *MPT = elementType->getAs<MemberPointerType>()) {
  729. if (!Types.getCXXABI().isZeroInitializable(MPT))
  730. IsZeroInitializable = IsZeroInitializableAsBase = false;
  731. } else if (const RecordType *RT = elementType->getAs<RecordType>()) {
  732. const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
  733. const CGRecordLayout &Layout = Types.getCGRecordLayout(RD);
  734. if (!Layout.isZeroInitializable())
  735. IsZeroInitializable = IsZeroInitializableAsBase = false;
  736. }
  737. }
/// ComputeRecordLayout - Run the record layout builder on D, fill in the
/// pre-created struct type Ty, and package the results (LLVM types plus the
/// base/field/bit-field mappings) as a new CGRecordLayout owned by the
/// caller.  In asserts builds, cross-checks the LLVM layout against the AST
/// layout.
CGRecordLayout *CodeGenTypes::ComputeRecordLayout(const RecordDecl *D,
                                                  llvm::StructType *Ty) {
  CGRecordLayoutBuilder Builder(*this);

  Builder.Layout(D);

  Ty->setBody(Builder.FieldTypes, Builder.Packed);

  // If we're in C++, compute the base subobject type.
  llvm::StructType *BaseTy = 0;
  if (isa<CXXRecordDecl>(D)) {
    BaseTy = Builder.BaseSubobjectType;
    // No distinct base type means the complete-object type serves both roles.
    if (!BaseTy) BaseTy = Ty;
  }

  CGRecordLayout *RL =
    new CGRecordLayout(Ty, BaseTy, Builder.IsZeroInitializable,
                       Builder.IsZeroInitializableAsBase);

  // Steal the builder's maps rather than copying them.
  RL->NonVirtualBases.swap(Builder.NonVirtualBases);
  RL->CompleteObjectVirtualBases.swap(Builder.VirtualBases);

  // Add all the field numbers.
  RL->FieldInfo.swap(Builder.Fields);

  // Add bitfield info.
  RL->BitFields.swap(Builder.BitFields);

  // Dump the layout, if requested.
  if (getContext().getLangOptions().DumpRecordLayouts) {
    llvm::errs() << "\n*** Dumping IRgen Record Layout\n";
    llvm::errs() << "Record: ";
    D->dump();
    llvm::errs() << "\nLayout: ";
    RL->dump();
  }

#ifndef NDEBUG
  // Verify that the computed LLVM struct size matches the AST layout size.
  const ASTRecordLayout &Layout = getContext().getASTRecordLayout(D);

  uint64_t TypeSizeInBits = getContext().toBits(Layout.getSize());
  assert(TypeSizeInBits == getTargetData().getTypeAllocSizeInBits(Ty) &&
         "Type size mismatch!");

  if (BaseTy) {
    // The base subobject type must match the non-virtual size rounded up
    // to the non-virtual alignment.
    CharUnits NonVirtualSize = Layout.getNonVirtualSize();
    CharUnits NonVirtualAlign = Layout.getNonVirtualAlign();
    CharUnits AlignedNonVirtualTypeSize =
      NonVirtualSize.RoundUpToAlignment(NonVirtualAlign);

    uint64_t AlignedNonVirtualTypeSizeInBits =
      getContext().toBits(AlignedNonVirtualTypeSize);

    assert(AlignedNonVirtualTypeSizeInBits ==
           getTargetData().getTypeAllocSizeInBits(BaseTy) &&
           "Type size mismatch!");
  }

  // Verify that the LLVM and AST field offsets agree.
  llvm::StructType *ST =
    dyn_cast<llvm::StructType>(RL->getLLVMType());
  const llvm::StructLayout *SL = getTargetData().getStructLayout(ST);

  const ASTRecordLayout &AST_RL = getContext().getASTRecordLayout(D);
  RecordDecl::field_iterator it = D->field_begin();
  const FieldDecl *LastFD = 0;
  bool IsMsStruct = D->hasAttr<MsStructAttr>();
  for (unsigned i = 0, e = AST_RL.getFieldCount(); i != e; ++i, ++it) {
    const FieldDecl *FD = *it;

    // For non-bit-fields, just check that the LLVM struct offset matches the
    // AST offset.
    if (!FD->isBitField()) {
      unsigned FieldNo = RL->getLLVMFieldNo(FD);
      assert(AST_RL.getFieldOffset(i) == SL->getElementOffsetInBits(FieldNo) &&
             "Invalid field offset!");
      LastFD = FD;
      continue;
    }

    if (IsMsStruct) {
      // Zero-length bitfields following non-bitfield members are
      // ignored:
      if (getContext().ZeroBitfieldFollowsNonBitfield(FD, LastFD)) {
        // The AST layout skipped this field, so back up the AST field index
        // to stay in sync with the declaration iterator.
        --i;
        continue;
      }
      LastFD = FD;
    }

    // Ignore unnamed bit-fields.
    if (!FD->getDeclName()) {
      LastFD = FD;
      continue;
    }

    const CGBitFieldInfo &Info = RL->getBitFieldInfo(FD);
    for (unsigned i = 0, e = Info.getNumComponents(); i != e; ++i) {
      const CGBitFieldInfo::AccessInfo &AI = Info.getComponent(i);

      // Verify that every component access is within the structure.
      uint64_t FieldOffset = SL->getElementOffsetInBits(AI.FieldIndex);
      uint64_t AccessBitOffset = FieldOffset +
        getContext().toBits(AI.FieldByteOffset);
      assert(AccessBitOffset + AI.AccessWidth <= TypeSizeInBits &&
             "Invalid bit-field access (out of range)!");
    }
  }
#endif

  return RL;
}
/// print - Pretty-print the layout (LLVM type, base subobject type,
/// zero-initializability, and all bit-field infos) to OS.
void CGRecordLayout::print(raw_ostream &OS) const {
  OS << "<CGRecordLayout\n";
  OS << " LLVMType:" << *CompleteObjectType << "\n";
  if (BaseSubobjectType)
    OS << " NonVirtualBaseLLVMType:" << *BaseSubobjectType << "\n";
  OS << " IsZeroInitializable:" << IsZeroInitializable << "\n";
  OS << " BitFields:[\n";

  // Print bit-field infos in declaration order.
  // DenseMap iteration order is unspecified, so first recover each
  // bit-field's declaration index by scanning its parent record, then sort.
  std::vector<std::pair<unsigned, const CGBitFieldInfo*> > BFIs;
  for (llvm::DenseMap<const FieldDecl*, CGBitFieldInfo>::const_iterator
         it = BitFields.begin(), ie = BitFields.end();
       it != ie; ++it) {
    const RecordDecl *RD = it->first->getParent();
    unsigned Index = 0;
    for (RecordDecl::field_iterator
           it2 = RD->field_begin(); *it2 != it->first; ++it2)
      ++Index;
    BFIs.push_back(std::make_pair(Index, &it->second));
  }
  llvm::array_pod_sort(BFIs.begin(), BFIs.end());
  for (unsigned i = 0, e = BFIs.size(); i != e; ++i) {
    OS.indent(4);
    BFIs[i].second->print(OS);
    OS << "\n";
  }

  OS << "]>\n";
}
/// dump - Print the layout to stderr; intended for use from a debugger.
void CGRecordLayout::dump() const {
  print(llvm::errs());
}
/// print - Pretty-print this bit-field's size/signedness and its list of
/// access components to OS.
void CGBitFieldInfo::print(raw_ostream &OS) const {
  OS << "<CGBitFieldInfo";
  OS << " Size:" << Size;
  OS << " IsSigned:" << IsSigned << "\n";

  // Continuation lines are indented to line up under the opening tag.
  OS.indent(4 + strlen("<CGBitFieldInfo"));
  OS << " NumComponents:" << getNumComponents();
  OS << " Components: [";
  if (getNumComponents()) {
    OS << "\n";
    // One <AccessInfo ...> entry per load/store component of the bit-field.
    for (unsigned i = 0, e = getNumComponents(); i != e; ++i) {
      const AccessInfo &AI = getComponent(i);
      OS.indent(8);
      OS << "<AccessInfo"
         << " FieldIndex:" << AI.FieldIndex
         << " FieldByteOffset:" << AI.FieldByteOffset.getQuantity()
         << " FieldBitStart:" << AI.FieldBitStart
         << " AccessWidth:" << AI.AccessWidth << "\n";
      OS.indent(8 + strlen("<AccessInfo"));
      OS << " AccessAlignment:" << AI.AccessAlignment.getQuantity()
         << " TargetBitOffset:" << AI.TargetBitOffset
         << " TargetBitWidth:" << AI.TargetBitWidth
         << ">\n";
    }
    OS.indent(4);
  }
  OS << "]>";
}
/// dump - Print the bit-field info to stderr; intended for use from a
/// debugger.
void CGBitFieldInfo::dump() const {
  print(llvm::errs());
}