//===--- CGRecordLayoutBuilder.cpp - CGRecordLayout builder ----*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Builder implementation for CGRecordLayout objects.
//
//===----------------------------------------------------------------------===//
#include "CGRecordLayout.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/CXXInheritance.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/Expr.h"
#include "clang/AST/RecordLayout.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "CodeGenTypes.h"
#include "CGCXXABI.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Type.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetData.h"

using namespace clang;
using namespace CodeGen;
  30. namespace {
  31. class CGRecordLayoutBuilder {
  32. public:
  33. /// FieldTypes - Holds the LLVM types that the struct is created from.
  34. ///
  35. SmallVector<llvm::Type *, 16> FieldTypes;
  36. /// BaseSubobjectType - Holds the LLVM type for the non-virtual part
  37. /// of the struct. For example, consider:
  38. ///
  39. /// struct A { int i; };
  40. /// struct B { void *v; };
  41. /// struct C : virtual A, B { };
  42. ///
  43. /// The LLVM type of C will be
  44. /// %struct.C = type { i32 (...)**, %struct.A, i32, %struct.B }
  45. ///
  46. /// And the LLVM type of the non-virtual base struct will be
  47. /// %struct.C.base = type { i32 (...)**, %struct.A, i32 }
  48. ///
  49. /// This only gets initialized if the base subobject type is
  50. /// different from the complete-object type.
  51. llvm::StructType *BaseSubobjectType;
  52. /// FieldInfo - Holds a field and its corresponding LLVM field number.
  53. llvm::DenseMap<const FieldDecl *, unsigned> Fields;
  54. /// BitFieldInfo - Holds location and size information about a bit field.
  55. llvm::DenseMap<const FieldDecl *, CGBitFieldInfo> BitFields;
  56. llvm::DenseMap<const CXXRecordDecl *, unsigned> NonVirtualBases;
  57. llvm::DenseMap<const CXXRecordDecl *, unsigned> VirtualBases;
  58. /// IndirectPrimaryBases - Virtual base classes, direct or indirect, that are
  59. /// primary base classes for some other direct or indirect base class.
  60. CXXIndirectPrimaryBaseSet IndirectPrimaryBases;
  61. /// LaidOutVirtualBases - A set of all laid out virtual bases, used to avoid
  62. /// avoid laying out virtual bases more than once.
  63. llvm::SmallPtrSet<const CXXRecordDecl *, 4> LaidOutVirtualBases;
  64. /// IsZeroInitializable - Whether this struct can be C++
  65. /// zero-initialized with an LLVM zeroinitializer.
  66. bool IsZeroInitializable;
  67. bool IsZeroInitializableAsBase;
  68. /// Packed - Whether the resulting LLVM struct will be packed or not.
  69. bool Packed;
  70. /// IsMsStruct - Whether ms_struct is in effect or not
  71. bool IsMsStruct;
  72. private:
  73. CodeGenTypes &Types;
  74. /// LastLaidOutBaseInfo - Contains the offset and non-virtual size of the
  75. /// last base laid out. Used so that we can replace the last laid out base
  76. /// type with an i8 array if needed.
  77. struct LastLaidOutBaseInfo {
  78. CharUnits Offset;
  79. CharUnits NonVirtualSize;
  80. bool isValid() const { return !NonVirtualSize.isZero(); }
  81. void invalidate() { NonVirtualSize = CharUnits::Zero(); }
  82. } LastLaidOutBase;
  83. /// Alignment - Contains the alignment of the RecordDecl.
  84. CharUnits Alignment;
  85. /// BitsAvailableInLastField - If a bit field spans only part of a LLVM field,
  86. /// this will have the number of bits still available in the field.
  87. char BitsAvailableInLastField;
  88. /// NextFieldOffset - Holds the next field offset.
  89. CharUnits NextFieldOffset;
  90. /// LayoutUnionField - Will layout a field in an union and return the type
  91. /// that the field will have.
  92. llvm::Type *LayoutUnionField(const FieldDecl *Field,
  93. const ASTRecordLayout &Layout);
  94. /// LayoutUnion - Will layout a union RecordDecl.
  95. void LayoutUnion(const RecordDecl *D);
  96. /// LayoutField - try to layout all fields in the record decl.
  97. /// Returns false if the operation failed because the struct is not packed.
  98. bool LayoutFields(const RecordDecl *D);
  99. /// Layout a single base, virtual or non-virtual
  100. bool LayoutBase(const CXXRecordDecl *base,
  101. const CGRecordLayout &baseLayout,
  102. CharUnits baseOffset);
  103. /// LayoutVirtualBase - layout a single virtual base.
  104. bool LayoutVirtualBase(const CXXRecordDecl *base,
  105. CharUnits baseOffset);
  106. /// LayoutVirtualBases - layout the virtual bases of a record decl.
  107. bool LayoutVirtualBases(const CXXRecordDecl *RD,
  108. const ASTRecordLayout &Layout);
  109. /// MSLayoutVirtualBases - layout the virtual bases of a record decl,
  110. /// like MSVC.
  111. bool MSLayoutVirtualBases(const CXXRecordDecl *RD,
  112. const ASTRecordLayout &Layout);
  113. /// LayoutNonVirtualBase - layout a single non-virtual base.
  114. bool LayoutNonVirtualBase(const CXXRecordDecl *base,
  115. CharUnits baseOffset);
  116. /// LayoutNonVirtualBases - layout the virtual bases of a record decl.
  117. bool LayoutNonVirtualBases(const CXXRecordDecl *RD,
  118. const ASTRecordLayout &Layout);
  119. /// ComputeNonVirtualBaseType - Compute the non-virtual base field types.
  120. bool ComputeNonVirtualBaseType(const CXXRecordDecl *RD);
  121. /// LayoutField - layout a single field. Returns false if the operation failed
  122. /// because the current struct is not packed.
  123. bool LayoutField(const FieldDecl *D, uint64_t FieldOffset);
  124. /// LayoutBitField - layout a single bit field.
  125. void LayoutBitField(const FieldDecl *D, uint64_t FieldOffset);
  126. /// AppendField - Appends a field with the given offset and type.
  127. void AppendField(CharUnits fieldOffset, llvm::Type *FieldTy);
  128. /// AppendPadding - Appends enough padding bytes so that the total
  129. /// struct size is a multiple of the field alignment.
  130. void AppendPadding(CharUnits fieldOffset, CharUnits fieldAlignment);
  131. /// ResizeLastBaseFieldIfNecessary - Fields and bases can be laid out in the
  132. /// tail padding of a previous base. If this happens, the type of the previous
  133. /// base needs to be changed to an array of i8. Returns true if the last
  134. /// laid out base was resized.
  135. bool ResizeLastBaseFieldIfNecessary(CharUnits offset);
  136. /// getByteArrayType - Returns a byte array type with the given number of
  137. /// elements.
  138. llvm::Type *getByteArrayType(CharUnits NumBytes);
  139. /// AppendBytes - Append a given number of bytes to the record.
  140. void AppendBytes(CharUnits numBytes);
  141. /// AppendTailPadding - Append enough tail padding so that the type will have
  142. /// the passed size.
  143. void AppendTailPadding(CharUnits RecordSize);
  144. CharUnits getTypeAlignment(llvm::Type *Ty) const;
  145. /// getAlignmentAsLLVMStruct - Returns the maximum alignment of all the
  146. /// LLVM element types.
  147. CharUnits getAlignmentAsLLVMStruct() const;
  148. /// CheckZeroInitializable - Check if the given type contains a pointer
  149. /// to data member.
  150. void CheckZeroInitializable(QualType T);
  151. public:
  152. CGRecordLayoutBuilder(CodeGenTypes &Types)
  153. : BaseSubobjectType(0),
  154. IsZeroInitializable(true), IsZeroInitializableAsBase(true),
  155. Packed(false), IsMsStruct(false),
  156. Types(Types), BitsAvailableInLastField(0) { }
  157. /// Layout - Will layout a RecordDecl.
  158. void Layout(const RecordDecl *D);
  159. };
  160. }
/// Layout - Lay out the given record. First attempts an unpacked layout; if
/// any field or base cannot be placed at its ABI offset, all builder state is
/// reset and layout is retried with a packed LLVM struct.
void CGRecordLayoutBuilder::Layout(const RecordDecl *D) {
  Alignment = Types.getContext().getASTRecordLayout(D).getAlignment();
  Packed = D->hasAttr<PackedAttr>();
  IsMsStruct = D->hasAttr<MsStructAttr>();

  // Unions are laid out with a dedicated algorithm (single biggest field).
  if (D->isUnion()) {
    LayoutUnion(D);
    return;
  }

  if (LayoutFields(D))
    return;

  // We weren't able to layout the struct. Try again with a packed struct.
  // Every bit of accumulated layout state must be cleared before the retry.
  Packed = true;
  LastLaidOutBase.invalidate();
  NextFieldOffset = CharUnits::Zero();
  FieldTypes.clear();
  Fields.clear();
  BitFields.clear();
  NonVirtualBases.clear();
  VirtualBases.clear();

  LayoutFields(D);
}
/// MakeInfo - Compute the access components for a bit-field of the given bit
/// offset and width inside a container of the given size/alignment. Produces
/// up to three aligned loads/stores that together cover the field's bits.
CGBitFieldInfo CGBitFieldInfo::MakeInfo(CodeGenTypes &Types,
                                        const FieldDecl *FD,
                                        uint64_t FieldOffset,
                                        uint64_t FieldSize,
                                        uint64_t ContainingTypeSizeInBits,
                                        unsigned ContainingTypeAlign) {
  llvm::Type *Ty = Types.ConvertTypeForMem(FD->getType());
  CharUnits TypeSizeInBytes =
    CharUnits::fromQuantity(Types.getTargetData().getTypeAllocSize(Ty));
  uint64_t TypeSizeInBits = Types.getContext().toBits(TypeSizeInBytes);

  bool IsSigned = FD->getType()->isSignedIntegerOrEnumerationType();

  if (FieldSize > TypeSizeInBits) {
    // We have a wide bit-field. The extra bits are only used for padding, so
    // if we have a bitfield of type T, with size N:
    //
    // T t : N;
    //
    // We can just assume that it's:
    //
    // T t : sizeof(T);
    //
    FieldSize = TypeSizeInBits;
  }

  // On big-endian machines the first fields are in higher bit positions,
  // so reverse the offset. The byte offsets are reversed (back) later.
  if (Types.getTargetData().isBigEndian()) {
    FieldOffset = ((ContainingTypeSizeInBits)-FieldOffset-FieldSize);
  }

  // Compute the access components. The policy we use is to start by attempting
  // to access using the width of the bit-field type itself and to always access
  // at aligned indices of that type. If such an access would fail because it
  // extends past the bound of the type, then we reduce size to the next smaller
  // power of two and retry. The current algorithm assumes pow2 sized types,
  // although this is easy to fix.
  //
  assert(llvm::isPowerOf2_32(TypeSizeInBits) && "Unexpected type size!");
  CGBitFieldInfo::AccessInfo Components[3];
  unsigned NumComponents = 0;
  unsigned AccessedTargetBits = 0;       // The number of target bits accessed.
  unsigned AccessWidth = TypeSizeInBits; // The current access width to attempt.

  // If requested, widen the initial bit-field access to be register sized. The
  // theory is that this is most likely to allow multiple accesses into the same
  // structure to be coalesced, and that the backend should be smart enough to
  // narrow the store if no coalescing is ever done.
  //
  // The subsequent code will handle aligning these accesses to common
  // boundaries and guaranteeing that we do not access past the end of the
  // structure.
  if (Types.getCodeGenOpts().UseRegisterSizedBitfieldAccess) {
    if (AccessWidth < Types.getTarget().getRegisterWidth())
      AccessWidth = Types.getTarget().getRegisterWidth();
  }

  // Round down from the field offset to find the first access position that is
  // at an aligned offset of the initial access type.
  uint64_t AccessStart = FieldOffset - (FieldOffset % AccessWidth);

  // Adjust initial access size to fit within record.
  while (AccessWidth > Types.getTarget().getCharWidth() &&
         AccessStart + AccessWidth > ContainingTypeSizeInBits) {
    AccessWidth >>= 1;
    AccessStart = FieldOffset - (FieldOffset % AccessWidth);
  }

  while (AccessedTargetBits < FieldSize) {
    // Check that we can access using a type of this size, without reading off
    // the end of the structure. This can occur with packed structures and
    // -fno-bitfield-type-align, for example.
    if (AccessStart + AccessWidth > ContainingTypeSizeInBits) {
      // If so, reduce access size to the next smaller power-of-two and retry.
      AccessWidth >>= 1;
      assert(AccessWidth >= Types.getTarget().getCharWidth()
             && "Cannot access under byte size!");
      continue;
    }

    // Otherwise, add an access component.

    // First, compute the bits inside this access which are part of the
    // target. We are reading bits [AccessStart, AccessStart + AccessWidth); the
    // intersection with [FieldOffset, FieldOffset + FieldSize) gives the bits
    // in the target that we are reading.
    assert(FieldOffset < AccessStart + AccessWidth && "Invalid access start!");
    assert(AccessStart < FieldOffset + FieldSize && "Invalid access start!");
    uint64_t AccessBitsInFieldStart = std::max(AccessStart, FieldOffset);
    uint64_t AccessBitsInFieldSize =
      std::min(AccessWidth + AccessStart,
               FieldOffset + FieldSize) - AccessBitsInFieldStart;

    assert(NumComponents < 3 && "Unexpected number of components!");
    CGBitFieldInfo::AccessInfo &AI = Components[NumComponents++];
    AI.FieldIndex = 0;
    // FIXME: We still follow the old access pattern of only using the field
    // byte offset. We should switch this once we fix the struct layout to be
    // pretty.

    // On big-endian machines we reversed the bit offset because first fields
    // are in higher bits. But this also reverses the bytes, so fix this here
    // by reversing the byte offset on big-endian machines.
    if (Types.getTargetData().isBigEndian()) {
      AI.FieldByteOffset = Types.getContext().toCharUnitsFromBits(
          ContainingTypeSizeInBits - AccessStart - AccessWidth);
    } else {
      AI.FieldByteOffset = Types.getContext().toCharUnitsFromBits(AccessStart);
    }
    AI.FieldBitStart = AccessBitsInFieldStart - AccessStart;
    AI.AccessWidth = AccessWidth;
    AI.AccessAlignment = Types.getContext().toCharUnitsFromBits(
        llvm::MinAlign(ContainingTypeAlign, AccessStart));
    AI.TargetBitOffset = AccessedTargetBits;
    AI.TargetBitWidth = AccessBitsInFieldSize;

    AccessStart += AccessWidth;
    AccessedTargetBits += AI.TargetBitWidth;
  }

  assert(AccessedTargetBits == FieldSize && "Invalid bit-field access!");
  return CGBitFieldInfo(FieldSize, NumComponents, Components, IsSigned);
}
  291. CGBitFieldInfo CGBitFieldInfo::MakeInfo(CodeGenTypes &Types,
  292. const FieldDecl *FD,
  293. uint64_t FieldOffset,
  294. uint64_t FieldSize) {
  295. const RecordDecl *RD = FD->getParent();
  296. const ASTRecordLayout &RL = Types.getContext().getASTRecordLayout(RD);
  297. uint64_t ContainingTypeSizeInBits = Types.getContext().toBits(RL.getSize());
  298. unsigned ContainingTypeAlign = Types.getContext().toBits(RL.getAlignment());
  299. return MakeInfo(Types, FD, FieldOffset, FieldSize, ContainingTypeSizeInBits,
  300. ContainingTypeAlign);
  301. }
/// LayoutBitField - Lay out a single (non-union) bit-field: append whatever
/// new storage bytes its bits require, record its CGBitFieldInfo, and track
/// how many appended bits remain free for a following bit-field to share.
void CGRecordLayoutBuilder::LayoutBitField(const FieldDecl *D,
                                           uint64_t fieldOffset) {
  uint64_t fieldSize = D->getBitWidthValue(Types.getContext());

  // Zero-width bit-fields affect only ABI alignment/packing; they occupy no
  // storage, so there is nothing to emit.
  if (fieldSize == 0)
    return;

  uint64_t nextFieldOffsetInBits = Types.getContext().toBits(NextFieldOffset);
  CharUnits numBytesToAppend;
  unsigned charAlign = Types.getContext().getTargetInfo().getCharAlign();

  if (fieldOffset < nextFieldOffsetInBits && !BitsAvailableInLastField) {
    assert(fieldOffset % charAlign == 0 &&
           "Field offset not aligned correctly");

    CharUnits fieldOffsetInCharUnits =
      Types.getContext().toCharUnitsFromBits(fieldOffset);

    // The bit-field starts inside the tail padding of the last laid-out
    // base; try to resize that base to an i8 array so the bytes become
    // available, then recompute where the next field would go.
    if (ResizeLastBaseFieldIfNecessary(fieldOffsetInCharUnits))
      nextFieldOffsetInBits = Types.getContext().toBits(NextFieldOffset);
  }

  if (fieldOffset < nextFieldOffsetInBits) {
    assert(BitsAvailableInLastField && "Bitfield size mismatch!");
    assert(!NextFieldOffset.isZero() && "Must have laid out at least one byte");

    // The bitfield begins in the previous bit-field: only append storage for
    // the bits that do not fit in what is already laid out.
    numBytesToAppend = Types.getContext().toCharUnitsFromBits(
      llvm::RoundUpToAlignment(fieldSize - BitsAvailableInLastField,
                               charAlign));
  } else {
    assert(fieldOffset % charAlign == 0 &&
           "Field offset not aligned correctly");

    // Append padding if necessary.
    AppendPadding(Types.getContext().toCharUnitsFromBits(fieldOffset),
                  CharUnits::One());

    numBytesToAppend = Types.getContext().toCharUnitsFromBits(
        llvm::RoundUpToAlignment(fieldSize, charAlign));

    assert(!numBytesToAppend.isZero() && "No bytes to append!");
  }

  // Add the bit field info.
  BitFields.insert(std::make_pair(D,
                   CGBitFieldInfo::MakeInfo(Types, D, fieldOffset, fieldSize)));

  AppendBytes(numBytesToAppend);

  // Remember how many trailing bits of the appended storage remain unused so
  // a following bit-field can pack into them.
  BitsAvailableInLastField =
    Types.getContext().toBits(NextFieldOffset) - (fieldOffset + fieldSize);
}
/// LayoutField - Lay out a single field at its AST-computed bit offset.
/// Returns false when the field cannot be placed without a packed struct,
/// which makes the caller restart the whole layout with Packed = true.
bool CGRecordLayoutBuilder::LayoutField(const FieldDecl *D,
                                        uint64_t fieldOffset) {
  // If the field is packed, then we need a packed struct.
  if (!Packed && D->hasAttr<PackedAttr>())
    return false;

  if (D->isBitField()) {
    // We must use packed structs for unnamed bit fields since they
    // don't affect the struct alignment.
    if (!Packed && !D->getDeclName())
      return false;

    LayoutBitField(D, fieldOffset);
    return true;
  }

  CheckZeroInitializable(D->getType());

  assert(fieldOffset % Types.getTarget().getCharWidth() == 0
         && "field offset is not on a byte boundary!");
  CharUnits fieldOffsetInBytes
    = Types.getContext().toCharUnitsFromBits(fieldOffset);

  llvm::Type *Ty = Types.ConvertTypeForMem(D->getType());
  CharUnits typeAlignment = getTypeAlignment(Ty);

  // If the type alignment is larger than the struct alignment, we must use
  // a packed struct.
  if (typeAlignment > Alignment) {
    assert(!Packed && "Alignment is wrong even with packed struct!");
    return false;
  }

  if (!Packed) {
    if (const RecordType *RT = D->getType()->getAs<RecordType>()) {
      const RecordDecl *RD = cast<RecordDecl>(RT->getDecl());
      if (const MaxFieldAlignmentAttr *MFAA =
            RD->getAttr<MaxFieldAlignmentAttr>()) {
        // A record member whose alignment was capped by #pragma pack can't be
        // represented as a plain member of an unpacked LLVM struct.
        if (MFAA->getAlignment() != Types.getContext().toBits(typeAlignment))
          return false;
      }
    }
  }

  // Round up the field offset to the alignment of the field type.
  CharUnits alignedNextFieldOffsetInBytes =
    NextFieldOffset.RoundUpToAlignment(typeAlignment);

  if (fieldOffsetInBytes < alignedNextFieldOffsetInBytes) {
    // Try to resize the last base field.
    if (ResizeLastBaseFieldIfNecessary(fieldOffsetInBytes)) {
      alignedNextFieldOffsetInBytes =
        NextFieldOffset.RoundUpToAlignment(typeAlignment);
    }
  }

  if (fieldOffsetInBytes < alignedNextFieldOffsetInBytes) {
    // Still overlapping even after resizing the last base: only a packed
    // layout can place this field at its ABI offset.
    assert(!Packed && "Could not place field even with packed struct!");
    return false;
  }

  AppendPadding(fieldOffsetInBytes, typeAlignment);

  // Now append the field.
  Fields[D] = FieldTypes.size();
  AppendField(fieldOffsetInBytes, Ty);

  // A plain field ends any opportunity to place data in base tail padding.
  LastLaidOutBase.invalidate();
  return true;
}
/// LayoutUnionField - Lay out one member of a union and return the LLVM type
/// that would represent it (all union members start at offset 0). Returns
/// null for zero-width bit-fields, which occupy no storage.
llvm::Type *
CGRecordLayoutBuilder::LayoutUnionField(const FieldDecl *Field,
                                        const ASTRecordLayout &Layout) {
  if (Field->isBitField()) {
    uint64_t FieldSize = Field->getBitWidthValue(Types.getContext());

    // Ignore zero sized bit fields.
    if (FieldSize == 0)
      return 0;

    // Union bit-fields are modeled as an i8 (or [N x i8]) blob covering the
    // bits, rather than as the declared field type.
    llvm::Type *FieldTy = llvm::Type::getInt8Ty(Types.getLLVMContext());
    CharUnits NumBytesToAppend = Types.getContext().toCharUnitsFromBits(
      llvm::RoundUpToAlignment(FieldSize,
                               Types.getContext().getTargetInfo().getCharAlign()));

    if (NumBytesToAppend > CharUnits::One())
      FieldTy = llvm::ArrayType::get(FieldTy, NumBytesToAppend.getQuantity());

    // Add the bit field info.
    BitFields.insert(std::make_pair(Field,
                     CGBitFieldInfo::MakeInfo(Types, Field, 0, FieldSize)));
    return FieldTy;
  }

  // This is a regular union field.
  Fields[Field] = 0;
  return Types.ConvertTypeForMem(Field->getType());
}
/// LayoutUnion - Lay out a union by picking a single representative member
/// type (the most-aligned, then largest, member) as field 0, followed by any
/// tail padding needed to reach the union's full size.
void CGRecordLayoutBuilder::LayoutUnion(const RecordDecl *D) {
  assert(D->isUnion() && "Can't call LayoutUnion on a non-union record!");

  const ASTRecordLayout &layout = Types.getContext().getASTRecordLayout(D);

  llvm::Type *unionType = 0;
  CharUnits unionSize = CharUnits::Zero();
  CharUnits unionAlign = CharUnits::Zero();

  bool hasOnlyZeroSizedBitFields = true;
  bool checkedFirstFieldZeroInit = false;

  unsigned fieldNo = 0;
  for (RecordDecl::field_iterator field = D->field_begin(),
       fieldEnd = D->field_end(); field != fieldEnd; ++field, ++fieldNo) {
    assert(layout.getFieldOffset(fieldNo) == 0 &&
           "Union field offset did not start at the beginning of record!");
    llvm::Type *fieldType = LayoutUnionField(&*field, layout);

    // Null means a zero-width bit-field: no storage contribution.
    if (!fieldType)
      continue;

    // Zero-initializability of the union is determined by its first named
    // member (the one a zeroinitializer would initialize).
    if (field->getDeclName() && !checkedFirstFieldZeroInit) {
      CheckZeroInitializable(field->getType());
      checkedFirstFieldZeroInit = true;
    }

    hasOnlyZeroSizedBitFields = false;

    CharUnits fieldAlign = CharUnits::fromQuantity(
                          Types.getTargetData().getABITypeAlignment(fieldType));
    CharUnits fieldSize = CharUnits::fromQuantity(
                             Types.getTargetData().getTypeAllocSize(fieldType));

    // Prefer the most-aligned member; among equally aligned members, prefer
    // the largest one, so the chosen type covers as much of the union as
    // possible.
    if (fieldAlign < unionAlign)
      continue;

    if (fieldAlign > unionAlign || fieldSize > unionSize) {
      unionType = fieldType;
      unionAlign = fieldAlign;
      unionSize = fieldSize;
    }
  }

  // Now add our field.
  if (unionType) {
    AppendField(CharUnits::Zero(), unionType);

    if (getTypeAlignment(unionType) > layout.getAlignment()) {
      // We need a packed struct.
      Packed = true;
      unionAlign = CharUnits::One();
    }
  }
  if (unionAlign.isZero()) {
    (void)hasOnlyZeroSizedBitFields;
    assert(hasOnlyZeroSizedBitFields &&
           "0-align record did not have all zero-sized bit-fields!");
    unionAlign = CharUnits::One();
  }

  // Append tail padding.
  CharUnits recordSize = layout.getSize();

  if (recordSize > unionSize)
    AppendPadding(recordSize, unionAlign);
}
/// LayoutBase - Lay out a single base subobject (virtual or non-virtual) at
/// the given offset, using the base's precomputed base-subobject LLVM type.
/// Returns false if the subobject's alignment exceeds the record alignment,
/// forcing a packed retry. Note: padding and LastLaidOutBase bookkeeping are
/// performed before the failure check; that is harmless because failure
/// restarts the entire layout from scratch.
bool CGRecordLayoutBuilder::LayoutBase(const CXXRecordDecl *base,
                                       const CGRecordLayout &baseLayout,
                                       CharUnits baseOffset) {
  // A base may land in the tail padding of the previous base; shrink that
  // base's LLVM type to an i8 array first if so.
  ResizeLastBaseFieldIfNecessary(baseOffset);

  AppendPadding(baseOffset, CharUnits::One());

  const ASTRecordLayout &baseASTLayout
    = Types.getContext().getASTRecordLayout(base);

  // Record this base so a later field/base can be placed in its tail padding.
  LastLaidOutBase.Offset = NextFieldOffset;
  LastLaidOutBase.NonVirtualSize = baseASTLayout.getNonVirtualSize();

  llvm::StructType *subobjectType = baseLayout.getBaseSubobjectLLVMType();
  if (getTypeAlignment(subobjectType) > Alignment)
    return false;

  AppendField(baseOffset, subobjectType);
  return true;
}
  491. bool CGRecordLayoutBuilder::LayoutNonVirtualBase(const CXXRecordDecl *base,
  492. CharUnits baseOffset) {
  493. // Ignore empty bases.
  494. if (base->isEmpty()) return true;
  495. const CGRecordLayout &baseLayout = Types.getCGRecordLayout(base);
  496. if (IsZeroInitializableAsBase) {
  497. assert(IsZeroInitializable &&
  498. "class zero-initializable as base but not as complete object");
  499. IsZeroInitializable = IsZeroInitializableAsBase =
  500. baseLayout.isZeroInitializableAsBase();
  501. }
  502. if (!LayoutBase(base, baseLayout, baseOffset))
  503. return false;
  504. NonVirtualBases[base] = (FieldTypes.size() - 1);
  505. return true;
  506. }
  507. bool
  508. CGRecordLayoutBuilder::LayoutVirtualBase(const CXXRecordDecl *base,
  509. CharUnits baseOffset) {
  510. // Ignore empty bases.
  511. if (base->isEmpty()) return true;
  512. const CGRecordLayout &baseLayout = Types.getCGRecordLayout(base);
  513. if (IsZeroInitializable)
  514. IsZeroInitializable = baseLayout.isZeroInitializableAsBase();
  515. if (!LayoutBase(base, baseLayout, baseOffset))
  516. return false;
  517. VirtualBases[base] = (FieldTypes.size() - 1);
  518. return true;
  519. }
  520. bool
  521. CGRecordLayoutBuilder::MSLayoutVirtualBases(const CXXRecordDecl *RD,
  522. const ASTRecordLayout &Layout) {
  523. if (!RD->getNumVBases())
  524. return true;
  525. // The vbases list is uniqued and ordered by a depth-first
  526. // traversal, which is what we need here.
  527. for (CXXRecordDecl::base_class_const_iterator I = RD->vbases_begin(),
  528. E = RD->vbases_end(); I != E; ++I) {
  529. const CXXRecordDecl *BaseDecl =
  530. cast<CXXRecordDecl>(I->getType()->castAs<RecordType>()->getDecl());
  531. CharUnits vbaseOffset = Layout.getVBaseClassOffset(BaseDecl);
  532. if (!LayoutVirtualBase(BaseDecl, vbaseOffset))
  533. return false;
  534. }
  535. return true;
  536. }
/// LayoutVirtualBases - layout the virtual bases of a record decl by
/// recursively walking the entire base-class graph, skipping virtual bases
/// that are the indirect primary base of some other base class (those are
/// laid out with that base). (The original comment said "non-virtual bases",
/// a copy-paste error.)
bool
CGRecordLayoutBuilder::LayoutVirtualBases(const CXXRecordDecl *RD,
                                          const ASTRecordLayout &Layout) {
  for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
       E = RD->bases_end(); I != E; ++I) {
    const CXXRecordDecl *BaseDecl =
      cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());

    // We only want to lay out virtual bases that aren't indirect primary bases
    // of some other base.
    if (I->isVirtual() && !IndirectPrimaryBases.count(BaseDecl)) {
      // Only lay out the base once.
      if (!LaidOutVirtualBases.insert(BaseDecl))
        continue;

      CharUnits vbaseOffset = Layout.getVBaseClassOffset(BaseDecl);
      if (!LayoutVirtualBase(BaseDecl, vbaseOffset))
        return false;
    }

    if (!BaseDecl->getNumVBases()) {
      // This base isn't interesting since it doesn't have any virtual bases.
      continue;
    }

    // Recurse: virtual bases may appear anywhere in the inheritance graph,
    // but their offsets are always relative to the most-derived layout.
    if (!LayoutVirtualBases(BaseDecl, Layout))
      return false;
  }
  return true;
}
/// LayoutNonVirtualBases - Lay out the primary base (or a vtable pointer when
/// there is no primary base but the class is dynamic), then all remaining
/// direct non-virtual bases, and finally a vb-table pointer if the Microsoft
/// layout requires one.
bool
CGRecordLayoutBuilder::LayoutNonVirtualBases(const CXXRecordDecl *RD,
                                             const ASTRecordLayout &Layout) {
  const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase();

  // If we have a primary base, lay it out first.
  if (PrimaryBase) {
    if (!Layout.isPrimaryBaseVirtual()) {
      if (!LayoutNonVirtualBase(PrimaryBase, CharUnits::Zero()))
        return false;
    } else {
      if (!LayoutVirtualBase(PrimaryBase, CharUnits::Zero()))
        return false;
    }

  // Otherwise, add a vtable / vf-table if the layout says to do so.
  // Under the Microsoft ABI the decision comes from the vfptr offset in the
  // AST layout; under Itanium it is simply whether the class is dynamic.
  } else if (Types.getContext().getTargetInfo().getCXXABI() == CXXABI_Microsoft
               ? Layout.getVFPtrOffset() != CharUnits::fromQuantity(-1)
               : RD->isDynamicClass()) {
    llvm::Type *FunctionType =
      llvm::FunctionType::get(llvm::Type::getInt32Ty(Types.getLLVMContext()),
                              /*isVarArg=*/true);
    llvm::Type *VTableTy = FunctionType->getPointerTo();

    if (getTypeAlignment(VTableTy) > Alignment) {
      // FIXME: Should we allow this to happen in Sema?
      assert(!Packed && "Alignment is wrong even with packed struct!");
      return false;
    }

    assert(NextFieldOffset.isZero() &&
           "VTable pointer must come first!");
    // The field is a pointer to the vtable, i.e. i32 (...)** -- hence the
    // extra getPointerTo() on the function-pointer type.
    AppendField(CharUnits::Zero(), VTableTy->getPointerTo());
  }

  // Layout the non-virtual bases.
  for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
       E = RD->bases_end(); I != E; ++I) {
    if (I->isVirtual())
      continue;

    const CXXRecordDecl *BaseDecl =
      cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());

    // We've already laid out the primary base.
    if (BaseDecl == PrimaryBase && !Layout.isPrimaryBaseVirtual())
      continue;

    if (!LayoutNonVirtualBase(BaseDecl, Layout.getBaseClassOffset(BaseDecl)))
      return false;
  }

  // Add a vb-table pointer if the layout insists.
  if (Layout.getVBPtrOffset() != CharUnits::fromQuantity(-1)) {
    CharUnits VBPtrOffset = Layout.getVBPtrOffset();
    llvm::Type *Vbptr = llvm::Type::getInt32PtrTy(Types.getLLVMContext());
    AppendPadding(VBPtrOffset, getTypeAlignment(Vbptr));
    AppendField(VBPtrOffset, Vbptr);
  }

  return true;
}
// Computes the LLVM type used when this record appears as a base-class
// subobject, i.e. covering only the non-virtual size (whose tail padding a
// derived class may reuse). Returns false if the fields laid out so far do
// not fit, meaning the caller must retry with a packed layout.
bool
CGRecordLayoutBuilder::ComputeNonVirtualBaseType(const CXXRecordDecl *RD) {
  const ASTRecordLayout &Layout = Types.getContext().getASTRecordLayout(RD);

  CharUnits NonVirtualSize = Layout.getNonVirtualSize();
  CharUnits NonVirtualAlign = Layout.getNonVirtualAlign();
  CharUnits AlignedNonVirtualTypeSize =
    NonVirtualSize.RoundUpToAlignment(NonVirtualAlign);

  // First check if we can use the same fields as for the complete class.
  // (BaseSubobjectType stays null; the caller then reuses the complete type.)
  CharUnits RecordSize = Layout.getSize();
  if (AlignedNonVirtualTypeSize == RecordSize)
    return true;

  // Check if we need padding.
  CharUnits AlignedNextFieldOffset =
    NextFieldOffset.RoundUpToAlignment(getAlignmentAsLLVMStruct());

  // The fields already extend past the base subobject's size; only a packed
  // layout can shrink them back.
  if (AlignedNextFieldOffset > AlignedNonVirtualTypeSize) {
    assert(!Packed && "cannot layout even as packed struct");
    return false; // Needs packing.
  }

  bool needsPadding = (AlignedNonVirtualTypeSize != AlignedNextFieldOffset);
  if (needsPadding) {
    CharUnits NumBytes = AlignedNonVirtualTypeSize - AlignedNextFieldOffset;
    FieldTypes.push_back(getByteArrayType(NumBytes));
  }

  BaseSubobjectType = llvm::StructType::create(Types.getLLVMContext(),
                                               FieldTypes, "", Packed);
  Types.addRecordTypeName(RD, BaseSubobjectType, ".base");

  // Pull the padding back off: FieldTypes continues to describe the
  // complete-object type, which must not carry the base-only trailing pad.
  if (needsPadding)
    FieldTypes.pop_back();

  return true;
}
// Lays out a complete (non-union) record: non-virtual bases, then the
// declared fields, then the base subobject type and virtual bases (ABI
// dependent), and finally tail padding up to the AST record size.
// Returns false when layout fails and should be retried packed.
bool CGRecordLayoutBuilder::LayoutFields(const RecordDecl *D) {
  assert(!D->isUnion() && "Can't call LayoutFields on a union!");
  assert(!Alignment.isZero() && "Did not set alignment!");

  const ASTRecordLayout &Layout = Types.getContext().getASTRecordLayout(D);

  const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(D);
  if (RD)
    if (!LayoutNonVirtualBases(RD, Layout))
      return false;

  unsigned FieldNo = 0;
  const FieldDecl *LastFD = 0;

  for (RecordDecl::field_iterator Field = D->field_begin(),
       FieldEnd = D->field_end(); Field != FieldEnd; ++Field, ++FieldNo) {
    if (IsMsStruct) {
      // Zero-length bitfields following non-bitfield members are
      // ignored:
      const FieldDecl *FD = &*Field;
      if (Types.getContext().ZeroBitfieldFollowsNonBitfield(FD, LastFD)) {
        // Undo the loop header's ++FieldNo so the skipped declaration does
        // not consume a layout-field slot.
        --FieldNo;
        continue;
      }
      LastFD = FD;
    }

    if (!LayoutField(&*Field, Layout.getFieldOffset(FieldNo))) {
      assert(!Packed &&
             "Could not layout fields even with a packed LLVM struct!");
      return false;
    }
  }

  if (RD) {
    // We've laid out the non-virtual bases and the fields, now compute the
    // non-virtual base field types.
    if (!ComputeNonVirtualBaseType(RD)) {
      assert(!Packed && "Could not layout even with a packed LLVM struct!");
      return false;
    }

    // Lay out the virtual bases.  The MS ABI uses a different
    // algorithm here due to the lack of primary virtual bases.
    if (Types.getContext().getTargetInfo().getCXXABI() != CXXABI_Microsoft) {
      // Record indirect primary virtual bases so they are not laid out twice.
      RD->getIndirectPrimaryBases(IndirectPrimaryBases);
      if (Layout.isPrimaryBaseVirtual())
        IndirectPrimaryBases.insert(Layout.getPrimaryBase());

      if (!LayoutVirtualBases(RD, Layout))
        return false;
    } else {
      if (!MSLayoutVirtualBases(RD, Layout))
        return false;
    }
  }

  // Append tail padding if necessary.
  AppendTailPadding(Layout.getSize());

  return true;
}
// Pads the LLVM struct out to the AST-computed record size, after first
// shrinking the last laid-out base if the record size lands inside its
// tail padding.
void CGRecordLayoutBuilder::AppendTailPadding(CharUnits RecordSize) {
  ResizeLastBaseFieldIfNecessary(RecordSize);

  assert(NextFieldOffset <= RecordSize && "Size mismatch!");

  CharUnits AlignedNextFieldOffset =
    NextFieldOffset.RoundUpToAlignment(getAlignmentAsLLVMStruct());

  if (AlignedNextFieldOffset == RecordSize) {
    // We don't need any padding.
    return;
  }

  // Note: the pad is measured from the raw (unaligned) NextFieldOffset so
  // the struct's allocated size reaches RecordSize exactly.
  CharUnits NumPadBytes = RecordSize - NextFieldOffset;
  AppendBytes(NumPadBytes);
}
  711. void CGRecordLayoutBuilder::AppendField(CharUnits fieldOffset,
  712. llvm::Type *fieldType) {
  713. CharUnits fieldSize =
  714. CharUnits::fromQuantity(Types.getTargetData().getTypeAllocSize(fieldType));
  715. FieldTypes.push_back(fieldType);
  716. NextFieldOffset = fieldOffset + fieldSize;
  717. BitsAvailableInLastField = 0;
  718. }
  719. void CGRecordLayoutBuilder::AppendPadding(CharUnits fieldOffset,
  720. CharUnits fieldAlignment) {
  721. assert(NextFieldOffset <= fieldOffset &&
  722. "Incorrect field layout!");
  723. // Do nothing if we're already at the right offset.
  724. if (fieldOffset == NextFieldOffset) return;
  725. // If we're not emitting a packed LLVM type, try to avoid adding
  726. // unnecessary padding fields.
  727. if (!Packed) {
  728. // Round up the field offset to the alignment of the field type.
  729. CharUnits alignedNextFieldOffset =
  730. NextFieldOffset.RoundUpToAlignment(fieldAlignment);
  731. assert(alignedNextFieldOffset <= fieldOffset);
  732. // If that's the right offset, we're done.
  733. if (alignedNextFieldOffset == fieldOffset) return;
  734. }
  735. // Otherwise we need explicit padding.
  736. CharUnits padding = fieldOffset - NextFieldOffset;
  737. AppendBytes(padding);
  738. }
  739. bool CGRecordLayoutBuilder::ResizeLastBaseFieldIfNecessary(CharUnits offset) {
  740. // Check if we have a base to resize.
  741. if (!LastLaidOutBase.isValid())
  742. return false;
  743. // This offset does not overlap with the tail padding.
  744. if (offset >= NextFieldOffset)
  745. return false;
  746. // Restore the field offset and append an i8 array instead.
  747. FieldTypes.pop_back();
  748. NextFieldOffset = LastLaidOutBase.Offset;
  749. AppendBytes(LastLaidOutBase.NonVirtualSize);
  750. LastLaidOutBase.invalidate();
  751. return true;
  752. }
  753. llvm::Type *CGRecordLayoutBuilder::getByteArrayType(CharUnits numBytes) {
  754. assert(!numBytes.isZero() && "Empty byte arrays aren't allowed.");
  755. llvm::Type *Ty = llvm::Type::getInt8Ty(Types.getLLVMContext());
  756. if (numBytes > CharUnits::One())
  757. Ty = llvm::ArrayType::get(Ty, numBytes.getQuantity());
  758. return Ty;
  759. }
  760. void CGRecordLayoutBuilder::AppendBytes(CharUnits numBytes) {
  761. if (numBytes.isZero())
  762. return;
  763. // Append the padding field
  764. AppendField(NextFieldOffset, getByteArrayType(numBytes));
  765. }
  766. CharUnits CGRecordLayoutBuilder::getTypeAlignment(llvm::Type *Ty) const {
  767. if (Packed)
  768. return CharUnits::One();
  769. return CharUnits::fromQuantity(Types.getTargetData().getABITypeAlignment(Ty));
  770. }
  771. CharUnits CGRecordLayoutBuilder::getAlignmentAsLLVMStruct() const {
  772. if (Packed)
  773. return CharUnits::One();
  774. CharUnits maxAlignment = CharUnits::One();
  775. for (size_t i = 0; i != FieldTypes.size(); ++i)
  776. maxAlignment = std::max(maxAlignment, getTypeAlignment(FieldTypes[i]));
  777. return maxAlignment;
  778. }
  779. /// Merge in whether a field of the given type is zero-initializable.
  780. void CGRecordLayoutBuilder::CheckZeroInitializable(QualType T) {
  781. // This record already contains a member pointer.
  782. if (!IsZeroInitializableAsBase)
  783. return;
  784. // Can only have member pointers if we're compiling C++.
  785. if (!Types.getContext().getLangOpts().CPlusPlus)
  786. return;
  787. const Type *elementType = T->getBaseElementTypeUnsafe();
  788. if (const MemberPointerType *MPT = elementType->getAs<MemberPointerType>()) {
  789. if (!Types.getCXXABI().isZeroInitializable(MPT))
  790. IsZeroInitializable = IsZeroInitializableAsBase = false;
  791. } else if (const RecordType *RT = elementType->getAs<RecordType>()) {
  792. const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
  793. const CGRecordLayout &Layout = Types.getCGRecordLayout(RD);
  794. if (!Layout.isZeroInitializable())
  795. IsZeroInitializable = IsZeroInitializableAsBase = false;
  796. }
  797. }
// Runs the layout builder on D, fills in the body of Ty, and packages the
// results (base/field/bit-field mappings) into a freshly allocated
// CGRecordLayout.  In asserts builds, cross-checks the LLVM layout against
// the AST layout.
CGRecordLayout *CodeGenTypes::ComputeRecordLayout(const RecordDecl *D,
                                                  llvm::StructType *Ty) {
  CGRecordLayoutBuilder Builder(*this);

  Builder.Layout(D);

  Ty->setBody(Builder.FieldTypes, Builder.Packed);

  // If we're in C++, compute the base subobject type.
  llvm::StructType *BaseTy = 0;
  if (isa<CXXRecordDecl>(D) && !D->isUnion()) {
    BaseTy = Builder.BaseSubobjectType;
    // No separate base type was needed: the complete type doubles as the
    // base subobject type.
    if (!BaseTy) BaseTy = Ty;
  }

  CGRecordLayout *RL =
    new CGRecordLayout(Ty, BaseTy, Builder.IsZeroInitializable,
                       Builder.IsZeroInitializableAsBase);

  // Steal the builder's mappings rather than copying them.
  RL->NonVirtualBases.swap(Builder.NonVirtualBases);
  RL->CompleteObjectVirtualBases.swap(Builder.VirtualBases);

  // Add all the field numbers.
  RL->FieldInfo.swap(Builder.Fields);

  // Add bitfield info.
  RL->BitFields.swap(Builder.BitFields);

  // Dump the layout, if requested.
  if (getContext().getLangOpts().DumpRecordLayouts) {
    llvm::errs() << "\n*** Dumping IRgen Record Layout\n";
    llvm::errs() << "Record: ";
    D->dump();
    llvm::errs() << "\nLayout: ";
    RL->dump();
  }

#ifndef NDEBUG
  // Verify that the computed LLVM struct size matches the AST layout size.
  const ASTRecordLayout &Layout = getContext().getASTRecordLayout(D);

  uint64_t TypeSizeInBits = getContext().toBits(Layout.getSize());
  assert(TypeSizeInBits == getTargetData().getTypeAllocSizeInBits(Ty) &&
         "Type size mismatch!");

  if (BaseTy) {
    CharUnits NonVirtualSize  = Layout.getNonVirtualSize();
    CharUnits NonVirtualAlign = Layout.getNonVirtualAlign();
    CharUnits AlignedNonVirtualTypeSize =
      NonVirtualSize.RoundUpToAlignment(NonVirtualAlign);

    uint64_t AlignedNonVirtualTypeSizeInBits =
      getContext().toBits(AlignedNonVirtualTypeSize);

    assert(AlignedNonVirtualTypeSizeInBits ==
           getTargetData().getTypeAllocSizeInBits(BaseTy) &&
           "Type size mismatch!");
  }

  // Verify that the LLVM and AST field offsets agree.
  llvm::StructType *ST =
    dyn_cast<llvm::StructType>(RL->getLLVMType());
  const llvm::StructLayout *SL = getTargetData().getStructLayout(ST);

  const ASTRecordLayout &AST_RL = getContext().getASTRecordLayout(D);
  RecordDecl::field_iterator it = D->field_begin();
  const FieldDecl *LastFD = 0;
  bool IsMsStruct = D->hasAttr<MsStructAttr>();
  for (unsigned i = 0, e = AST_RL.getFieldCount(); i != e; ++i, ++it) {
    const FieldDecl *FD = &*it;

    // For non-bit-fields, just check that the LLVM struct offset matches the
    // AST offset.
    if (!FD->isBitField()) {
      unsigned FieldNo = RL->getLLVMFieldNo(FD);
      assert(AST_RL.getFieldOffset(i) == SL->getElementOffsetInBits(FieldNo) &&
             "Invalid field offset!");
      LastFD = FD;
      continue;
    }

    if (IsMsStruct) {
      // Zero-length bitfields following non-bitfield members are
      // ignored:
      // (the --i keeps the AST layout index in step with the skipped decl.)
      if (getContext().ZeroBitfieldFollowsNonBitfield(FD, LastFD)) {
        --i;
        continue;
      }
      LastFD = FD;
    }

    // Ignore unnamed bit-fields.
    if (!FD->getDeclName()) {
      LastFD = FD;
      continue;
    }

    // For bit-fields, check that every component access stays inside the
    // storage of the complete object.
    const CGBitFieldInfo &Info = RL->getBitFieldInfo(FD);
    for (unsigned i = 0, e = Info.getNumComponents(); i != e; ++i) {
      const CGBitFieldInfo::AccessInfo &AI = Info.getComponent(i);

      // Verify that every component access is within the structure.
      uint64_t FieldOffset = SL->getElementOffsetInBits(AI.FieldIndex);
      uint64_t AccessBitOffset = FieldOffset +
        getContext().toBits(AI.FieldByteOffset);
      assert(AccessBitOffset + AI.AccessWidth <= TypeSizeInBits &&
             "Invalid bit-field access (out of range)!");
    }
  }
#endif

  return RL;
}
  890. void CGRecordLayout::print(raw_ostream &OS) const {
  891. OS << "<CGRecordLayout\n";
  892. OS << " LLVMType:" << *CompleteObjectType << "\n";
  893. if (BaseSubobjectType)
  894. OS << " NonVirtualBaseLLVMType:" << *BaseSubobjectType << "\n";
  895. OS << " IsZeroInitializable:" << IsZeroInitializable << "\n";
  896. OS << " BitFields:[\n";
  897. // Print bit-field infos in declaration order.
  898. std::vector<std::pair<unsigned, const CGBitFieldInfo*> > BFIs;
  899. for (llvm::DenseMap<const FieldDecl*, CGBitFieldInfo>::const_iterator
  900. it = BitFields.begin(), ie = BitFields.end();
  901. it != ie; ++it) {
  902. const RecordDecl *RD = it->first->getParent();
  903. unsigned Index = 0;
  904. for (RecordDecl::field_iterator
  905. it2 = RD->field_begin(); &*it2 != it->first; ++it2)
  906. ++Index;
  907. BFIs.push_back(std::make_pair(Index, &it->second));
  908. }
  909. llvm::array_pod_sort(BFIs.begin(), BFIs.end());
  910. for (unsigned i = 0, e = BFIs.size(); i != e; ++i) {
  911. OS.indent(4);
  912. BFIs[i].second->print(OS);
  913. OS << "\n";
  914. }
  915. OS << "]>\n";
  916. }
  917. void CGRecordLayout::dump() const {
  918. print(llvm::errs());
  919. }
  920. void CGBitFieldInfo::print(raw_ostream &OS) const {
  921. OS << "<CGBitFieldInfo";
  922. OS << " Size:" << Size;
  923. OS << " IsSigned:" << IsSigned << "\n";
  924. OS.indent(4 + strlen("<CGBitFieldInfo"));
  925. OS << " NumComponents:" << getNumComponents();
  926. OS << " Components: [";
  927. if (getNumComponents()) {
  928. OS << "\n";
  929. for (unsigned i = 0, e = getNumComponents(); i != e; ++i) {
  930. const AccessInfo &AI = getComponent(i);
  931. OS.indent(8);
  932. OS << "<AccessInfo"
  933. << " FieldIndex:" << AI.FieldIndex
  934. << " FieldByteOffset:" << AI.FieldByteOffset.getQuantity()
  935. << " FieldBitStart:" << AI.FieldBitStart
  936. << " AccessWidth:" << AI.AccessWidth << "\n";
  937. OS.indent(8 + strlen("<AccessInfo"));
  938. OS << " AccessAlignment:" << AI.AccessAlignment.getQuantity()
  939. << " TargetBitOffset:" << AI.TargetBitOffset
  940. << " TargetBitWidth:" << AI.TargetBitWidth
  941. << ">\n";
  942. }
  943. OS.indent(4);
  944. }
  945. OS << "]>";
  946. }
  947. void CGBitFieldInfo::dump() const {
  948. print(llvm::errs());
  949. }