CGRecordLayoutBuilder.cpp

//===--- CGRecordLayoutBuilder.cpp - CGRecordLayout builder ----*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Builder implementation for CGRecordLayout objects.
//
//===----------------------------------------------------------------------===//

#include "CGRecordLayout.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/CXXInheritance.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/Expr.h"
#include "clang/AST/RecordLayout.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "CodeGenTypes.h"
#include "CGCXXABI.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Type.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetData.h"
using namespace clang;
using namespace CodeGen;

namespace {

class CGRecordLayoutBuilder {
public:
  /// FieldTypes - Holds the LLVM types that the struct is created from.
  ///
  SmallVector<llvm::Type *, 16> FieldTypes;

  /// BaseSubobjectType - Holds the LLVM type for the non-virtual part
  /// of the struct. For example, consider:
  ///
  /// struct A { int i; };
  /// struct B { void *v; };
  /// struct C : virtual A, B { };
  ///
  /// The LLVM type of C will be
  /// %struct.C = type { i32 (...)**, %struct.A, i32, %struct.B }
  ///
  /// And the LLVM type of the non-virtual base struct will be
  /// %struct.C.base = type { i32 (...)**, %struct.A, i32 }
  ///
  /// This only gets initialized if the base subobject type is
  /// different from the complete-object type.
  llvm::StructType *BaseSubobjectType;

  /// FieldInfo - Holds a field and its corresponding LLVM field number.
  llvm::DenseMap<const FieldDecl *, unsigned> Fields;

  /// BitFieldInfo - Holds location and size information about a bit field.
  llvm::DenseMap<const FieldDecl *, CGBitFieldInfo> BitFields;

  llvm::DenseMap<const CXXRecordDecl *, unsigned> NonVirtualBases;
  llvm::DenseMap<const CXXRecordDecl *, unsigned> VirtualBases;

  /// IndirectPrimaryBases - Virtual base classes, direct or indirect, that are
  /// primary base classes for some other direct or indirect base class.
  CXXIndirectPrimaryBaseSet IndirectPrimaryBases;

  /// LaidOutVirtualBases - A set of all laid out virtual bases, used to avoid
  /// laying out virtual bases more than once.
  llvm::SmallPtrSet<const CXXRecordDecl *, 4> LaidOutVirtualBases;

  /// IsZeroInitializable - Whether this struct can be C++
  /// zero-initialized with an LLVM zeroinitializer.
  bool IsZeroInitializable;
  bool IsZeroInitializableAsBase;

  /// Packed - Whether the resulting LLVM struct will be packed or not.
  bool Packed;

  /// IsMsStruct - Whether ms_struct is in effect or not.
  bool IsMsStruct;

private:
  CodeGenTypes &Types;

  /// LastLaidOutBaseInfo - Contains the offset and non-virtual size of the
  /// last base laid out. Used so that we can replace the last laid out base
  /// type with an i8 array if needed.
  struct LastLaidOutBaseInfo {
    CharUnits Offset;
    CharUnits NonVirtualSize;

    bool isValid() const { return !NonVirtualSize.isZero(); }
    void invalidate() { NonVirtualSize = CharUnits::Zero(); }
  } LastLaidOutBase;

  /// Alignment - Contains the alignment of the RecordDecl.
  CharUnits Alignment;

  /// BitsAvailableInLastField - If a bit field spans only part of an LLVM
  /// field, this will have the number of bits still available in the field.
  char BitsAvailableInLastField;

  /// NextFieldOffset - Holds the next field offset.
  CharUnits NextFieldOffset;

  /// LayoutUnionField - Will lay out a field in a union and return the type
  /// that the field will have.
  llvm::Type *LayoutUnionField(const FieldDecl *Field,
                               const ASTRecordLayout &Layout);

  /// LayoutUnion - Will lay out a union RecordDecl.
  void LayoutUnion(const RecordDecl *D);

  /// LayoutFields - try to lay out all fields in the record decl.
  /// Returns false if the operation failed because the struct is not packed.
  bool LayoutFields(const RecordDecl *D);

  /// LayoutBase - lay out a single base, virtual or non-virtual.
  bool LayoutBase(const CXXRecordDecl *base,
                  const CGRecordLayout &baseLayout,
                  CharUnits baseOffset);

  /// LayoutVirtualBase - lay out a single virtual base.
  bool LayoutVirtualBase(const CXXRecordDecl *base,
                         CharUnits baseOffset);

  /// LayoutVirtualBases - lay out the virtual bases of a record decl.
  bool LayoutVirtualBases(const CXXRecordDecl *RD,
                          const ASTRecordLayout &Layout);

  /// MSLayoutVirtualBases - lay out the virtual bases of a record decl,
  /// like MSVC.
  bool MSLayoutVirtualBases(const CXXRecordDecl *RD,
                            const ASTRecordLayout &Layout);

  /// LayoutNonVirtualBase - lay out a single non-virtual base.
  bool LayoutNonVirtualBase(const CXXRecordDecl *base,
                            CharUnits baseOffset);

  /// LayoutNonVirtualBases - lay out the non-virtual bases of a record decl.
  bool LayoutNonVirtualBases(const CXXRecordDecl *RD,
                             const ASTRecordLayout &Layout);

  /// ComputeNonVirtualBaseType - Compute the non-virtual base field types.
  bool ComputeNonVirtualBaseType(const CXXRecordDecl *RD);

  /// LayoutField - lay out a single field. Returns false if the operation
  /// failed because the current struct is not packed.
  bool LayoutField(const FieldDecl *D, uint64_t FieldOffset);

  /// LayoutBitField - lay out a single bit field.
  void LayoutBitField(const FieldDecl *D, uint64_t FieldOffset);

  /// AppendField - Appends a field with the given offset and type.
  void AppendField(CharUnits fieldOffset, llvm::Type *FieldTy);

  /// AppendPadding - Appends enough padding bytes so that the total
  /// struct size is a multiple of the field alignment.
  void AppendPadding(CharUnits fieldOffset, CharUnits fieldAlignment);

  /// ResizeLastBaseFieldIfNecessary - Fields and bases can be laid out in the
  /// tail padding of a previous base. If this happens, the type of the
  /// previous base needs to be changed to an array of i8. Returns true if the
  /// last laid out base was resized.
  bool ResizeLastBaseFieldIfNecessary(CharUnits offset);

  /// getByteArrayType - Returns a byte array type with the given number of
  /// elements.
  llvm::Type *getByteArrayType(CharUnits NumBytes);

  /// AppendBytes - Append a given number of bytes to the record.
  void AppendBytes(CharUnits numBytes);

  /// AppendTailPadding - Append enough tail padding so that the type will have
  /// the passed size.
  void AppendTailPadding(CharUnits RecordSize);

  CharUnits getTypeAlignment(llvm::Type *Ty) const;

  /// getAlignmentAsLLVMStruct - Returns the maximum alignment of all the
  /// LLVM element types.
  CharUnits getAlignmentAsLLVMStruct() const;

  /// CheckZeroInitializable - Check if the given type contains a pointer
  /// to data member.
  void CheckZeroInitializable(QualType T);

public:
  CGRecordLayoutBuilder(CodeGenTypes &Types)
    : BaseSubobjectType(0),
      IsZeroInitializable(true), IsZeroInitializableAsBase(true),
      Packed(false), IsMsStruct(false),
      Types(Types), BitsAvailableInLastField(0) { }

  /// Layout - Will lay out a RecordDecl.
  void Layout(const RecordDecl *D);
};

}

void CGRecordLayoutBuilder::Layout(const RecordDecl *D) {
  Alignment = Types.getContext().getASTRecordLayout(D).getAlignment();
  Packed = D->hasAttr<PackedAttr>();

  IsMsStruct = D->hasAttr<MsStructAttr>();

  if (D->isUnion()) {
    LayoutUnion(D);
    return;
  }

  if (LayoutFields(D))
    return;

  // We weren't able to lay out the struct. Try again with a packed struct.
  Packed = true;
  LastLaidOutBase.invalidate();
  NextFieldOffset = CharUnits::Zero();
  FieldTypes.clear();
  Fields.clear();
  BitFields.clear();
  NonVirtualBases.clear();
  VirtualBases.clear();

  LayoutFields(D);
}
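
// Illustrative note (an added example, not from the original source): the
// packed retry above is reached when the AST layout places a field at an
// offset that no naturally aligned LLVM field can occupy. For instance,
// assuming a target where int is 4 bytes with 4-byte alignment:
//
//   #pragma pack(1)
//   struct S { char c; int i; };   // 'i' sits at byte offset 1
//
// The first LayoutFields pass fails because offset 1 is not 4-byte aligned,
// so the builder clears its state and lays S out as a packed LLVM struct,
// roughly <{ i8, i32 }>, with no implicit padding.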
CGBitFieldInfo CGBitFieldInfo::MakeInfo(CodeGenTypes &Types,
                                        const FieldDecl *FD,
                                        uint64_t FieldOffset,
                                        uint64_t FieldSize,
                                        uint64_t ContainingTypeSizeInBits,
                                        unsigned ContainingTypeAlign) {
  llvm::Type *Ty = Types.ConvertTypeForMem(FD->getType());
  CharUnits TypeSizeInBytes =
    CharUnits::fromQuantity(Types.getTargetData().getTypeAllocSize(Ty));
  uint64_t TypeSizeInBits = Types.getContext().toBits(TypeSizeInBytes);

  bool IsSigned = FD->getType()->isSignedIntegerOrEnumerationType();

  if (FieldSize > TypeSizeInBits) {
    // We have a wide bit-field. The extra bits are only used for padding, so
    // if we have a bitfield of type T, with size N:
    //
    // T t : N;
    //
    // We can just assume that it's:
    //
    // T t : sizeof(T);
    //
    FieldSize = TypeSizeInBits;
  }

  // On big-endian machines the first fields are in higher bit positions,
  // so flip the bit offset here. The byte offsets are flipped back later.
  if (Types.getTargetData().isBigEndian()) {
    FieldOffset = ((ContainingTypeSizeInBits)-FieldOffset-FieldSize);
  }

  // Compute the access components. The policy we use is to start by attempting
  // to access using the width of the bit-field type itself and to always
  // access at aligned indices of that type. If such an access would fail
  // because it extends past the bound of the type, then we reduce size to the
  // next smaller power of two and retry. The current algorithm assumes pow2
  // sized types, although this is easy to fix.
  //
  assert(llvm::isPowerOf2_32(TypeSizeInBits) && "Unexpected type size!");
  CGBitFieldInfo::AccessInfo Components[3];
  unsigned NumComponents = 0;
  unsigned AccessedTargetBits = 0;       // The number of target bits accessed.
  unsigned AccessWidth = TypeSizeInBits; // The current access width to attempt.

  // If requested, widen the initial bit-field access to be register sized. The
  // theory is that this is most likely to allow multiple accesses into the
  // same structure to be coalesced, and that the backend should be smart
  // enough to narrow the store if no coalescing is ever done.
  //
  // The subsequent code will handle aligning these accesses to common
  // boundaries and guaranteeing that we do not access past the end of the
  // structure.
  if (Types.getCodeGenOpts().UseRegisterSizedBitfieldAccess) {
    if (AccessWidth < Types.getTarget().getRegisterWidth())
      AccessWidth = Types.getTarget().getRegisterWidth();
  }

  // Round down from the field offset to find the first access position that is
  // at an aligned offset of the initial access type.
  uint64_t AccessStart = FieldOffset - (FieldOffset % AccessWidth);

  // Adjust initial access size to fit within record.
  while (AccessWidth > Types.getTarget().getCharWidth() &&
         AccessStart + AccessWidth > ContainingTypeSizeInBits) {
    AccessWidth >>= 1;
    AccessStart = FieldOffset - (FieldOffset % AccessWidth);
  }

  while (AccessedTargetBits < FieldSize) {
    // Check that we can access using a type of this size, without reading off
    // the end of the structure. This can occur with packed structures and
    // -fno-bitfield-type-align, for example.
    if (AccessStart + AccessWidth > ContainingTypeSizeInBits) {
      // If so, reduce access size to the next smaller power-of-two and retry.
      AccessWidth >>= 1;
      assert(AccessWidth >= Types.getTarget().getCharWidth()
             && "Cannot access under byte size!");
      continue;
    }

    // Otherwise, add an access component.

    // First, compute the bits inside this access which are part of the
    // target. We are reading bits [AccessStart, AccessStart + AccessWidth);
    // the intersection with [FieldOffset, FieldOffset + FieldSize) gives the
    // bits in the target that we are reading.
    assert(FieldOffset < AccessStart + AccessWidth && "Invalid access start!");
    assert(AccessStart < FieldOffset + FieldSize && "Invalid access start!");
    uint64_t AccessBitsInFieldStart = std::max(AccessStart, FieldOffset);
    uint64_t AccessBitsInFieldSize =
      std::min(AccessWidth + AccessStart,
               FieldOffset + FieldSize) - AccessBitsInFieldStart;

    assert(NumComponents < 3 && "Unexpected number of components!");
    CGBitFieldInfo::AccessInfo &AI = Components[NumComponents++];
    AI.FieldIndex = 0;
    // FIXME: We still follow the old access pattern of only using the field
    // byte offset. We should switch this once we fix the struct layout to be
    // pretty.

    // On big-endian machines we flipped the bit offset above because the first
    // fields sit in the higher bits. That flip also reversed the bytes, so
    // compensate here by flipping the byte offset back.
    if (Types.getTargetData().isBigEndian()) {
      AI.FieldByteOffset = Types.getContext().toCharUnitsFromBits(
          ContainingTypeSizeInBits - AccessStart - AccessWidth);
    } else {
      AI.FieldByteOffset = Types.getContext().toCharUnitsFromBits(AccessStart);
    }
    AI.FieldBitStart = AccessBitsInFieldStart - AccessStart;
    AI.AccessWidth = AccessWidth;
    AI.AccessAlignment = Types.getContext().toCharUnitsFromBits(
        llvm::MinAlign(ContainingTypeAlign, AccessStart));
    AI.TargetBitOffset = AccessedTargetBits;
    AI.TargetBitWidth = AccessBitsInFieldSize;

    AccessStart += AccessWidth;
    AccessedTargetBits += AI.TargetBitWidth;
  }

  assert(AccessedTargetBits == FieldSize && "Invalid bit-field access!");
  return CGBitFieldInfo(FieldSize, NumComponents, Components, IsSigned);
}
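
// Worked example (added for illustration; assumes a little-endian target with
// 32-bit int and no register-sized widening):
//
//   struct X { char a; unsigned b : 10; };  // b occupies bits [8, 18)
//
// MakeInfo starts with AccessWidth = 32 (the width of 'unsigned') and rounds
// the field offset 8 down to AccessStart = 0. The 32-bit access at byte 0
// stays inside the 32-bit record, so a single component is emitted:
// FieldByteOffset = 0, FieldBitStart = 8, AccessWidth = 32,
// TargetBitWidth = 10. Only when an access would run past the end of the
// record (packed structs, -fno-bitfield-type-align) does AccessWidth halve
// and the loop emit multiple components.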
CGBitFieldInfo CGBitFieldInfo::MakeInfo(CodeGenTypes &Types,
                                        const FieldDecl *FD,
                                        uint64_t FieldOffset,
                                        uint64_t FieldSize) {
  const RecordDecl *RD = FD->getParent();
  const ASTRecordLayout &RL = Types.getContext().getASTRecordLayout(RD);
  uint64_t ContainingTypeSizeInBits = Types.getContext().toBits(RL.getSize());
  unsigned ContainingTypeAlign = Types.getContext().toBits(RL.getAlignment());

  return MakeInfo(Types, FD, FieldOffset, FieldSize, ContainingTypeSizeInBits,
                  ContainingTypeAlign);
}

void CGRecordLayoutBuilder::LayoutBitField(const FieldDecl *D,
                                           uint64_t fieldOffset) {
  uint64_t fieldSize = D->getBitWidthValue(Types.getContext());

  if (fieldSize == 0)
    return;

  uint64_t nextFieldOffsetInBits = Types.getContext().toBits(NextFieldOffset);
  CharUnits numBytesToAppend;
  unsigned charAlign = Types.getContext().getTargetInfo().getCharAlign();

  if (fieldOffset < nextFieldOffsetInBits && !BitsAvailableInLastField) {
    assert(fieldOffset % charAlign == 0 &&
           "Field offset not aligned correctly");

    CharUnits fieldOffsetInCharUnits =
      Types.getContext().toCharUnitsFromBits(fieldOffset);

    // Try to resize the last base field.
    if (ResizeLastBaseFieldIfNecessary(fieldOffsetInCharUnits))
      nextFieldOffsetInBits = Types.getContext().toBits(NextFieldOffset);
  }

  if (fieldOffset < nextFieldOffsetInBits) {
    assert(BitsAvailableInLastField && "Bitfield size mismatch!");
    assert(!NextFieldOffset.isZero() && "Must have laid out at least one byte");

    // The bitfield begins in the previous bit-field.
    numBytesToAppend = Types.getContext().toCharUnitsFromBits(
      llvm::RoundUpToAlignment(fieldSize - BitsAvailableInLastField,
                               charAlign));
  } else {
    assert(fieldOffset % charAlign == 0 &&
           "Field offset not aligned correctly");

    // Append padding if necessary.
    AppendPadding(Types.getContext().toCharUnitsFromBits(fieldOffset),
                  CharUnits::One());

    numBytesToAppend = Types.getContext().toCharUnitsFromBits(
        llvm::RoundUpToAlignment(fieldSize, charAlign));

    assert(!numBytesToAppend.isZero() && "No bytes to append!");
  }

  // Add the bit field info.
  BitFields.insert(std::make_pair(D,
                   CGBitFieldInfo::MakeInfo(Types, D, fieldOffset, fieldSize)));

  AppendBytes(numBytesToAppend);

  BitsAvailableInLastField =
    Types.getContext().toBits(NextFieldOffset) - (fieldOffset + fieldSize);
}
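
// Illustrative trace (added example): consecutive bit-fields share storage
// byte by byte. Assuming 8-bit chars and 4-byte unsigned:
//
//   struct S { unsigned a : 3; unsigned b : 6; };
//
// 'a' appends one i8 and leaves BitsAvailableInLastField = 5; 'b' starts at
// bit offset 3, inside that byte, so only RoundUp(6 - 5, 8) = one more i8 is
// appended. The AST size (4 bytes) is then reached via tail padding, giving
// roughly { i8, i8, [2 x i8] }.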
bool CGRecordLayoutBuilder::LayoutField(const FieldDecl *D,
                                        uint64_t fieldOffset) {
  // If the field is packed, then we need a packed struct.
  if (!Packed && D->hasAttr<PackedAttr>())
    return false;

  if (D->isBitField()) {
    // We must use packed structs for unnamed bit fields since they
    // don't affect the struct alignment.
    if (!Packed && !D->getDeclName())
      return false;

    LayoutBitField(D, fieldOffset);
    return true;
  }

  CheckZeroInitializable(D->getType());

  assert(fieldOffset % Types.getTarget().getCharWidth() == 0
         && "field offset is not on a byte boundary!");
  CharUnits fieldOffsetInBytes
    = Types.getContext().toCharUnitsFromBits(fieldOffset);

  llvm::Type *Ty = Types.ConvertTypeForMem(D->getType());
  CharUnits typeAlignment = getTypeAlignment(Ty);

  // If the type alignment is larger than the struct alignment, we must use
  // a packed struct.
  if (typeAlignment > Alignment) {
    assert(!Packed && "Alignment is wrong even with packed struct!");
    return false;
  }

  if (!Packed) {
    if (const RecordType *RT = D->getType()->getAs<RecordType>()) {
      const RecordDecl *RD = cast<RecordDecl>(RT->getDecl());
      if (const MaxFieldAlignmentAttr *MFAA =
            RD->getAttr<MaxFieldAlignmentAttr>()) {
        if (MFAA->getAlignment() != Types.getContext().toBits(typeAlignment))
          return false;
      }
    }
  }

  // Round up the field offset to the alignment of the field type.
  CharUnits alignedNextFieldOffsetInBytes =
    NextFieldOffset.RoundUpToAlignment(typeAlignment);

  if (fieldOffsetInBytes < alignedNextFieldOffsetInBytes) {
    // Try to resize the last base field.
    if (ResizeLastBaseFieldIfNecessary(fieldOffsetInBytes)) {
      alignedNextFieldOffsetInBytes =
        NextFieldOffset.RoundUpToAlignment(typeAlignment);
    }
  }

  if (fieldOffsetInBytes < alignedNextFieldOffsetInBytes) {
    assert(!Packed && "Could not place field even with packed struct!");
    return false;
  }

  AppendPadding(fieldOffsetInBytes, typeAlignment);

  // Now append the field.
  Fields[D] = FieldTypes.size();
  AppendField(fieldOffsetInBytes, Ty);

  LastLaidOutBase.invalidate();
  return true;
}

llvm::Type *
CGRecordLayoutBuilder::LayoutUnionField(const FieldDecl *Field,
                                        const ASTRecordLayout &Layout) {
  if (Field->isBitField()) {
    uint64_t FieldSize = Field->getBitWidthValue(Types.getContext());

    // Ignore zero-sized bit fields.
    if (FieldSize == 0)
      return 0;

    llvm::Type *FieldTy = llvm::Type::getInt8Ty(Types.getLLVMContext());
    CharUnits NumBytesToAppend = Types.getContext().toCharUnitsFromBits(
      llvm::RoundUpToAlignment(FieldSize,
                            Types.getContext().getTargetInfo().getCharAlign()));

    if (NumBytesToAppend > CharUnits::One())
      FieldTy = llvm::ArrayType::get(FieldTy, NumBytesToAppend.getQuantity());

    // Add the bit field info.
    BitFields.insert(std::make_pair(Field,
                     CGBitFieldInfo::MakeInfo(Types, Field, 0, FieldSize)));
    return FieldTy;
  }

  // This is a regular union field.
  Fields[Field] = 0;
  return Types.ConvertTypeForMem(Field->getType());
}

void CGRecordLayoutBuilder::LayoutUnion(const RecordDecl *D) {
  assert(D->isUnion() && "Can't call LayoutUnion on a non-union record!");

  const ASTRecordLayout &layout = Types.getContext().getASTRecordLayout(D);

  llvm::Type *unionType = 0;
  CharUnits unionSize = CharUnits::Zero();
  CharUnits unionAlign = CharUnits::Zero();

  bool hasOnlyZeroSizedBitFields = true;
  bool checkedFirstFieldZeroInit = false;

  unsigned fieldNo = 0;
  for (RecordDecl::field_iterator field = D->field_begin(),
       fieldEnd = D->field_end(); field != fieldEnd; ++field, ++fieldNo) {
    assert(layout.getFieldOffset(fieldNo) == 0 &&
           "Union field offset did not start at the beginning of record!");
    llvm::Type *fieldType = LayoutUnionField(*field, layout);

    if (!fieldType)
      continue;

    if (field->getDeclName() && !checkedFirstFieldZeroInit) {
      CheckZeroInitializable(field->getType());
      checkedFirstFieldZeroInit = true;
    }

    hasOnlyZeroSizedBitFields = false;

    CharUnits fieldAlign = CharUnits::fromQuantity(
                          Types.getTargetData().getABITypeAlignment(fieldType));
    CharUnits fieldSize = CharUnits::fromQuantity(
                             Types.getTargetData().getTypeAllocSize(fieldType));

    if (fieldAlign < unionAlign)
      continue;

    if (fieldAlign > unionAlign || fieldSize > unionSize) {
      unionType = fieldType;
      unionAlign = fieldAlign;
      unionSize = fieldSize;
    }
  }

  // Now add our field.
  if (unionType) {
    AppendField(CharUnits::Zero(), unionType);

    if (getTypeAlignment(unionType) > layout.getAlignment()) {
      // We need a packed struct.
      Packed = true;
      unionAlign = CharUnits::One();
    }
  }
  if (unionAlign.isZero()) {
    assert(hasOnlyZeroSizedBitFields &&
           "0-align record did not have all zero-sized bit-fields!");
    unionAlign = CharUnits::One();
  }

  // Append tail padding.
  CharUnits recordSize = layout.getSize();
  if (recordSize > unionSize)
    AppendPadding(recordSize, unionAlign);
}
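
// Illustrative note (added example): the loop above prefers alignment over
// size when picking the union's representative field. On a typical target
// with 4-byte int:
//
//   union U { int i; char buf[6]; };
//
// 'i' (align 4) is chosen and 'buf' (align 1) is skipped despite being
// larger, so U becomes roughly { i32, [4 x i8] } once tail padding brings it
// up to the 8-byte AST size.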
bool CGRecordLayoutBuilder::LayoutBase(const CXXRecordDecl *base,
                                       const CGRecordLayout &baseLayout,
                                       CharUnits baseOffset) {
  ResizeLastBaseFieldIfNecessary(baseOffset);

  AppendPadding(baseOffset, CharUnits::One());

  const ASTRecordLayout &baseASTLayout
    = Types.getContext().getASTRecordLayout(base);

  LastLaidOutBase.Offset = NextFieldOffset;
  LastLaidOutBase.NonVirtualSize = baseASTLayout.getNonVirtualSize();

  llvm::StructType *subobjectType = baseLayout.getBaseSubobjectLLVMType();
  if (getTypeAlignment(subobjectType) > Alignment)
    return false;

  AppendField(baseOffset, subobjectType);
  return true;
}

bool CGRecordLayoutBuilder::LayoutNonVirtualBase(const CXXRecordDecl *base,
                                                 CharUnits baseOffset) {
  // Ignore empty bases.
  if (base->isEmpty()) return true;

  const CGRecordLayout &baseLayout = Types.getCGRecordLayout(base);
  if (IsZeroInitializableAsBase) {
    assert(IsZeroInitializable &&
           "class zero-initializable as base but not as complete object");

    IsZeroInitializable = IsZeroInitializableAsBase =
      baseLayout.isZeroInitializableAsBase();
  }

  if (!LayoutBase(base, baseLayout, baseOffset))
    return false;
  NonVirtualBases[base] = (FieldTypes.size() - 1);
  return true;
}

bool
CGRecordLayoutBuilder::LayoutVirtualBase(const CXXRecordDecl *base,
                                         CharUnits baseOffset) {
  // Ignore empty bases.
  if (base->isEmpty()) return true;

  const CGRecordLayout &baseLayout = Types.getCGRecordLayout(base);
  if (IsZeroInitializable)
    IsZeroInitializable = baseLayout.isZeroInitializableAsBase();

  if (!LayoutBase(base, baseLayout, baseOffset))
    return false;
  VirtualBases[base] = (FieldTypes.size() - 1);
  return true;
}

bool
CGRecordLayoutBuilder::MSLayoutVirtualBases(const CXXRecordDecl *RD,
                                            const ASTRecordLayout &Layout) {
  if (!RD->getNumVBases())
    return true;

  // The vbases list is uniqued and ordered by a depth-first
  // traversal, which is what we need here.
  for (CXXRecordDecl::base_class_const_iterator I = RD->vbases_begin(),
       E = RD->vbases_end(); I != E; ++I) {
    const CXXRecordDecl *BaseDecl =
      cast<CXXRecordDecl>(I->getType()->castAs<RecordType>()->getDecl());

    CharUnits vbaseOffset = Layout.getVBaseClassOffset(BaseDecl);
    if (!LayoutVirtualBase(BaseDecl, vbaseOffset))
      return false;
  }
  return true;
}

/// LayoutVirtualBases - lay out the virtual bases of a record decl.
bool
CGRecordLayoutBuilder::LayoutVirtualBases(const CXXRecordDecl *RD,
                                          const ASTRecordLayout &Layout) {
  for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
       E = RD->bases_end(); I != E; ++I) {
    const CXXRecordDecl *BaseDecl =
      cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());

    // We only want to lay out virtual bases that aren't indirect primary bases
    // of some other base.
    if (I->isVirtual() && !IndirectPrimaryBases.count(BaseDecl)) {
      // Only lay out the base once.
      if (!LaidOutVirtualBases.insert(BaseDecl))
        continue;

      CharUnits vbaseOffset = Layout.getVBaseClassOffset(BaseDecl);
      if (!LayoutVirtualBase(BaseDecl, vbaseOffset))
        return false;
    }

    if (!BaseDecl->getNumVBases()) {
      // This base isn't interesting since it doesn't have any virtual bases.
      continue;
    }

    if (!LayoutVirtualBases(BaseDecl, Layout))
      return false;
  }
  return true;
}

bool
CGRecordLayoutBuilder::LayoutNonVirtualBases(const CXXRecordDecl *RD,
                                             const ASTRecordLayout &Layout) {
  const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase();

  // If we have a primary base, lay it out first.
  if (PrimaryBase) {
    if (!Layout.isPrimaryBaseVirtual()) {
      if (!LayoutNonVirtualBase(PrimaryBase, CharUnits::Zero()))
        return false;
    } else {
      if (!LayoutVirtualBase(PrimaryBase, CharUnits::Zero()))
        return false;
    }

  // Otherwise, add a vtable / vf-table if the layout says to do so.
  } else if (Types.getContext().getTargetInfo().getCXXABI() == CXXABI_Microsoft
               ? Layout.getVFPtrOffset() != CharUnits::fromQuantity(-1)
               : RD->isDynamicClass()) {
    llvm::Type *FunctionType =
      llvm::FunctionType::get(llvm::Type::getInt32Ty(Types.getLLVMContext()),
                              /*isVarArg=*/true);
    llvm::Type *VTableTy = FunctionType->getPointerTo();

    assert(NextFieldOffset.isZero() &&
           "VTable pointer must come first!");
    AppendField(CharUnits::Zero(), VTableTy->getPointerTo());
  }

  // Lay out the non-virtual bases.
  for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
       E = RD->bases_end(); I != E; ++I) {
    if (I->isVirtual())
      continue;

    const CXXRecordDecl *BaseDecl =
      cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());

    // We've already laid out the primary base.
    if (BaseDecl == PrimaryBase && !Layout.isPrimaryBaseVirtual())
      continue;

    if (!LayoutNonVirtualBase(BaseDecl, Layout.getBaseClassOffset(BaseDecl)))
      return false;
  }

  // Add a vb-table pointer if the layout insists.
  if (Layout.getVBPtrOffset() != CharUnits::fromQuantity(-1)) {
    CharUnits VBPtrOffset = Layout.getVBPtrOffset();
    llvm::Type *Vbptr = llvm::Type::getInt32PtrTy(Types.getLLVMContext());
    AppendPadding(VBPtrOffset, getTypeAlignment(Vbptr));
    AppendField(VBPtrOffset, Vbptr);
  }

  return true;
}

bool
CGRecordLayoutBuilder::ComputeNonVirtualBaseType(const CXXRecordDecl *RD) {
  const ASTRecordLayout &Layout = Types.getContext().getASTRecordLayout(RD);

  CharUnits NonVirtualSize = Layout.getNonVirtualSize();
  CharUnits NonVirtualAlign = Layout.getNonVirtualAlign();
  CharUnits AlignedNonVirtualTypeSize =
    NonVirtualSize.RoundUpToAlignment(NonVirtualAlign);

  // First check if we can use the same fields as for the complete class.
  CharUnits RecordSize = Layout.getSize();
  if (AlignedNonVirtualTypeSize == RecordSize)
    return true;

  // Check if we need padding.
  CharUnits AlignedNextFieldOffset =
    NextFieldOffset.RoundUpToAlignment(getAlignmentAsLLVMStruct());

  if (AlignedNextFieldOffset > AlignedNonVirtualTypeSize) {
    assert(!Packed && "cannot layout even as packed struct");
    return false; // Needs packing.
  }

  bool needsPadding = (AlignedNonVirtualTypeSize != AlignedNextFieldOffset);
  if (needsPadding) {
    CharUnits NumBytes = AlignedNonVirtualTypeSize - AlignedNextFieldOffset;
    FieldTypes.push_back(getByteArrayType(NumBytes));
  }

  BaseSubobjectType = llvm::StructType::create(Types.getLLVMContext(),
                                               FieldTypes, "", Packed);
  Types.addRecordTypeName(RD, BaseSubobjectType, ".base");

  // Pull the padding back off.
  if (needsPadding)
    FieldTypes.pop_back();

  return true;
}

bool CGRecordLayoutBuilder::LayoutFields(const RecordDecl *D) {
  assert(!D->isUnion() && "Can't call LayoutFields on a union!");
  assert(!Alignment.isZero() && "Did not set alignment!");

  const ASTRecordLayout &Layout = Types.getContext().getASTRecordLayout(D);

  const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(D);
  if (RD)
    if (!LayoutNonVirtualBases(RD, Layout))
      return false;

  unsigned FieldNo = 0;
  const FieldDecl *LastFD = 0;

  for (RecordDecl::field_iterator Field = D->field_begin(),
       FieldEnd = D->field_end(); Field != FieldEnd; ++Field, ++FieldNo) {
    if (IsMsStruct) {
      // Zero-length bitfields following non-bitfield members are
      // ignored:
      const FieldDecl *FD = (*Field);
      if (Types.getContext().ZeroBitfieldFollowsNonBitfield(FD, LastFD)) {
        --FieldNo;
        continue;
      }
      LastFD = FD;
    }

    if (!LayoutField(*Field, Layout.getFieldOffset(FieldNo))) {
      assert(!Packed &&
             "Could not layout fields even with a packed LLVM struct!");
      return false;
    }
  }

  if (RD) {
    // We've laid out the non-virtual bases and the fields, now compute the
    // non-virtual base field types.
    if (!ComputeNonVirtualBaseType(RD)) {
      assert(!Packed && "Could not layout even with a packed LLVM struct!");
      return false;
    }

    // Lay out the virtual bases. The MS ABI uses a different
    // algorithm here due to the lack of primary virtual bases.
    if (Types.getContext().getTargetInfo().getCXXABI() != CXXABI_Microsoft) {
      RD->getIndirectPrimaryBases(IndirectPrimaryBases);
      if (Layout.isPrimaryBaseVirtual())
        IndirectPrimaryBases.insert(Layout.getPrimaryBase());

      if (!LayoutVirtualBases(RD, Layout))
        return false;
    } else {
      if (!MSLayoutVirtualBases(RD, Layout))
        return false;
    }
  }

  // Append tail padding if necessary.
  AppendTailPadding(Layout.getSize());

  return true;
}

void CGRecordLayoutBuilder::AppendTailPadding(CharUnits RecordSize) {
  ResizeLastBaseFieldIfNecessary(RecordSize);

  assert(NextFieldOffset <= RecordSize && "Size mismatch!");

  CharUnits AlignedNextFieldOffset =
    NextFieldOffset.RoundUpToAlignment(getAlignmentAsLLVMStruct());

  if (AlignedNextFieldOffset == RecordSize) {
    // We don't need any padding.
    return;
  }

  CharUnits NumPadBytes = RecordSize - NextFieldOffset;
  AppendBytes(NumPadBytes);
}

void CGRecordLayoutBuilder::AppendField(CharUnits fieldOffset,
                                        llvm::Type *fieldType) {
  CharUnits fieldSize =
    CharUnits::fromQuantity(Types.getTargetData().getTypeAllocSize(fieldType));

  FieldTypes.push_back(fieldType);

  NextFieldOffset = fieldOffset + fieldSize;
  BitsAvailableInLastField = 0;
}

void CGRecordLayoutBuilder::AppendPadding(CharUnits fieldOffset,
                                          CharUnits fieldAlignment) {
  assert(NextFieldOffset <= fieldOffset &&
         "Incorrect field layout!");

  // Do nothing if we're already at the right offset.
  if (fieldOffset == NextFieldOffset) return;

  // If we're not emitting a packed LLVM type, try to avoid adding
  // unnecessary padding fields.
  if (!Packed) {
    // Round up the field offset to the alignment of the field type.
    CharUnits alignedNextFieldOffset =
      NextFieldOffset.RoundUpToAlignment(fieldAlignment);
    assert(alignedNextFieldOffset <= fieldOffset);

    // If that's the right offset, we're done.
    if (alignedNextFieldOffset == fieldOffset) return;
  }

  // Otherwise we need explicit padding.
  CharUnits padding = fieldOffset - NextFieldOffset;
  AppendBytes(padding);
}

bool CGRecordLayoutBuilder::ResizeLastBaseFieldIfNecessary(CharUnits offset) {
  // Check if we have a base to resize.
  if (!LastLaidOutBase.isValid())
    return false;

  // This offset does not overlap with the tail padding.
  if (offset >= NextFieldOffset)
    return false;

  // Restore the field offset and append an i8 array instead.
  FieldTypes.pop_back();
  NextFieldOffset = LastLaidOutBase.Offset;
  AppendBytes(LastLaidOutBase.NonVirtualSize);
  LastLaidOutBase.invalidate();
  return true;
}
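
// Illustrative note (added example): tail-padding reuse is what makes this
// resizing necessary. Under the Itanium ABI, with a non-POD base:
//
//   struct A { int i; char c; A(); };  // nvsize 5, sizeof 8
//   struct B : A { char d; };          // 'd' lands at offset 5
//
// Laying out B first appends A's subobject type (alloc size 8); when 'd'
// arrives at offset 5, the base field is popped and replaced by [5 x i8] so
// the char can be placed inside what was A's tail padding.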
llvm::Type *CGRecordLayoutBuilder::getByteArrayType(CharUnits numBytes) {
  assert(!numBytes.isZero() && "Empty byte arrays aren't allowed.");

  llvm::Type *Ty = llvm::Type::getInt8Ty(Types.getLLVMContext());
  if (numBytes > CharUnits::One())
    Ty = llvm::ArrayType::get(Ty, numBytes.getQuantity());

  return Ty;
}

void CGRecordLayoutBuilder::AppendBytes(CharUnits numBytes) {
  if (numBytes.isZero())
    return;

  // Append the padding field.
  AppendField(NextFieldOffset, getByteArrayType(numBytes));
}

CharUnits CGRecordLayoutBuilder::getTypeAlignment(llvm::Type *Ty) const {
  if (Packed)
    return CharUnits::One();

  return CharUnits::fromQuantity(Types.getTargetData().getABITypeAlignment(Ty));
}

CharUnits CGRecordLayoutBuilder::getAlignmentAsLLVMStruct() const {
  if (Packed)
    return CharUnits::One();

  CharUnits maxAlignment = CharUnits::One();
  for (size_t i = 0; i != FieldTypes.size(); ++i)
    maxAlignment = std::max(maxAlignment, getTypeAlignment(FieldTypes[i]));

  return maxAlignment;
}

/// Merge in whether a field of the given type is zero-initializable.
void CGRecordLayoutBuilder::CheckZeroInitializable(QualType T) {
  // This record already contains a member pointer.
  if (!IsZeroInitializableAsBase)
    return;

  // Can only have member pointers if we're compiling C++.
  if (!Types.getContext().getLangOptions().CPlusPlus)
    return;

  const Type *elementType = T->getBaseElementTypeUnsafe();

  if (const MemberPointerType *MPT = elementType->getAs<MemberPointerType>()) {
    if (!Types.getCXXABI().isZeroInitializable(MPT))
      IsZeroInitializable = IsZeroInitializableAsBase = false;
  } else if (const RecordType *RT = elementType->getAs<RecordType>()) {
    const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
    const CGRecordLayout &Layout = Types.getCGRecordLayout(RD);
    if (!Layout.isZeroInitializable())
      IsZeroInitializable = IsZeroInitializableAsBase = false;
  }
}
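
// Illustrative note (added example): in the Itanium C++ ABI a null pointer
// to data member is represented as -1, not 0, so a record such as
//
//   struct S { int S::*p; };
//
// cannot be zero-initialized with an LLVM zeroinitializer; the ABI query
// above marks it (and anything embedding it) as not zero-initializable.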
CGRecordLayout *CodeGenTypes::ComputeRecordLayout(const RecordDecl *D,
                                                  llvm::StructType *Ty) {
  CGRecordLayoutBuilder Builder(*this);

  Builder.Layout(D);

  Ty->setBody(Builder.FieldTypes, Builder.Packed);

  // If we're in C++, compute the base subobject type.
  llvm::StructType *BaseTy = 0;
  if (isa<CXXRecordDecl>(D) && !D->isUnion()) {
    BaseTy = Builder.BaseSubobjectType;
    if (!BaseTy) BaseTy = Ty;
  }

  CGRecordLayout *RL =
    new CGRecordLayout(Ty, BaseTy, Builder.IsZeroInitializable,
                       Builder.IsZeroInitializableAsBase);

  RL->NonVirtualBases.swap(Builder.NonVirtualBases);
  RL->CompleteObjectVirtualBases.swap(Builder.VirtualBases);

  // Add all the field numbers.
  RL->FieldInfo.swap(Builder.Fields);

  // Add bitfield info.
  RL->BitFields.swap(Builder.BitFields);

  // Dump the layout, if requested.
  if (getContext().getLangOptions().DumpRecordLayouts) {
    llvm::errs() << "\n*** Dumping IRgen Record Layout\n";
    llvm::errs() << "Record: ";
    D->dump();
    llvm::errs() << "\nLayout: ";
    RL->dump();
  }

#ifndef NDEBUG
  // Verify that the computed LLVM struct size matches the AST layout size.
  const ASTRecordLayout &Layout = getContext().getASTRecordLayout(D);

  uint64_t TypeSizeInBits = getContext().toBits(Layout.getSize());
  assert(TypeSizeInBits == getTargetData().getTypeAllocSizeInBits(Ty) &&
         "Type size mismatch!");

  if (BaseTy) {
    CharUnits NonVirtualSize = Layout.getNonVirtualSize();
    CharUnits NonVirtualAlign = Layout.getNonVirtualAlign();
    CharUnits AlignedNonVirtualTypeSize =
      NonVirtualSize.RoundUpToAlignment(NonVirtualAlign);

    uint64_t AlignedNonVirtualTypeSizeInBits =
      getContext().toBits(AlignedNonVirtualTypeSize);

    assert(AlignedNonVirtualTypeSizeInBits ==
           getTargetData().getTypeAllocSizeInBits(BaseTy) &&
           "Type size mismatch!");
  }

  // Verify that the LLVM and AST field offsets agree.
  llvm::StructType *ST =
    dyn_cast<llvm::StructType>(RL->getLLVMType());
  const llvm::StructLayout *SL = getTargetData().getStructLayout(ST);

  const ASTRecordLayout &AST_RL = getContext().getASTRecordLayout(D);
  RecordDecl::field_iterator it = D->field_begin();
  const FieldDecl *LastFD = 0;
  bool IsMsStruct = D->hasAttr<MsStructAttr>();
  for (unsigned i = 0, e = AST_RL.getFieldCount(); i != e; ++i, ++it) {
    const FieldDecl *FD = *it;

    // For non-bit-fields, just check that the LLVM struct offset matches the
    // AST offset.
    if (!FD->isBitField()) {
      unsigned FieldNo = RL->getLLVMFieldNo(FD);
      assert(AST_RL.getFieldOffset(i) == SL->getElementOffsetInBits(FieldNo) &&
             "Invalid field offset!");
      LastFD = FD;
      continue;
    }

    if (IsMsStruct) {
      // Zero-length bitfields following non-bitfield members are
      // ignored:
      if (getContext().ZeroBitfieldFollowsNonBitfield(FD, LastFD)) {
        --i;
        continue;
      }
      LastFD = FD;
    }

    // Ignore unnamed bit-fields.
    if (!FD->getDeclName()) {
      LastFD = FD;
      continue;
    }

    const CGBitFieldInfo &Info = RL->getBitFieldInfo(FD);
    for (unsigned i = 0, e = Info.getNumComponents(); i != e; ++i) {
      const CGBitFieldInfo::AccessInfo &AI = Info.getComponent(i);

      // Verify that every component access is within the structure.
      uint64_t FieldOffset = SL->getElementOffsetInBits(AI.FieldIndex);
      uint64_t AccessBitOffset = FieldOffset +
        getContext().toBits(AI.FieldByteOffset);
      assert(AccessBitOffset + AI.AccessWidth <= TypeSizeInBits &&
             "Invalid bit-field access (out of range)!");
    }
  }
#endif

  return RL;
}

void CGRecordLayout::print(raw_ostream &OS) const {
  OS << "<CGRecordLayout\n";
  OS << " LLVMType:" << *CompleteObjectType << "\n";
  if (BaseSubobjectType)
    OS << " NonVirtualBaseLLVMType:" << *BaseSubobjectType << "\n";
  OS << " IsZeroInitializable:" << IsZeroInitializable << "\n";
  OS << " BitFields:[\n";

  // Print bit-field infos in declaration order.
  std::vector<std::pair<unsigned, const CGBitFieldInfo*> > BFIs;
  for (llvm::DenseMap<const FieldDecl*, CGBitFieldInfo>::const_iterator
         it = BitFields.begin(), ie = BitFields.end();
       it != ie; ++it) {
    const RecordDecl *RD = it->first->getParent();
    unsigned Index = 0;
    for (RecordDecl::field_iterator
           it2 = RD->field_begin(); *it2 != it->first; ++it2)
      ++Index;
    BFIs.push_back(std::make_pair(Index, &it->second));
  }
  llvm::array_pod_sort(BFIs.begin(), BFIs.end());
  for (unsigned i = 0, e = BFIs.size(); i != e; ++i) {
    OS.indent(4);
    BFIs[i].second->print(OS);
    OS << "\n";
  }

  OS << "]>\n";
}

void CGRecordLayout::dump() const {
  print(llvm::errs());
}

void CGBitFieldInfo::print(raw_ostream &OS) const {
  OS << "<CGBitFieldInfo";
  OS << " Size:" << Size;
  OS << " IsSigned:" << IsSigned << "\n";

  OS.indent(4 + strlen("<CGBitFieldInfo"));
  OS << " NumComponents:" << getNumComponents();
  OS << " Components: [";
  if (getNumComponents()) {
    OS << "\n";
    for (unsigned i = 0, e = getNumComponents(); i != e; ++i) {
      const AccessInfo &AI = getComponent(i);
      OS.indent(8);
      OS << "<AccessInfo"
         << " FieldIndex:" << AI.FieldIndex
         << " FieldByteOffset:" << AI.FieldByteOffset.getQuantity()
         << " FieldBitStart:" << AI.FieldBitStart
         << " AccessWidth:" << AI.AccessWidth << "\n";
      OS.indent(8 + strlen("<AccessInfo"));
      OS << " AccessAlignment:" << AI.AccessAlignment.getQuantity()
         << " TargetBitOffset:" << AI.TargetBitOffset
         << " TargetBitWidth:" << AI.TargetBitWidth
         << ">\n";
    }
    OS.indent(4);
  }
  OS << "]>";
}

void CGBitFieldInfo::dump() const {
  print(llvm::errs());
}