CGRecordLayoutBuilder.cpp

//===--- CGRecordLayoutBuilder.cpp - CGRecordLayout builder ----*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Builder implementation for CGRecordLayout objects.
//
//===----------------------------------------------------------------------===//
#include "CGRecordLayout.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/Expr.h"
#include "clang/AST/RecordLayout.h"
#include "CodeGenTypes.h"
#include "CGCXXABI.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Type.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetData.h"

using namespace clang;
using namespace CodeGen;
namespace clang {
namespace CodeGen {

class CGRecordLayoutBuilder {
public:
  /// FieldTypes - Holds the LLVM types that the struct is created from.
  std::vector<const llvm::Type *> FieldTypes;

  /// LLVMFieldInfo - Holds a field and its corresponding LLVM field number.
  typedef std::pair<const FieldDecl *, unsigned> LLVMFieldInfo;
  llvm::SmallVector<LLVMFieldInfo, 16> LLVMFields;

  /// LLVMBitFieldInfo - Holds location and size information about a bit
  /// field.
  typedef std::pair<const FieldDecl *, CGBitFieldInfo> LLVMBitFieldInfo;
  llvm::SmallVector<LLVMBitFieldInfo, 16> LLVMBitFields;

  typedef std::pair<const CXXRecordDecl *, unsigned> LLVMBaseInfo;
  llvm::SmallVector<LLVMBaseInfo, 16> LLVMNonVirtualBases;

  /// IsZeroInitializable - Whether this struct can be C++
  /// zero-initialized with an LLVM zeroinitializer.
  bool IsZeroInitializable;

  /// Packed - Whether the resulting LLVM struct will be packed or not.
  bool Packed;

private:
  CodeGenTypes &Types;

  /// Alignment - Contains the alignment of the RecordDecl.
  //
  // FIXME: This is not needed and should be removed.
  unsigned Alignment;

  /// AlignmentAsLLVMStruct - Will contain the maximum alignment of all the
  /// LLVM types.
  unsigned AlignmentAsLLVMStruct;

  /// BitsAvailableInLastField - If a bit field spans only part of an LLVM
  /// field, this will have the number of bits still available in the field.
  char BitsAvailableInLastField;

  /// NextFieldOffsetInBytes - Holds the next field offset in bytes.
  uint64_t NextFieldOffsetInBytes;

  /// LayoutUnionField - Lays out a field in a union and returns the type
  /// that the field will have.
  const llvm::Type *LayoutUnionField(const FieldDecl *Field,
                                     const ASTRecordLayout &Layout);

  /// LayoutUnion - Lays out a union RecordDecl.
  void LayoutUnion(const RecordDecl *D);

  /// LayoutFields - Tries to lay out all fields in the record decl.
  /// Returns false if the operation failed because the struct is not packed.
  bool LayoutFields(const RecordDecl *D);

  /// LayoutNonVirtualBase - Lays out a single non-virtual base.
  void LayoutNonVirtualBase(const CXXRecordDecl *BaseDecl,
                            uint64_t BaseOffset);

  /// LayoutNonVirtualBases - Lays out the non-virtual bases of a record decl.
  void LayoutNonVirtualBases(const CXXRecordDecl *RD,
                             const ASTRecordLayout &Layout);

  /// LayoutField - Lays out a single field. Returns false if the operation
  /// failed because the current struct is not packed.
  bool LayoutField(const FieldDecl *D, uint64_t FieldOffset);

  /// LayoutBitField - Lays out a single bit field.
  void LayoutBitField(const FieldDecl *D, uint64_t FieldOffset);

  /// AppendField - Appends a field with the given offset and type.
  void AppendField(uint64_t FieldOffsetInBytes, const llvm::Type *FieldTy);

  /// AppendPadding - Appends enough padding bytes so that the total
  /// struct size is a multiple of the field alignment.
  void AppendPadding(uint64_t FieldOffsetInBytes, unsigned FieldAlignment);

  /// AppendBytes - Appends a given number of bytes to the record.
  void AppendBytes(uint64_t NumBytes);

  /// AppendTailPadding - Appends enough tail padding so that the type will
  /// have the passed size.
  void AppendTailPadding(uint64_t RecordSize);

  unsigned getTypeAlignment(const llvm::Type *Ty) const;

  /// CheckZeroInitializable - Check if the given type contains a pointer
  /// to data member.
  void CheckZeroInitializable(QualType T);
  void CheckZeroInitializable(const CXXRecordDecl *RD);

public:
  CGRecordLayoutBuilder(CodeGenTypes &Types)
    : IsZeroInitializable(true), Packed(false), Types(Types),
      Alignment(0), AlignmentAsLLVMStruct(1),
      BitsAvailableInLastField(0), NextFieldOffsetInBytes(0) { }

  /// Layout - Lays out a RecordDecl.
  void Layout(const RecordDecl *D);
};

} // end namespace CodeGen
} // end namespace clang
void CGRecordLayoutBuilder::Layout(const RecordDecl *D) {
  Alignment = Types.getContext().getASTRecordLayout(D).getAlignment() / 8;
  Packed = D->hasAttr<PackedAttr>();

  if (D->isUnion()) {
    LayoutUnion(D);
    return;
  }

  if (LayoutFields(D))
    return;

  // We weren't able to lay out the struct. Try again with a packed struct.
  Packed = true;
  AlignmentAsLLVMStruct = 1;
  NextFieldOffsetInBytes = 0;
  FieldTypes.clear();
  LLVMFields.clear();
  LLVMBitFields.clear();
  LLVMNonVirtualBases.clear();

  LayoutFields(D);
}
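
// For illustration of the packed fallback above: a record whose AST layout
// places a field at an offset the natural LLVM alignments can't reproduce
// triggers the retry. A sketch, assuming #pragma pack lowers to
// MaxFieldAlignmentAttr rather than PackedAttr on the record:
//
//   #pragma pack(1)
//   struct S { char c; int i; };   // 'i' lives at byte offset 1
//
// The first LayoutFields pass fails because i32 has ABI alignment 4 but the
// record alignment is 1, so the builder retries with Packed = true and
// produces the packed LLVM type <{ i8, i32 }>.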
CGBitFieldInfo CGBitFieldInfo::MakeInfo(CodeGenTypes &Types,
                                        const FieldDecl *FD,
                                        uint64_t FieldOffset,
                                        uint64_t FieldSize,
                                        uint64_t ContainingTypeSizeInBits,
                                        unsigned ContainingTypeAlign) {
  const llvm::Type *Ty = Types.ConvertTypeForMemRecursive(FD->getType());
  uint64_t TypeSizeInBytes = Types.getTargetData().getTypeAllocSize(Ty);
  uint64_t TypeSizeInBits = TypeSizeInBytes * 8;

  bool IsSigned = FD->getType()->isSignedIntegerType();

  if (FieldSize > TypeSizeInBits) {
    // We have a wide bit-field. The extra bits are only used for padding, so
    // if we have a bit-field of type T, with size N:
    //
    //   T t : N;
    //
    // We can just assume that it's:
    //
    //   T t : sizeof(T);
    //
    FieldSize = TypeSizeInBits;
  }

  // Compute the access components. The policy we use is to start by attempting
  // to access using the width of the bit-field type itself and to always
  // access at aligned indices of that type. If such an access would fail
  // because it extends past the bound of the type, then we reduce the size to
  // the next smaller power of two and retry. The current algorithm assumes
  // pow2 sized types, although this is easy to fix.
  //
  // FIXME: This algorithm is wrong on big-endian systems, I think.
  assert(llvm::isPowerOf2_32(TypeSizeInBits) && "Unexpected type size!");
  CGBitFieldInfo::AccessInfo Components[3];
  unsigned NumComponents = 0;
  unsigned AccessedTargetBits = 0;       // The number of target bits accessed.
  unsigned AccessWidth = TypeSizeInBits; // The current access width to attempt.

  // Round down from the field offset to find the first access position that is
  // at an aligned offset of the initial access type.
  uint64_t AccessStart = FieldOffset - (FieldOffset % AccessWidth);

  // Adjust initial access size to fit within record.
  while (AccessWidth > 8 &&
         AccessStart + AccessWidth > ContainingTypeSizeInBits) {
    AccessWidth >>= 1;
    AccessStart = FieldOffset - (FieldOffset % AccessWidth);
  }

  while (AccessedTargetBits < FieldSize) {
    // Check that we can access using a type of this size, without reading off
    // the end of the structure. This can occur with packed structures and
    // -fno-bitfield-type-align, for example.
    if (AccessStart + AccessWidth > ContainingTypeSizeInBits) {
      // If so, reduce access size to the next smaller power-of-two and retry.
      AccessWidth >>= 1;
      assert(AccessWidth >= 8 && "Cannot access under byte size!");
      continue;
    }

    // Otherwise, add an access component.

    // First, compute the bits inside this access which are part of the
    // target. We are reading bits [AccessStart, AccessStart + AccessWidth);
    // the intersection with [FieldOffset, FieldOffset + FieldSize) gives the
    // bits in the target that we are reading.
    assert(FieldOffset < AccessStart + AccessWidth && "Invalid access start!");
    assert(AccessStart < FieldOffset + FieldSize && "Invalid access start!");
    uint64_t AccessBitsInFieldStart = std::max(AccessStart, FieldOffset);
    uint64_t AccessBitsInFieldSize =
      std::min(AccessWidth + AccessStart,
               FieldOffset + FieldSize) - AccessBitsInFieldStart;

    assert(NumComponents < 3 && "Unexpected number of components!");
    CGBitFieldInfo::AccessInfo &AI = Components[NumComponents++];
    AI.FieldIndex = 0;
    // FIXME: We still follow the old access pattern of only using the field
    // byte offset. We should switch this once we fix the struct layout to be
    // pretty.
    AI.FieldByteOffset = AccessStart / 8;
    AI.FieldBitStart = AccessBitsInFieldStart - AccessStart;
    AI.AccessWidth = AccessWidth;
    AI.AccessAlignment = llvm::MinAlign(ContainingTypeAlign, AccessStart) / 8;
    AI.TargetBitOffset = AccessedTargetBits;
    AI.TargetBitWidth = AccessBitsInFieldSize;

    AccessStart += AccessWidth;
    AccessedTargetBits += AI.TargetBitWidth;
  }

  assert(AccessedTargetBits == FieldSize && "Invalid bit-field access!");
  return CGBitFieldInfo(FieldSize, NumComponents, Components, IsSigned);
}
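
// A worked example of the decomposition above (a sketch, assuming a
// little-endian target with a 32-bit int and a 1-byte-packed record):
//
//   #pragma pack(1)
//   struct S { char a; int b : 10; };
//
// Here FieldOffset = 8, FieldSize = 10, and ContainingTypeSizeInBits = 24.
// The initial 32-bit access would run past the end of the record, so the
// width is halved to 16 (AccessStart = 0). The loop then emits two
// components:
//
//   1. a 16-bit access at byte offset 0 covering target bits [0, 8)
//      (FieldBitStart = 8), and
//   2. an 8-bit access at byte offset 2 covering target bits [8, 10)
//      (FieldBitStart = 0).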
CGBitFieldInfo CGBitFieldInfo::MakeInfo(CodeGenTypes &Types,
                                        const FieldDecl *FD,
                                        uint64_t FieldOffset,
                                        uint64_t FieldSize) {
  const RecordDecl *RD = FD->getParent();
  const ASTRecordLayout &RL = Types.getContext().getASTRecordLayout(RD);
  uint64_t ContainingTypeSizeInBits = RL.getSize();
  unsigned ContainingTypeAlign = RL.getAlignment();

  return MakeInfo(Types, FD, FieldOffset, FieldSize, ContainingTypeSizeInBits,
                  ContainingTypeAlign);
}
void CGRecordLayoutBuilder::LayoutBitField(const FieldDecl *D,
                                           uint64_t FieldOffset) {
  uint64_t FieldSize =
    D->getBitWidth()->EvaluateAsInt(Types.getContext()).getZExtValue();

  if (FieldSize == 0)
    return;

  uint64_t NextFieldOffset = NextFieldOffsetInBytes * 8;
  unsigned NumBytesToAppend;

  if (FieldOffset < NextFieldOffset) {
    assert(BitsAvailableInLastField && "Bitfield size mismatch!");
    assert(NextFieldOffsetInBytes && "Must have laid out at least one byte!");

    // The bit-field begins inside the storage already laid out for the
    // previous bit-field; only append bytes for the bits that don't fit.
    NumBytesToAppend =
      llvm::RoundUpToAlignment(FieldSize - BitsAvailableInLastField, 8) / 8;
  } else {
    assert(FieldOffset % 8 == 0 && "Field offset not aligned correctly");

    // Append padding if necessary.
    AppendBytes((FieldOffset - NextFieldOffset) / 8);

    NumBytesToAppend =
      llvm::RoundUpToAlignment(FieldSize, 8) / 8;

    assert(NumBytesToAppend && "No bytes to append!");
  }

  // Add the bit field info.
  LLVMBitFields.push_back(
    LLVMBitFieldInfo(D, CGBitFieldInfo::MakeInfo(Types, D, FieldOffset,
                                                 FieldSize)));

  AppendBytes(NumBytesToAppend);

  BitsAvailableInLastField =
    NextFieldOffsetInBytes * 8 - (FieldOffset + FieldSize);
}
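
// For example (a sketch of how adjacent bit-fields share storage):
//
//   struct S { int a : 4; int b : 4; };
//
// Laying out 'a' appends one i8 and leaves BitsAvailableInLastField = 4.
// When 'b' arrives at bit offset 4, FieldOffset < NextFieldOffset, the
// remaining four bits cover it, and NumBytesToAppend is 0, so both
// bit-fields share the single i8; tail padding later grows the record to
// sizeof(int).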
bool CGRecordLayoutBuilder::LayoutField(const FieldDecl *D,
                                        uint64_t FieldOffset) {
  // If the field is packed, then we need a packed struct.
  if (!Packed && D->hasAttr<PackedAttr>())
    return false;

  if (D->isBitField()) {
    // We must use packed structs for unnamed bit fields since they
    // don't affect the struct alignment.
    if (!Packed && !D->getDeclName())
      return false;

    LayoutBitField(D, FieldOffset);
    return true;
  }

  CheckZeroInitializable(D->getType());

  assert(FieldOffset % 8 == 0 && "FieldOffset is not on a byte boundary!");
  uint64_t FieldOffsetInBytes = FieldOffset / 8;

  const llvm::Type *Ty = Types.ConvertTypeForMemRecursive(D->getType());
  unsigned TypeAlignment = getTypeAlignment(Ty);

  // If the type alignment is larger than the struct alignment, we must use
  // a packed struct.
  if (TypeAlignment > Alignment) {
    assert(!Packed && "Alignment is wrong even with packed struct!");
    return false;
  }

  if (const RecordType *RT = D->getType()->getAs<RecordType>()) {
    const RecordDecl *RD = cast<RecordDecl>(RT->getDecl());
    if (const MaxFieldAlignmentAttr *MFAA =
          RD->getAttr<MaxFieldAlignmentAttr>()) {
      if (MFAA->getAlignment() != TypeAlignment * 8 && !Packed)
        return false;
    }
  }

  // Round up the field offset to the alignment of the field type.
  uint64_t AlignedNextFieldOffsetInBytes =
    llvm::RoundUpToAlignment(NextFieldOffsetInBytes, TypeAlignment);

  if (FieldOffsetInBytes < AlignedNextFieldOffsetInBytes) {
    assert(!Packed && "Could not place field even with packed struct!");
    return false;
  }

  if (AlignedNextFieldOffsetInBytes < FieldOffsetInBytes) {
    // Even with alignment, the field offset is not at the right place,
    // insert padding.
    uint64_t PaddingInBytes = FieldOffsetInBytes - NextFieldOffsetInBytes;

    AppendBytes(PaddingInBytes);
  }

  // Now append the field.
  LLVMFields.push_back(LLVMFieldInfo(D, FieldTypes.size()));
  AppendField(FieldOffsetInBytes, Ty);

  return true;
}
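
// Note the unnamed bit-field bail-out above. For example, in
//
//   struct S { char c; int : 24; };
//
// the unnamed bit-field pads the layout without raising the record's
// alignment, which a non-packed LLVM struct can't express faithfully, so
// LayoutField returns false and the whole record is rebuilt as a packed
// struct.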
const llvm::Type *
CGRecordLayoutBuilder::LayoutUnionField(const FieldDecl *Field,
                                        const ASTRecordLayout &Layout) {
  if (Field->isBitField()) {
    uint64_t FieldSize =
      Field->getBitWidth()->EvaluateAsInt(Types.getContext()).getZExtValue();

    // Ignore zero sized bit fields.
    if (FieldSize == 0)
      return 0;

    const llvm::Type *FieldTy = llvm::Type::getInt8Ty(Types.getLLVMContext());
    unsigned NumBytesToAppend =
      llvm::RoundUpToAlignment(FieldSize, 8) / 8;

    if (NumBytesToAppend > 1)
      FieldTy = llvm::ArrayType::get(FieldTy, NumBytesToAppend);

    // Add the bit field info.
    LLVMBitFields.push_back(
      LLVMBitFieldInfo(Field, CGBitFieldInfo::MakeInfo(Types, Field,
                                                       0, FieldSize)));
    return FieldTy;
  }

  // This is a regular union field.
  LLVMFields.push_back(LLVMFieldInfo(Field, 0));
  return Types.ConvertTypeForMemRecursive(Field->getType());
}
void CGRecordLayoutBuilder::LayoutUnion(const RecordDecl *D) {
  assert(D->isUnion() && "Can't call LayoutUnion on a non-union record!");

  const ASTRecordLayout &Layout = Types.getContext().getASTRecordLayout(D);

  const llvm::Type *Ty = 0;
  uint64_t Size = 0;
  unsigned Align = 0;

  bool HasOnlyZeroSizedBitFields = true;

  unsigned FieldNo = 0;
  for (RecordDecl::field_iterator Field = D->field_begin(),
       FieldEnd = D->field_end(); Field != FieldEnd; ++Field, ++FieldNo) {
    assert(Layout.getFieldOffset(FieldNo) == 0 &&
           "Union field offset did not start at the beginning of record!");
    const llvm::Type *FieldTy = LayoutUnionField(*Field, Layout);

    if (!FieldTy)
      continue;

    HasOnlyZeroSizedBitFields = false;

    unsigned FieldAlign = Types.getTargetData().getABITypeAlignment(FieldTy);
    uint64_t FieldSize = Types.getTargetData().getTypeAllocSize(FieldTy);

    if (FieldAlign < Align)
      continue;

    if (FieldAlign > Align || FieldSize > Size) {
      Ty = FieldTy;
      Align = FieldAlign;
      Size = FieldSize;
    }
  }

  // Now add our field.
  if (Ty) {
    AppendField(0, Ty);

    if (getTypeAlignment(Ty) > Layout.getAlignment() / 8) {
      // We need a packed struct.
      Packed = true;
      Align = 1;
    }
  }
  if (!Align) {
    assert(HasOnlyZeroSizedBitFields &&
           "0-align record did not have all zero-sized bit-fields!");
    Align = 1;
  }

  // Append tail padding.
  if (Layout.getSize() / 8 > Size)
    AppendPadding(Layout.getSize() / 8, Align);
}
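
// For example (a sketch of the "best field" selection above, on a typical
// target where short is 2 bytes):
//
//   union U { short s; char c[3]; };
//
// The i16 for 's' wins over the [3 x i8] for 'c' because its alignment is
// higher (2 vs. 1), even though the array is bigger. The AST size of U is
// 4 bytes (3 rounded up to the 2-byte alignment), so two bytes of tail
// padding are appended, giving the LLVM type { i16, [2 x i8] }.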
void CGRecordLayoutBuilder::LayoutNonVirtualBase(const CXXRecordDecl *BaseDecl,
                                                 uint64_t BaseOffset) {
  const ASTRecordLayout &Layout =
    Types.getContext().getASTRecordLayout(BaseDecl);

  uint64_t NonVirtualSize = Layout.getNonVirtualSize();

  if (BaseDecl->isEmpty()) {
    // FIXME: Lay out empty bases.
    return;
  }

  CheckZeroInitializable(BaseDecl);

  // FIXME: Actually use a better type than [sizeof(BaseDecl) x i8] when we
  // can.
  AppendPadding(BaseOffset / 8, 1);

  // Append the base field.
  LLVMNonVirtualBases.push_back(LLVMBaseInfo(BaseDecl, FieldTypes.size()));

  AppendBytes(NonVirtualSize / 8);
}
void
CGRecordLayoutBuilder::LayoutNonVirtualBases(const CXXRecordDecl *RD,
                                             const ASTRecordLayout &Layout) {
  const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase();

  // Check if we need to add a vtable pointer.
  if (RD->isDynamicClass()) {
    if (!PrimaryBase) {
      const llvm::Type *FunctionType =
        llvm::FunctionType::get(llvm::Type::getInt32Ty(Types.getLLVMContext()),
                                /*isVarArg=*/true);
      const llvm::Type *VTableTy = FunctionType->getPointerTo();

      assert(NextFieldOffsetInBytes == 0 &&
             "VTable pointer must come first!");
      AppendField(NextFieldOffsetInBytes, VTableTy->getPointerTo());
    } else {
      // FIXME: Handle a virtual primary base.
      if (!Layout.getPrimaryBaseWasVirtual())
        LayoutNonVirtualBase(PrimaryBase, 0);
    }
  }

  // Lay out the non-virtual bases.
  for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
       E = RD->bases_end(); I != E; ++I) {
    if (I->isVirtual())
      continue;

    const CXXRecordDecl *BaseDecl =
      cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());

    // We've already laid out the primary base.
    if (BaseDecl == PrimaryBase && !Layout.getPrimaryBaseWasVirtual())
      continue;

    LayoutNonVirtualBase(BaseDecl, Layout.getBaseClassOffsetInBits(BaseDecl));
  }
}
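
// For example (a sketch, assuming Itanium-style vtable layout):
//
//   struct A { virtual void f(); int x; };
//
// A is dynamic and has no primary base, so field 0 of its LLVM type is the
// vtable pointer, typed as i32 (...)** per the code above, followed by the
// field for 'x'. A derived class such as
//
//   struct B : A { int y; };
//
// instead reuses A as its primary base: the base subobject (vptr and 'x')
// is laid out first via LayoutNonVirtualBase, then 'y' follows.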
bool CGRecordLayoutBuilder::LayoutFields(const RecordDecl *D) {
  assert(!D->isUnion() && "Can't call LayoutFields on a union!");
  assert(Alignment && "Did not set alignment!");

  const ASTRecordLayout &Layout = Types.getContext().getASTRecordLayout(D);

  if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(D))
    LayoutNonVirtualBases(RD, Layout);

  unsigned FieldNo = 0;

  for (RecordDecl::field_iterator Field = D->field_begin(),
       FieldEnd = D->field_end(); Field != FieldEnd; ++Field, ++FieldNo) {
    if (!LayoutField(*Field, Layout.getFieldOffset(FieldNo))) {
      assert(!Packed &&
             "Could not layout fields even with a packed LLVM struct!");
      return false;
    }
  }

  // Append tail padding if necessary.
  AppendTailPadding(Layout.getSize());

  return true;
}
void CGRecordLayoutBuilder::AppendTailPadding(uint64_t RecordSize) {
  assert(RecordSize % 8 == 0 && "Invalid record size!");

  uint64_t RecordSizeInBytes = RecordSize / 8;
  assert(NextFieldOffsetInBytes <= RecordSizeInBytes && "Size mismatch!");

  uint64_t AlignedNextFieldOffset =
    llvm::RoundUpToAlignment(NextFieldOffsetInBytes, AlignmentAsLLVMStruct);

  if (AlignedNextFieldOffset == RecordSizeInBytes) {
    // We don't need any padding.
    return;
  }

  unsigned NumPadBytes = RecordSizeInBytes - NextFieldOffsetInBytes;
  AppendBytes(NumPadBytes);
}
void CGRecordLayoutBuilder::AppendField(uint64_t FieldOffsetInBytes,
                                        const llvm::Type *FieldTy) {
  AlignmentAsLLVMStruct = std::max(AlignmentAsLLVMStruct,
                                   getTypeAlignment(FieldTy));

  uint64_t FieldSizeInBytes = Types.getTargetData().getTypeAllocSize(FieldTy);

  FieldTypes.push_back(FieldTy);

  NextFieldOffsetInBytes = FieldOffsetInBytes + FieldSizeInBytes;
  BitsAvailableInLastField = 0;
}
void CGRecordLayoutBuilder::AppendPadding(uint64_t FieldOffsetInBytes,
                                          unsigned FieldAlignment) {
  assert(NextFieldOffsetInBytes <= FieldOffsetInBytes &&
         "Incorrect field layout!");

  // Round up the field offset to the alignment of the field type.
  uint64_t AlignedNextFieldOffsetInBytes =
    llvm::RoundUpToAlignment(NextFieldOffsetInBytes, FieldAlignment);

  if (AlignedNextFieldOffsetInBytes < FieldOffsetInBytes) {
    // Even with alignment, the field offset is not at the right place,
    // insert padding.
    uint64_t PaddingInBytes = FieldOffsetInBytes - NextFieldOffsetInBytes;

    AppendBytes(PaddingInBytes);
  }
}
void CGRecordLayoutBuilder::AppendBytes(uint64_t NumBytes) {
  if (NumBytes == 0)
    return;

  const llvm::Type *Ty = llvm::Type::getInt8Ty(Types.getLLVMContext());
  if (NumBytes > 1)
    Ty = llvm::ArrayType::get(Ty, NumBytes);

  // Append the padding field.
  AppendField(NextFieldOffsetInBytes, Ty);
}
unsigned CGRecordLayoutBuilder::getTypeAlignment(const llvm::Type *Ty) const {
  if (Packed)
    return 1;

  return Types.getTargetData().getABITypeAlignment(Ty);
}
void CGRecordLayoutBuilder::CheckZeroInitializable(QualType T) {
  // This record already contains a member pointer.
  if (!IsZeroInitializable)
    return;

  // Can only have member pointers if we're compiling C++.
  if (!Types.getContext().getLangOptions().CPlusPlus)
    return;

  T = Types.getContext().getBaseElementType(T);

  if (const MemberPointerType *MPT = T->getAs<MemberPointerType>()) {
    if (!Types.getCXXABI().isZeroInitializable(MPT))
      IsZeroInitializable = false;
  } else if (const RecordType *RT = T->getAs<RecordType>()) {
    const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
    CheckZeroInitializable(RD);
  }
}
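
// The member-pointer check above matters because of ABIs like Itanium,
// where the null value of a pointer to data member is -1, not 0. For
// example:
//
//   struct S { int S::*p; };
//
// Zero-initializing S with an LLVM zeroinitializer would leave 'p' looking
// like a valid pointer to the member at offset 0 rather than null, so
// IsZeroInitializable is cleared and such records can't be emitted with a
// plain zeroinitializer.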
void CGRecordLayoutBuilder::CheckZeroInitializable(const CXXRecordDecl *RD) {
  // This record already contains a member pointer.
  if (!IsZeroInitializable)
    return;

  // FIXME: It would be better if there was a way to explicitly compute the
  // record layout instead of converting to a type.
  Types.ConvertTagDeclType(RD);

  const CGRecordLayout &Layout = Types.getCGRecordLayout(RD);

  if (!Layout.isZeroInitializable())
    IsZeroInitializable = false;
}
CGRecordLayout *CodeGenTypes::ComputeRecordLayout(const RecordDecl *D) {
  CGRecordLayoutBuilder Builder(*this);

  Builder.Layout(D);

  const llvm::Type *Ty = llvm::StructType::get(getLLVMContext(),
                                               Builder.FieldTypes,
                                               Builder.Packed);

  CGRecordLayout *RL =
    new CGRecordLayout(Ty, Builder.IsZeroInitializable);

  // Add all the non-virtual base field numbers.
  RL->NonVirtualBaseFields.insert(Builder.LLVMNonVirtualBases.begin(),
                                  Builder.LLVMNonVirtualBases.end());

  // Add all the field numbers.
  RL->FieldInfo.insert(Builder.LLVMFields.begin(),
                       Builder.LLVMFields.end());

  // Add bitfield info.
  RL->BitFields.insert(Builder.LLVMBitFields.begin(),
                       Builder.LLVMBitFields.end());

  // Dump the layout, if requested.
  if (getContext().getLangOptions().DumpRecordLayouts) {
    llvm::errs() << "\n*** Dumping IRgen Record Layout\n";
    llvm::errs() << "Record: ";
    D->dump();
    llvm::errs() << "\nLayout: ";
    RL->dump();
  }

#ifndef NDEBUG
  // Verify that the computed LLVM struct size matches the AST layout size.
  uint64_t TypeSizeInBits = getContext().getASTRecordLayout(D).getSize();
  assert(TypeSizeInBits == getTargetData().getTypeAllocSizeInBits(Ty) &&
         "Type size mismatch!");

  // Verify that the LLVM and AST field offsets agree.
  const llvm::StructType *ST =
    dyn_cast<llvm::StructType>(RL->getLLVMType());
  const llvm::StructLayout *SL = getTargetData().getStructLayout(ST);

  const ASTRecordLayout &AST_RL = getContext().getASTRecordLayout(D);
  RecordDecl::field_iterator it = D->field_begin();
  for (unsigned i = 0, e = AST_RL.getFieldCount(); i != e; ++i, ++it) {
    const FieldDecl *FD = *it;

    // For non-bit-fields, just check that the LLVM struct offset matches the
    // AST offset.
    if (!FD->isBitField()) {
      unsigned FieldNo = RL->getLLVMFieldNo(FD);
      assert(AST_RL.getFieldOffset(i) == SL->getElementOffsetInBits(FieldNo) &&
             "Invalid field offset!");
      continue;
    }

    // Ignore unnamed bit-fields.
    if (!FD->getDeclName())
      continue;

    const CGBitFieldInfo &Info = RL->getBitFieldInfo(FD);
    for (unsigned i = 0, e = Info.getNumComponents(); i != e; ++i) {
      const CGBitFieldInfo::AccessInfo &AI = Info.getComponent(i);

      // Verify that every component access is within the structure.
      uint64_t FieldOffset = SL->getElementOffsetInBits(AI.FieldIndex);
      uint64_t AccessBitOffset = FieldOffset + AI.FieldByteOffset * 8;
      assert(AccessBitOffset + AI.AccessWidth <= TypeSizeInBits &&
             "Invalid bit-field access (out of range)!");
    }
  }
#endif

  return RL;
}
void CGRecordLayout::print(llvm::raw_ostream &OS) const {
  OS << "<CGRecordLayout\n";
  OS << " LLVMType:" << *LLVMType << "\n";
  OS << " IsZeroInitializable:" << IsZeroInitializable << "\n";
  OS << " BitFields:[\n";

  // Print bit-field infos in declaration order.
  std::vector<std::pair<unsigned, const CGBitFieldInfo*> > BFIs;
  for (llvm::DenseMap<const FieldDecl*, CGBitFieldInfo>::const_iterator
         it = BitFields.begin(), ie = BitFields.end();
       it != ie; ++it) {
    const RecordDecl *RD = it->first->getParent();
    unsigned Index = 0;
    for (RecordDecl::field_iterator
           it2 = RD->field_begin(); *it2 != it->first; ++it2)
      ++Index;
    BFIs.push_back(std::make_pair(Index, &it->second));
  }
  llvm::array_pod_sort(BFIs.begin(), BFIs.end());
  for (unsigned i = 0, e = BFIs.size(); i != e; ++i) {
    OS.indent(4);
    BFIs[i].second->print(OS);
    OS << "\n";
  }

  OS << "]>\n";
}

void CGRecordLayout::dump() const {
  print(llvm::errs());
}

void CGBitFieldInfo::print(llvm::raw_ostream &OS) const {
  OS << "<CGBitFieldInfo";
  OS << " Size:" << Size;
  OS << " IsSigned:" << IsSigned << "\n";

  OS.indent(4 + strlen("<CGBitFieldInfo"));
  OS << " NumComponents:" << getNumComponents();
  OS << " Components: [";
  if (getNumComponents()) {
    OS << "\n";
    for (unsigned i = 0, e = getNumComponents(); i != e; ++i) {
      const AccessInfo &AI = getComponent(i);
      OS.indent(8);
      OS << "<AccessInfo"
         << " FieldIndex:" << AI.FieldIndex
         << " FieldByteOffset:" << AI.FieldByteOffset
         << " FieldBitStart:" << AI.FieldBitStart
         << " AccessWidth:" << AI.AccessWidth << "\n";
      OS.indent(8 + strlen("<AccessInfo"));
      OS << " AccessAlignment:" << AI.AccessAlignment
         << " TargetBitOffset:" << AI.TargetBitOffset
         << " TargetBitWidth:" << AI.TargetBitWidth
         << ">\n";
    }
    OS.indent(4);
  }
  OS << "]>";
}

void CGBitFieldInfo::dump() const {
  print(llvm::errs());
}