10b57cec5SDimitry Andric //===--- CGRecordLayoutBuilder.cpp - CGRecordLayout builder  ----*- C++ -*-===//
20b57cec5SDimitry Andric //
30b57cec5SDimitry Andric // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
40b57cec5SDimitry Andric // See https://llvm.org/LICENSE.txt for license information.
50b57cec5SDimitry Andric // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
60b57cec5SDimitry Andric //
70b57cec5SDimitry Andric //===----------------------------------------------------------------------===//
80b57cec5SDimitry Andric //
90b57cec5SDimitry Andric // Builder implementation for CGRecordLayout objects.
100b57cec5SDimitry Andric //
110b57cec5SDimitry Andric //===----------------------------------------------------------------------===//
120b57cec5SDimitry Andric 
130b57cec5SDimitry Andric #include "CGRecordLayout.h"
140b57cec5SDimitry Andric #include "CGCXXABI.h"
150b57cec5SDimitry Andric #include "CodeGenTypes.h"
160b57cec5SDimitry Andric #include "clang/AST/ASTContext.h"
170b57cec5SDimitry Andric #include "clang/AST/Attr.h"
180b57cec5SDimitry Andric #include "clang/AST/CXXInheritance.h"
190b57cec5SDimitry Andric #include "clang/AST/DeclCXX.h"
200b57cec5SDimitry Andric #include "clang/AST/Expr.h"
210b57cec5SDimitry Andric #include "clang/AST/RecordLayout.h"
220b57cec5SDimitry Andric #include "clang/Basic/CodeGenOptions.h"
230b57cec5SDimitry Andric #include "llvm/IR/DataLayout.h"
240b57cec5SDimitry Andric #include "llvm/IR/DerivedTypes.h"
250b57cec5SDimitry Andric #include "llvm/IR/Type.h"
260b57cec5SDimitry Andric #include "llvm/Support/Debug.h"
270b57cec5SDimitry Andric #include "llvm/Support/MathExtras.h"
280b57cec5SDimitry Andric #include "llvm/Support/raw_ostream.h"
290b57cec5SDimitry Andric using namespace clang;
300b57cec5SDimitry Andric using namespace CodeGen;
310b57cec5SDimitry Andric 
320b57cec5SDimitry Andric namespace {
330b57cec5SDimitry Andric /// The CGRecordLowering is responsible for lowering an ASTRecordLayout to an
340b57cec5SDimitry Andric /// llvm::Type.  Some of the lowering is straightforward, some is not.  Here we
350b57cec5SDimitry Andric /// detail some of the complexities and weirdnesses here.
360b57cec5SDimitry Andric /// * LLVM does not have unions - Unions can, in theory be represented by any
370b57cec5SDimitry Andric ///   llvm::Type with correct size.  We choose a field via a specific heuristic
380b57cec5SDimitry Andric ///   and add padding if necessary.
390b57cec5SDimitry Andric /// * LLVM does not have bitfields - Bitfields are collected into contiguous
400b57cec5SDimitry Andric ///   runs and allocated as a single storage type for the run.  ASTRecordLayout
410b57cec5SDimitry Andric ///   contains enough information to determine where the runs break.  Microsoft
420b57cec5SDimitry Andric ///   and Itanium follow different rules and use different codepaths.
430b57cec5SDimitry Andric /// * It is desired that, when possible, bitfields use the appropriate iN type
440b57cec5SDimitry Andric ///   when lowered to llvm types.  For example unsigned x : 24 gets lowered to
450b57cec5SDimitry Andric ///   i24.  This isn't always possible because i24 has storage size of 32 bit
460b57cec5SDimitry Andric ///   and if it is possible to use that extra byte of padding we must use
470b57cec5SDimitry Andric ///   [i8 x 3] instead of i24.  The function clipTailPadding does this.
480b57cec5SDimitry Andric ///   C++ examples that require clipping:
490b57cec5SDimitry Andric ///   struct { int a : 24; char b; }; // a must be clipped, b goes at offset 3
500b57cec5SDimitry Andric ///   struct A { int a : 24; }; // a must be clipped because a struct like B
///   could exist: struct B : A { char b; }; // b goes at offset 3
520b57cec5SDimitry Andric /// * Clang ignores 0 sized bitfields and 0 sized bases but *not* zero sized
530b57cec5SDimitry Andric ///   fields.  The existing asserts suggest that LLVM assumes that *every* field
540b57cec5SDimitry Andric ///   has an underlying storage type.  Therefore empty structures containing
550b57cec5SDimitry Andric ///   zero sized subobjects such as empty records or zero sized arrays still get
560b57cec5SDimitry Andric ///   a zero sized (empty struct) storage type.
570b57cec5SDimitry Andric /// * Clang reads the complete type rather than the base type when generating
580b57cec5SDimitry Andric ///   code to access fields.  Bitfields in tail position with tail padding may
590b57cec5SDimitry Andric ///   be clipped in the base class but not the complete class (we may discover
600b57cec5SDimitry Andric ///   that the tail padding is not used in the complete class.) However,
610b57cec5SDimitry Andric ///   because LLVM reads from the complete type it can generate incorrect code
620b57cec5SDimitry Andric ///   if we do not clip the tail padding off of the bitfield in the complete
630b57cec5SDimitry Andric ///   layout.  This introduces a somewhat awkward extra unnecessary clip stage.
640b57cec5SDimitry Andric ///   The location of the clip is stored internally as a sentinel of type
650b57cec5SDimitry Andric ///   SCISSOR.  If LLVM were updated to read base types (which it probably
660b57cec5SDimitry Andric ///   should because locations of things such as VBases are bogus in the llvm
670b57cec5SDimitry Andric ///   type anyway) then we could eliminate the SCISSOR.
/// * Itanium allows nearly empty primary virtual bases.  These bases don't
///   get their own storage because they're laid out as part of another base
700b57cec5SDimitry Andric ///   or at the beginning of the structure.  Determining if a VBase actually
710b57cec5SDimitry Andric ///   gets storage awkwardly involves a walk of all bases.
720b57cec5SDimitry Andric /// * VFPtrs and VBPtrs do *not* make a record NotZeroInitializable.
struct CGRecordLowering {
  // MemberInfo is a helper structure that contains information about a record
  // member.  In addition to the standard member types, there exists a
  // sentinel member type that ensures correct rounding.
  struct MemberInfo {
    CharUnits Offset;
    enum InfoKind { VFPtr, VBPtr, Field, Base, VBase, Scissor } Kind;
    llvm::Type *Data;
    // Which union member is active is implied by Kind: FD for Field members,
    // RD for Base/VBase members; neither is meaningful for the others.
    union {
      const FieldDecl *FD;
      const CXXRecordDecl *RD;
    };
    MemberInfo(CharUnits Offset, InfoKind Kind, llvm::Type *Data,
               const FieldDecl *FD = nullptr)
      : Offset(Offset), Kind(Kind), Data(Data), FD(FD) {}
    MemberInfo(CharUnits Offset, InfoKind Kind, llvm::Type *Data,
               const CXXRecordDecl *RD)
      : Offset(Offset), Kind(Kind), Data(Data), RD(RD) {}
    // MemberInfos are sorted so we define a < operator.  Only Offset
    // participates, so equal-offset members rely on a stable sort to keep
    // their relative order (see lower(), which uses llvm::stable_sort).
    bool operator <(const MemberInfo& a) const { return Offset < a.Offset; }
  };
  // The constructor.
  CGRecordLowering(CodeGenTypes &Types, const RecordDecl *D, bool Packed);
  // Short helper routines.
  /// Constructs a MemberInfo instance from an offset and llvm::Type *.
  MemberInfo StorageInfo(CharUnits Offset, llvm::Type *Data) {
    return MemberInfo(Offset, MemberInfo::Field, Data);
  }

  /// The Microsoft bitfield layout rule allocates discrete storage
  /// units of the field's formal type and only combines adjacent
  /// fields of the same formal type.  We want to emit a layout with
  /// these discrete storage units instead of combining them into a
  /// continuous run.
  bool isDiscreteBitFieldABI() {
    return Context.getTargetInfo().getCXXABI().isMicrosoft() ||
           D->isMsStruct(Context);
  }

  /// The Itanium base layout rule allows virtual bases to overlap
  /// other bases, which complicates layout in specific ways.
  ///
  /// Note specifically that the ms_struct attribute doesn't change this.
  bool isOverlappingVBaseABI() {
    return !Context.getTargetInfo().getCXXABI().isMicrosoft();
  }

  /// Wraps llvm::Type::getIntNTy with some implicit arguments.
  /// NumBits is rounded up to a whole number of bytes.
  llvm::Type *getIntNType(uint64_t NumBits) {
    return llvm::Type::getIntNTy(Types.getLLVMContext(),
                                 (unsigned)llvm::alignTo(NumBits, 8));
  }
  /// Gets an llvm type of size NumBytes and alignment 1.
  llvm::Type *getByteArrayType(CharUnits NumBytes) {
    assert(!NumBytes.isZero() && "Empty byte arrays aren't allowed.");
    llvm::Type *Type = llvm::Type::getInt8Ty(Types.getLLVMContext());
    return NumBytes == CharUnits::One() ? Type :
        (llvm::Type *)llvm::ArrayType::get(Type, NumBytes.getQuantity());
  }
  /// Gets the storage type for a field decl and handles storage
  /// for itanium bitfields that are smaller than their declared type.
  llvm::Type *getStorageType(const FieldDecl *FD) {
    llvm::Type *Type = Types.ConvertTypeForMem(FD->getType());
    if (!FD->isBitField()) return Type;
    // Microsoft-style bitfields always use the declared type's storage.
    if (isDiscreteBitFieldABI()) return Type;
    // Itanium: use an iN just wide enough for the declared bit width, but
    // never wider than the declared type's storage.
    return getIntNType(std::min(FD->getBitWidthValue(Context),
                             (unsigned)Context.toBits(getSize(Type))));
  }
  /// Gets the llvm Basesubobject type from a CXXRecordDecl.
  llvm::Type *getStorageType(const CXXRecordDecl *RD) {
    return Types.getCGRecordLayout(RD).getBaseSubobjectLLVMType();
  }
  /// Converts a bit offset into a byte (CharUnits) offset.
  CharUnits bitsToCharUnits(uint64_t BitOffset) {
    return Context.toCharUnitsFromBits(BitOffset);
  }
  /// Returns the alloc size (size including tail padding) of an llvm type.
  CharUnits getSize(llvm::Type *Type) {
    return CharUnits::fromQuantity(DataLayout.getTypeAllocSize(Type));
  }
  /// Returns the ABI alignment of an llvm type.
  CharUnits getAlignment(llvm::Type *Type) {
    return CharUnits::fromQuantity(DataLayout.getABITypeAlignment(Type));
  }
  /// True if a field of this type can be zero-initialized with memset(0).
  bool isZeroInitializable(const FieldDecl *FD) {
    return Types.isZeroInitializable(FD->getType());
  }
  /// True if this record type can be zero-initialized with memset(0).
  bool isZeroInitializable(const RecordDecl *RD) {
    return Types.isZeroInitializable(RD);
  }
  /// Appends an i8 array of Size bytes to FieldTypes; no-op when Size is 0.
  void appendPaddingBytes(CharUnits Size) {
    if (!Size.isZero())
      FieldTypes.push_back(getByteArrayType(Size));
  }
  /// Returns FD's offset in bits from the start of the record.
  uint64_t getFieldBitOffset(const FieldDecl *FD) {
    return Layout.getFieldOffset(FD->getFieldIndex());
  }
  // Layout routines.
  /// Records the CGBitFieldInfo (offset, width, storage unit) for bitfield
  /// FD, given the byte offset and llvm type of its storage unit.
  void setBitFieldInfo(const FieldDecl *FD, CharUnits StartOffset,
                       llvm::Type *StorageType);
  /// Lowers an ASTRecordLayout to a llvm type.
  void lower(bool NonVirtualBaseType);
  void lowerUnion();
  void accumulateFields();
  /// Lowers a contiguous run of bitfields [Field, FieldEnd) into storage
  /// members plus a MemberInfo per bitfield.
  void accumulateBitFields(RecordDecl::field_iterator Field,
                        RecordDecl::field_iterator FieldEnd);
  void accumulateBases();
  void accumulateVPtrs();
  void accumulateVBases();
  /// Recursively searches all of the bases to find out if a vbase is
  /// not the primary vbase of some base class.
  bool hasOwnStorage(const CXXRecordDecl *Decl, const CXXRecordDecl *Query);
  void calculateZeroInit();
  /// Lowers bitfield storage types to I8 arrays for bitfields with tail
  /// padding that is or can potentially be used.
  void clipTailPadding();
  /// Determines if we need a packed llvm struct.
  void determinePacked(bool NVBaseType);
  /// Inserts padding everywhere it's needed.
  void insertPadding();
  /// Fills out the structures that are ultimately consumed.
  void fillOutputFields();
  // Input memoization fields.
  CodeGenTypes &Types;
  const ASTContext &Context;
  const RecordDecl *D;
  // Null unless D is a CXXRecordDecl.
  const CXXRecordDecl *RD;
  const ASTRecordLayout &Layout;
  const llvm::DataLayout &DataLayout;
  // Helpful intermediate data-structures.
  std::vector<MemberInfo> Members;
  // Output fields, consumed by CodeGenTypes::ComputeRecordLayout.
  SmallVector<llvm::Type *, 16> FieldTypes;
  llvm::DenseMap<const FieldDecl *, unsigned> Fields;
  llvm::DenseMap<const FieldDecl *, CGBitFieldInfo> BitFields;
  llvm::DenseMap<const CXXRecordDecl *, unsigned> NonVirtualBases;
  llvm::DenseMap<const CXXRecordDecl *, unsigned> VirtualBases;
  bool IsZeroInitializable : 1;
  bool IsZeroInitializableAsBase : 1;
  bool Packed : 1;
private:
  CGRecordLowering(const CGRecordLowering &) = delete;
  void operator =(const CGRecordLowering &) = delete;
};
2140b57cec5SDimitry Andric } // namespace {
2150b57cec5SDimitry Andric 
// The constructor memoizes everything each lowering phase needs: the AST
// layout, the target data layout, and the record itself.  RD is null for
// records that are not C++ classes (plain C structs and unions).
CGRecordLowering::CGRecordLowering(CodeGenTypes &Types, const RecordDecl *D,
                                   bool Packed)
    : Types(Types), Context(Types.getContext()), D(D),
      RD(dyn_cast<CXXRecordDecl>(D)),
      Layout(Types.getContext().getASTRecordLayout(D)),
      DataLayout(Types.getDataLayout()), IsZeroInitializable(true),
      IsZeroInitializableAsBase(true), Packed(Packed) {}
2230b57cec5SDimitry Andric 
2240b57cec5SDimitry Andric void CGRecordLowering::setBitFieldInfo(
2250b57cec5SDimitry Andric     const FieldDecl *FD, CharUnits StartOffset, llvm::Type *StorageType) {
2260b57cec5SDimitry Andric   CGBitFieldInfo &Info = BitFields[FD->getCanonicalDecl()];
2270b57cec5SDimitry Andric   Info.IsSigned = FD->getType()->isSignedIntegerOrEnumerationType();
2280b57cec5SDimitry Andric   Info.Offset = (unsigned)(getFieldBitOffset(FD) - Context.toBits(StartOffset));
2290b57cec5SDimitry Andric   Info.Size = FD->getBitWidthValue(Context);
2300b57cec5SDimitry Andric   Info.StorageSize = (unsigned)DataLayout.getTypeAllocSizeInBits(StorageType);
2310b57cec5SDimitry Andric   Info.StorageOffset = StartOffset;
2320b57cec5SDimitry Andric   if (Info.Size > Info.StorageSize)
2330b57cec5SDimitry Andric     Info.Size = Info.StorageSize;
2340b57cec5SDimitry Andric   // Reverse the bit offsets for big endian machines. Because we represent
2350b57cec5SDimitry Andric   // a bitfield as a single large integer load, we can imagine the bits
2360b57cec5SDimitry Andric   // counting from the most-significant-bit instead of the
2370b57cec5SDimitry Andric   // least-significant-bit.
2380b57cec5SDimitry Andric   if (DataLayout.isBigEndian())
2390b57cec5SDimitry Andric     Info.Offset = Info.StorageSize - (Info.Offset + Info.Size);
2400b57cec5SDimitry Andric }
2410b57cec5SDimitry Andric 
2420b57cec5SDimitry Andric void CGRecordLowering::lower(bool NVBaseType) {
2430b57cec5SDimitry Andric   // The lowering process implemented in this function takes a variety of
2440b57cec5SDimitry Andric   // carefully ordered phases.
2450b57cec5SDimitry Andric   // 1) Store all members (fields and bases) in a list and sort them by offset.
2460b57cec5SDimitry Andric   // 2) Add a 1-byte capstone member at the Size of the structure.
2470b57cec5SDimitry Andric   // 3) Clip bitfield storages members if their tail padding is or might be
2480b57cec5SDimitry Andric   //    used by another field or base.  The clipping process uses the capstone
2490b57cec5SDimitry Andric   //    by treating it as another object that occurs after the record.
2500b57cec5SDimitry Andric   // 4) Determine if the llvm-struct requires packing.  It's important that this
2510b57cec5SDimitry Andric   //    phase occur after clipping, because clipping changes the llvm type.
2520b57cec5SDimitry Andric   //    This phase reads the offset of the capstone when determining packedness
2530b57cec5SDimitry Andric   //    and updates the alignment of the capstone to be equal of the alignment
2540b57cec5SDimitry Andric   //    of the record after doing so.
2550b57cec5SDimitry Andric   // 5) Insert padding everywhere it is needed.  This phase requires 'Packed' to
2560b57cec5SDimitry Andric   //    have been computed and needs to know the alignment of the record in
2570b57cec5SDimitry Andric   //    order to understand if explicit tail padding is needed.
2580b57cec5SDimitry Andric   // 6) Remove the capstone, we don't need it anymore.
2590b57cec5SDimitry Andric   // 7) Determine if this record can be zero-initialized.  This phase could have
2600b57cec5SDimitry Andric   //    been placed anywhere after phase 1.
2610b57cec5SDimitry Andric   // 8) Format the complete list of members in a way that can be consumed by
2620b57cec5SDimitry Andric   //    CodeGenTypes::ComputeRecordLayout.
2630b57cec5SDimitry Andric   CharUnits Size = NVBaseType ? Layout.getNonVirtualSize() : Layout.getSize();
2640b57cec5SDimitry Andric   if (D->isUnion())
2650b57cec5SDimitry Andric     return lowerUnion();
2660b57cec5SDimitry Andric   accumulateFields();
2670b57cec5SDimitry Andric   // RD implies C++.
2680b57cec5SDimitry Andric   if (RD) {
2690b57cec5SDimitry Andric     accumulateVPtrs();
2700b57cec5SDimitry Andric     accumulateBases();
2710b57cec5SDimitry Andric     if (Members.empty())
2720b57cec5SDimitry Andric       return appendPaddingBytes(Size);
2730b57cec5SDimitry Andric     if (!NVBaseType)
2740b57cec5SDimitry Andric       accumulateVBases();
2750b57cec5SDimitry Andric   }
2760b57cec5SDimitry Andric   llvm::stable_sort(Members);
2770b57cec5SDimitry Andric   Members.push_back(StorageInfo(Size, getIntNType(8)));
2780b57cec5SDimitry Andric   clipTailPadding();
2790b57cec5SDimitry Andric   determinePacked(NVBaseType);
2800b57cec5SDimitry Andric   insertPadding();
2810b57cec5SDimitry Andric   Members.pop_back();
2820b57cec5SDimitry Andric   calculateZeroInit();
2830b57cec5SDimitry Andric   fillOutputFields();
2840b57cec5SDimitry Andric }
2850b57cec5SDimitry Andric 
void CGRecordLowering::lowerUnion() {
  CharUnits LayoutSize = Layout.getSize();
  llvm::Type *StorageType = nullptr;
  bool SeenNamedMember = false;
  // Iterate through the fields setting bitFieldInfo and the Fields array. Also
  // locate the "most appropriate" storage type.  The heuristic for finding the
  // storage type isn't necessary, the first (non-0-length-bitfield) field's
  // type would work fine and be simpler but would be different than what we've
  // been doing and cause lit tests to change.
  for (const auto *Field : D->fields()) {
    if (Field->isBitField()) {
      // Zero-length bitfields contribute nothing to a union.
      if (Field->isZeroLengthBitField(Context))
        continue;
      llvm::Type *FieldType = getStorageType(Field);
      // A bitfield's storage can be wider than the union itself (e.g. packed
      // bitfields on Itanium); fall back to an i8 array of the union's size.
      if (LayoutSize < getSize(FieldType))
        FieldType = getByteArrayType(LayoutSize);
      // All union members live at byte offset zero.
      setBitFieldInfo(Field, CharUnits::Zero(), FieldType);
    }
    // Every union member maps to llvm field index 0.
    Fields[Field->getCanonicalDecl()] = 0;
    // Note: deliberately recomputed from the declared type; the (possibly
    // clipped) bitfield type above is only used for the CGBitFieldInfo.
    llvm::Type *FieldType = getStorageType(Field);
    // Compute zero-initializable status.
    // This union might not be zero initialized: it may contain a pointer to
    // data member which might have some exotic initialization sequence.
    // If this is the case, then we ought not to try and come up with a "better"
    // type, it might not be very easy to come up with a Constant which
    // correctly initializes it.
    if (!SeenNamedMember) {
      SeenNamedMember = Field->getIdentifier();
      // An unnamed field of record type counts if the record has a named
      // data member anywhere inside it (anonymous struct/union members).
      if (!SeenNamedMember)
        if (const auto *FieldRD = Field->getType()->getAsRecordDecl())
          SeenNamedMember = FieldRD->findFirstNamedDataMember();
      if (SeenNamedMember && !isZeroInitializable(Field)) {
        IsZeroInitializable = IsZeroInitializableAsBase = false;
        StorageType = FieldType;
      }
    }
    // Because our union isn't zero initializable, we won't be getting a better
    // storage type.
    if (!IsZeroInitializable)
      continue;
    // Conditionally update our storage type if we've got a new "better" one.
    // "Better" means strictly more aligned, or equally aligned but larger.
    if (!StorageType ||
        getAlignment(FieldType) >  getAlignment(StorageType) ||
        (getAlignment(FieldType) == getAlignment(StorageType) &&
        getSize(FieldType) > getSize(StorageType)))
      StorageType = FieldType;
  }
  // If we have no storage type just pad to the appropriate size and return.
  if (!StorageType)
    return appendPaddingBytes(LayoutSize);
  // If our storage size was bigger than our required size (can happen in the
  // case of packed bitfields on Itanium) then just use an I8 array.
  if (LayoutSize < getSize(StorageType))
    StorageType = getByteArrayType(LayoutSize);
  FieldTypes.push_back(StorageType);
  appendPaddingBytes(LayoutSize - getSize(StorageType));
  // Set packed if we need it.
  if (LayoutSize % getAlignment(StorageType))
    Packed = true;
}
3460b57cec5SDimitry Andric 
3470b57cec5SDimitry Andric void CGRecordLowering::accumulateFields() {
3480b57cec5SDimitry Andric   for (RecordDecl::field_iterator Field = D->field_begin(),
3490b57cec5SDimitry Andric                                   FieldEnd = D->field_end();
3500b57cec5SDimitry Andric     Field != FieldEnd;) {
3510b57cec5SDimitry Andric     if (Field->isBitField()) {
3520b57cec5SDimitry Andric       RecordDecl::field_iterator Start = Field;
3530b57cec5SDimitry Andric       // Iterate to gather the list of bitfields.
3540b57cec5SDimitry Andric       for (++Field; Field != FieldEnd && Field->isBitField(); ++Field);
3550b57cec5SDimitry Andric       accumulateBitFields(Start, Field);
3560b57cec5SDimitry Andric     } else if (!Field->isZeroSize(Context)) {
3570b57cec5SDimitry Andric       Members.push_back(MemberInfo(
3580b57cec5SDimitry Andric           bitsToCharUnits(getFieldBitOffset(*Field)), MemberInfo::Field,
3590b57cec5SDimitry Andric           getStorageType(*Field), *Field));
3600b57cec5SDimitry Andric       ++Field;
3610b57cec5SDimitry Andric     } else {
3620b57cec5SDimitry Andric       ++Field;
3630b57cec5SDimitry Andric     }
3640b57cec5SDimitry Andric   }
3650b57cec5SDimitry Andric }
3660b57cec5SDimitry Andric 
3670b57cec5SDimitry Andric void
3680b57cec5SDimitry Andric CGRecordLowering::accumulateBitFields(RecordDecl::field_iterator Field,
3690b57cec5SDimitry Andric                                       RecordDecl::field_iterator FieldEnd) {
3700b57cec5SDimitry Andric   // Run stores the first element of the current run of bitfields.  FieldEnd is
3710b57cec5SDimitry Andric   // used as a special value to note that we don't have a current run.  A
3720b57cec5SDimitry Andric   // bitfield run is a contiguous collection of bitfields that can be stored in
3730b57cec5SDimitry Andric   // the same storage block.  Zero-sized bitfields and bitfields that would
3740b57cec5SDimitry Andric   // cross an alignment boundary break a run and start a new one.
3750b57cec5SDimitry Andric   RecordDecl::field_iterator Run = FieldEnd;
3760b57cec5SDimitry Andric   // Tail is the offset of the first bit off the end of the current run.  It's
3770b57cec5SDimitry Andric   // used to determine if the ASTRecordLayout is treating these two bitfields as
3780b57cec5SDimitry Andric   // contiguous.  StartBitOffset is offset of the beginning of the Run.
3790b57cec5SDimitry Andric   uint64_t StartBitOffset, Tail = 0;
3800b57cec5SDimitry Andric   if (isDiscreteBitFieldABI()) {
3810b57cec5SDimitry Andric     for (; Field != FieldEnd; ++Field) {
3820b57cec5SDimitry Andric       uint64_t BitOffset = getFieldBitOffset(*Field);
3830b57cec5SDimitry Andric       // Zero-width bitfields end runs.
3840b57cec5SDimitry Andric       if (Field->isZeroLengthBitField(Context)) {
3850b57cec5SDimitry Andric         Run = FieldEnd;
3860b57cec5SDimitry Andric         continue;
3870b57cec5SDimitry Andric       }
3880b57cec5SDimitry Andric       llvm::Type *Type = Types.ConvertTypeForMem(Field->getType());
3890b57cec5SDimitry Andric       // If we don't have a run yet, or don't live within the previous run's
3900b57cec5SDimitry Andric       // allocated storage then we allocate some storage and start a new run.
3910b57cec5SDimitry Andric       if (Run == FieldEnd || BitOffset >= Tail) {
3920b57cec5SDimitry Andric         Run = Field;
3930b57cec5SDimitry Andric         StartBitOffset = BitOffset;
3940b57cec5SDimitry Andric         Tail = StartBitOffset + DataLayout.getTypeAllocSizeInBits(Type);
3950b57cec5SDimitry Andric         // Add the storage member to the record.  This must be added to the
3960b57cec5SDimitry Andric         // record before the bitfield members so that it gets laid out before
3970b57cec5SDimitry Andric         // the bitfields it contains get laid out.
3980b57cec5SDimitry Andric         Members.push_back(StorageInfo(bitsToCharUnits(StartBitOffset), Type));
3990b57cec5SDimitry Andric       }
4000b57cec5SDimitry Andric       // Bitfields get the offset of their storage but come afterward and remain
4010b57cec5SDimitry Andric       // there after a stable sort.
4020b57cec5SDimitry Andric       Members.push_back(MemberInfo(bitsToCharUnits(StartBitOffset),
4030b57cec5SDimitry Andric                                    MemberInfo::Field, nullptr, *Field));
4040b57cec5SDimitry Andric     }
4050b57cec5SDimitry Andric     return;
4060b57cec5SDimitry Andric   }
4070b57cec5SDimitry Andric 
4080b57cec5SDimitry Andric   // Check if OffsetInRecord is better as a single field run. When OffsetInRecord
4090b57cec5SDimitry Andric   // has legal integer width, and its bitfield offset is naturally aligned, it
4100b57cec5SDimitry Andric   // is better to make the bitfield a separate storage component so as it can be
4110b57cec5SDimitry Andric   // accessed directly with lower cost.
4120b57cec5SDimitry Andric   auto IsBetterAsSingleFieldRun = [&](uint64_t OffsetInRecord,
4130b57cec5SDimitry Andric                                       uint64_t StartBitOffset) {
4140b57cec5SDimitry Andric     if (!Types.getCodeGenOpts().FineGrainedBitfieldAccesses)
4150b57cec5SDimitry Andric       return false;
4160b57cec5SDimitry Andric     if (!DataLayout.isLegalInteger(OffsetInRecord))
4170b57cec5SDimitry Andric       return false;
4180b57cec5SDimitry Andric     // Make sure StartBitOffset is natually aligned if it is treated as an
4190b57cec5SDimitry Andric     // IType integer.
4200b57cec5SDimitry Andric      if (StartBitOffset %
4210b57cec5SDimitry Andric             Context.toBits(getAlignment(getIntNType(OffsetInRecord))) !=
4220b57cec5SDimitry Andric         0)
4230b57cec5SDimitry Andric       return false;
4240b57cec5SDimitry Andric     return true;
4250b57cec5SDimitry Andric   };
4260b57cec5SDimitry Andric 
4270b57cec5SDimitry Andric   // The start field is better as a single field run.
4280b57cec5SDimitry Andric   bool StartFieldAsSingleRun = false;
4290b57cec5SDimitry Andric   for (;;) {
4300b57cec5SDimitry Andric     // Check to see if we need to start a new run.
4310b57cec5SDimitry Andric     if (Run == FieldEnd) {
4320b57cec5SDimitry Andric       // If we're out of fields, return.
4330b57cec5SDimitry Andric       if (Field == FieldEnd)
4340b57cec5SDimitry Andric         break;
4350b57cec5SDimitry Andric       // Any non-zero-length bitfield can start a new run.
4360b57cec5SDimitry Andric       if (!Field->isZeroLengthBitField(Context)) {
4370b57cec5SDimitry Andric         Run = Field;
4380b57cec5SDimitry Andric         StartBitOffset = getFieldBitOffset(*Field);
4390b57cec5SDimitry Andric         Tail = StartBitOffset + Field->getBitWidthValue(Context);
4400b57cec5SDimitry Andric         StartFieldAsSingleRun = IsBetterAsSingleFieldRun(Tail - StartBitOffset,
4410b57cec5SDimitry Andric                                                          StartBitOffset);
4420b57cec5SDimitry Andric       }
4430b57cec5SDimitry Andric       ++Field;
4440b57cec5SDimitry Andric       continue;
4450b57cec5SDimitry Andric     }
4460b57cec5SDimitry Andric 
4470b57cec5SDimitry Andric     // If the start field of a new run is better as a single run, or
4480b57cec5SDimitry Andric     // if current field (or consecutive fields) is better as a single run, or
4490b57cec5SDimitry Andric     // if current field has zero width bitfield and either
4500b57cec5SDimitry Andric     // UseZeroLengthBitfieldAlignment or UseBitFieldTypeAlignment is set to
4510b57cec5SDimitry Andric     // true, or
4520b57cec5SDimitry Andric     // if the offset of current field is inconsistent with the offset of
4530b57cec5SDimitry Andric     // previous field plus its offset,
4540b57cec5SDimitry Andric     // skip the block below and go ahead to emit the storage.
4550b57cec5SDimitry Andric     // Otherwise, try to add bitfields to the run.
4560b57cec5SDimitry Andric     if (!StartFieldAsSingleRun && Field != FieldEnd &&
4570b57cec5SDimitry Andric         !IsBetterAsSingleFieldRun(Tail - StartBitOffset, StartBitOffset) &&
4580b57cec5SDimitry Andric         (!Field->isZeroLengthBitField(Context) ||
4590b57cec5SDimitry Andric          (!Context.getTargetInfo().useZeroLengthBitfieldAlignment() &&
4600b57cec5SDimitry Andric           !Context.getTargetInfo().useBitFieldTypeAlignment())) &&
4610b57cec5SDimitry Andric         Tail == getFieldBitOffset(*Field)) {
4620b57cec5SDimitry Andric       Tail += Field->getBitWidthValue(Context);
4630b57cec5SDimitry Andric       ++Field;
4640b57cec5SDimitry Andric       continue;
4650b57cec5SDimitry Andric     }
4660b57cec5SDimitry Andric 
4670b57cec5SDimitry Andric     // We've hit a break-point in the run and need to emit a storage field.
4680b57cec5SDimitry Andric     llvm::Type *Type = getIntNType(Tail - StartBitOffset);
4690b57cec5SDimitry Andric     // Add the storage member to the record and set the bitfield info for all of
4700b57cec5SDimitry Andric     // the bitfields in the run.  Bitfields get the offset of their storage but
4710b57cec5SDimitry Andric     // come afterward and remain there after a stable sort.
4720b57cec5SDimitry Andric     Members.push_back(StorageInfo(bitsToCharUnits(StartBitOffset), Type));
4730b57cec5SDimitry Andric     for (; Run != Field; ++Run)
4740b57cec5SDimitry Andric       Members.push_back(MemberInfo(bitsToCharUnits(StartBitOffset),
4750b57cec5SDimitry Andric                                    MemberInfo::Field, nullptr, *Run));
4760b57cec5SDimitry Andric     Run = FieldEnd;
4770b57cec5SDimitry Andric     StartFieldAsSingleRun = false;
4780b57cec5SDimitry Andric   }
4790b57cec5SDimitry Andric }
4800b57cec5SDimitry Andric 
4810b57cec5SDimitry Andric void CGRecordLowering::accumulateBases() {
4820b57cec5SDimitry Andric   // If we've got a primary virtual base, we need to add it with the bases.
4830b57cec5SDimitry Andric   if (Layout.isPrimaryBaseVirtual()) {
4840b57cec5SDimitry Andric     const CXXRecordDecl *BaseDecl = Layout.getPrimaryBase();
4850b57cec5SDimitry Andric     Members.push_back(MemberInfo(CharUnits::Zero(), MemberInfo::Base,
4860b57cec5SDimitry Andric                                  getStorageType(BaseDecl), BaseDecl));
4870b57cec5SDimitry Andric   }
4880b57cec5SDimitry Andric   // Accumulate the non-virtual bases.
4890b57cec5SDimitry Andric   for (const auto &Base : RD->bases()) {
4900b57cec5SDimitry Andric     if (Base.isVirtual())
4910b57cec5SDimitry Andric       continue;
4920b57cec5SDimitry Andric 
4930b57cec5SDimitry Andric     // Bases can be zero-sized even if not technically empty if they
4940b57cec5SDimitry Andric     // contain only a trailing array member.
4950b57cec5SDimitry Andric     const CXXRecordDecl *BaseDecl = Base.getType()->getAsCXXRecordDecl();
4960b57cec5SDimitry Andric     if (!BaseDecl->isEmpty() &&
4970b57cec5SDimitry Andric         !Context.getASTRecordLayout(BaseDecl).getNonVirtualSize().isZero())
4980b57cec5SDimitry Andric       Members.push_back(MemberInfo(Layout.getBaseClassOffset(BaseDecl),
4990b57cec5SDimitry Andric           MemberInfo::Base, getStorageType(BaseDecl), BaseDecl));
5000b57cec5SDimitry Andric   }
5010b57cec5SDimitry Andric }
5020b57cec5SDimitry Andric 
5030b57cec5SDimitry Andric void CGRecordLowering::accumulateVPtrs() {
5040b57cec5SDimitry Andric   if (Layout.hasOwnVFPtr())
5050b57cec5SDimitry Andric     Members.push_back(MemberInfo(CharUnits::Zero(), MemberInfo::VFPtr,
5060b57cec5SDimitry Andric         llvm::FunctionType::get(getIntNType(32), /*isVarArg=*/true)->
5070b57cec5SDimitry Andric             getPointerTo()->getPointerTo()));
5080b57cec5SDimitry Andric   if (Layout.hasOwnVBPtr())
5090b57cec5SDimitry Andric     Members.push_back(MemberInfo(Layout.getVBPtrOffset(), MemberInfo::VBPtr,
5100b57cec5SDimitry Andric         llvm::Type::getInt32PtrTy(Types.getLLVMContext())));
5110b57cec5SDimitry Andric }
5120b57cec5SDimitry Andric 
void CGRecordLowering::accumulateVBases() {
  // Add the virtual bases to Members, preceded by a "scissor" marker that
  // separates the non-virtual sub-object from the virtual-base region.
  CharUnits ScissorOffset = Layout.getNonVirtualSize();
  // In the itanium ABI, it's possible to place a vbase at a dsize that is
  // smaller than the nvsize.  Here we check to see if such a base is placed
  // before the nvsize and set the scissor offset to that, instead of the
  // nvsize.
  if (isOverlappingVBaseABI())
    for (const auto &Base : RD->vbases()) {
      const CXXRecordDecl *BaseDecl = Base.getType()->getAsCXXRecordDecl();
      if (BaseDecl->isEmpty())
        continue;
      // If the vbase is a primary virtual base of some base, then it doesn't
      // get its own storage location but instead lives inside of that base.
      if (Context.isNearlyEmpty(BaseDecl) && !hasOwnStorage(RD, BaseDecl))
        continue;
      ScissorOffset = std::min(ScissorOffset,
                               Layout.getVBaseClassOffset(BaseDecl));
    }
  Members.push_back(MemberInfo(ScissorOffset, MemberInfo::Scissor, nullptr,
                               RD));
  for (const auto &Base : RD->vbases()) {
    const CXXRecordDecl *BaseDecl = Base.getType()->getAsCXXRecordDecl();
    // Empty virtual bases occupy no storage.
    if (BaseDecl->isEmpty())
      continue;
    CharUnits Offset = Layout.getVBaseClassOffset(BaseDecl);
    // If the vbase is a primary virtual base of some base, then it doesn't
    // get its own storage location but instead lives inside of that base.
    // Record it with a null storage type so clients can still locate it.
    if (isOverlappingVBaseABI() &&
        Context.isNearlyEmpty(BaseDecl) &&
        !hasOwnStorage(RD, BaseDecl)) {
      Members.push_back(MemberInfo(Offset, MemberInfo::VBase, nullptr,
                                   BaseDecl));
      continue;
    }
    // If we've got a vtordisp, add it as a storage type.  The vtordisp slot
    // is a 4-byte integer placed immediately before the virtual base.
    if (Layout.getVBaseOffsetsMap().find(BaseDecl)->second.hasVtorDisp())
      Members.push_back(StorageInfo(Offset - CharUnits::fromQuantity(4),
                                    getIntNType(32)));
    Members.push_back(MemberInfo(Offset, MemberInfo::VBase,
                                 getStorageType(BaseDecl), BaseDecl));
  }
}
5550b57cec5SDimitry Andric 
5560b57cec5SDimitry Andric bool CGRecordLowering::hasOwnStorage(const CXXRecordDecl *Decl,
5570b57cec5SDimitry Andric                                      const CXXRecordDecl *Query) {
5580b57cec5SDimitry Andric   const ASTRecordLayout &DeclLayout = Context.getASTRecordLayout(Decl);
5590b57cec5SDimitry Andric   if (DeclLayout.isPrimaryBaseVirtual() && DeclLayout.getPrimaryBase() == Query)
5600b57cec5SDimitry Andric     return false;
5610b57cec5SDimitry Andric   for (const auto &Base : Decl->bases())
5620b57cec5SDimitry Andric     if (!hasOwnStorage(Base.getType()->getAsCXXRecordDecl(), Query))
5630b57cec5SDimitry Andric       return false;
5640b57cec5SDimitry Andric   return true;
5650b57cec5SDimitry Andric }
5660b57cec5SDimitry Andric 
5670b57cec5SDimitry Andric void CGRecordLowering::calculateZeroInit() {
5680b57cec5SDimitry Andric   for (std::vector<MemberInfo>::const_iterator Member = Members.begin(),
5690b57cec5SDimitry Andric                                                MemberEnd = Members.end();
5700b57cec5SDimitry Andric        IsZeroInitializableAsBase && Member != MemberEnd; ++Member) {
5710b57cec5SDimitry Andric     if (Member->Kind == MemberInfo::Field) {
5720b57cec5SDimitry Andric       if (!Member->FD || isZeroInitializable(Member->FD))
5730b57cec5SDimitry Andric         continue;
5740b57cec5SDimitry Andric       IsZeroInitializable = IsZeroInitializableAsBase = false;
5750b57cec5SDimitry Andric     } else if (Member->Kind == MemberInfo::Base ||
5760b57cec5SDimitry Andric                Member->Kind == MemberInfo::VBase) {
5770b57cec5SDimitry Andric       if (isZeroInitializable(Member->RD))
5780b57cec5SDimitry Andric         continue;
5790b57cec5SDimitry Andric       IsZeroInitializable = false;
5800b57cec5SDimitry Andric       if (Member->Kind == MemberInfo::Base)
5810b57cec5SDimitry Andric         IsZeroInitializableAsBase = false;
5820b57cec5SDimitry Andric     }
5830b57cec5SDimitry Andric   }
5840b57cec5SDimitry Andric }
5850b57cec5SDimitry Andric 
void CGRecordLowering::clipTailPadding() {
  // Walk the (offset-sorted) members and shrink any member whose tail
  // padding is reused by a subsequent member, so the LLVM struct does not
  // overlap fields.  Prior tracks the last member with storage; Tail tracks
  // the first byte past Prior's storage.
  std::vector<MemberInfo>::iterator Prior = Members.begin();
  CharUnits Tail = getSize(Prior->Data);
  for (std::vector<MemberInfo>::iterator Member = Prior + 1,
                                         MemberEnd = Members.end();
       Member != MemberEnd; ++Member) {
    // Only members with data and the scissor can cut into tail padding.
    if (!Member->Data && Member->Kind != MemberInfo::Scissor)
      continue;
    if (Member->Offset < Tail) {
      // This member begins inside Prior's allocated size, so Prior's storage
      // type must be narrowed.
      assert(Prior->Kind == MemberInfo::Field &&
             "Only storage fields have tail padding!");
      if (!Prior->FD || Prior->FD->isBitField())
        // Bitfield storage: round its bit width up to whole bytes and use a
        // byte array of that size instead of the wider integer type.
        Prior->Data = getByteArrayType(bitsToCharUnits(llvm::alignTo(
            cast<llvm::IntegerType>(Prior->Data)->getIntegerBitWidth(), 8)));
      else {
        assert(Prior->FD->hasAttr<NoUniqueAddressAttr>() &&
               "should not have reused this field's tail padding");
        // [[no_unique_address]] field: clip it to its data size (the size
        // excluding tail padding).
        Prior->Data = getByteArrayType(
            Context.getTypeInfoDataSizeInChars(Prior->FD->getType()).first);
      }
    }
    if (Member->Data)
      Prior = Member;
    Tail = Prior->Offset + getSize(Prior->Data);
  }
}
6130b57cec5SDimitry Andric 
6140b57cec5SDimitry Andric void CGRecordLowering::determinePacked(bool NVBaseType) {
6150b57cec5SDimitry Andric   if (Packed)
6160b57cec5SDimitry Andric     return;
6170b57cec5SDimitry Andric   CharUnits Alignment = CharUnits::One();
6180b57cec5SDimitry Andric   CharUnits NVAlignment = CharUnits::One();
6190b57cec5SDimitry Andric   CharUnits NVSize =
6200b57cec5SDimitry Andric       !NVBaseType && RD ? Layout.getNonVirtualSize() : CharUnits::Zero();
6210b57cec5SDimitry Andric   for (std::vector<MemberInfo>::const_iterator Member = Members.begin(),
6220b57cec5SDimitry Andric                                                MemberEnd = Members.end();
6230b57cec5SDimitry Andric        Member != MemberEnd; ++Member) {
6240b57cec5SDimitry Andric     if (!Member->Data)
6250b57cec5SDimitry Andric       continue;
6260b57cec5SDimitry Andric     // If any member falls at an offset that it not a multiple of its alignment,
6270b57cec5SDimitry Andric     // then the entire record must be packed.
6280b57cec5SDimitry Andric     if (Member->Offset % getAlignment(Member->Data))
6290b57cec5SDimitry Andric       Packed = true;
6300b57cec5SDimitry Andric     if (Member->Offset < NVSize)
6310b57cec5SDimitry Andric       NVAlignment = std::max(NVAlignment, getAlignment(Member->Data));
6320b57cec5SDimitry Andric     Alignment = std::max(Alignment, getAlignment(Member->Data));
6330b57cec5SDimitry Andric   }
6340b57cec5SDimitry Andric   // If the size of the record (the capstone's offset) is not a multiple of the
6350b57cec5SDimitry Andric   // record's alignment, it must be packed.
6360b57cec5SDimitry Andric   if (Members.back().Offset % Alignment)
6370b57cec5SDimitry Andric     Packed = true;
6380b57cec5SDimitry Andric   // If the non-virtual sub-object is not a multiple of the non-virtual
6390b57cec5SDimitry Andric   // sub-object's alignment, it must be packed.  We cannot have a packed
6400b57cec5SDimitry Andric   // non-virtual sub-object and an unpacked complete object or vise versa.
6410b57cec5SDimitry Andric   if (NVSize % NVAlignment)
6420b57cec5SDimitry Andric     Packed = true;
6430b57cec5SDimitry Andric   // Update the alignment of the sentinel.
6440b57cec5SDimitry Andric   if (!Packed)
6450b57cec5SDimitry Andric     Members.back().Data = getIntNType(Context.toBits(Alignment));
6460b57cec5SDimitry Andric }
6470b57cec5SDimitry Andric 
6480b57cec5SDimitry Andric void CGRecordLowering::insertPadding() {
6490b57cec5SDimitry Andric   std::vector<std::pair<CharUnits, CharUnits> > Padding;
6500b57cec5SDimitry Andric   CharUnits Size = CharUnits::Zero();
6510b57cec5SDimitry Andric   for (std::vector<MemberInfo>::const_iterator Member = Members.begin(),
6520b57cec5SDimitry Andric                                                MemberEnd = Members.end();
6530b57cec5SDimitry Andric        Member != MemberEnd; ++Member) {
6540b57cec5SDimitry Andric     if (!Member->Data)
6550b57cec5SDimitry Andric       continue;
6560b57cec5SDimitry Andric     CharUnits Offset = Member->Offset;
6570b57cec5SDimitry Andric     assert(Offset >= Size);
6580b57cec5SDimitry Andric     // Insert padding if we need to.
6590b57cec5SDimitry Andric     if (Offset !=
6600b57cec5SDimitry Andric         Size.alignTo(Packed ? CharUnits::One() : getAlignment(Member->Data)))
6610b57cec5SDimitry Andric       Padding.push_back(std::make_pair(Size, Offset - Size));
6620b57cec5SDimitry Andric     Size = Offset + getSize(Member->Data);
6630b57cec5SDimitry Andric   }
6640b57cec5SDimitry Andric   if (Padding.empty())
6650b57cec5SDimitry Andric     return;
6660b57cec5SDimitry Andric   // Add the padding to the Members list and sort it.
6670b57cec5SDimitry Andric   for (std::vector<std::pair<CharUnits, CharUnits> >::const_iterator
6680b57cec5SDimitry Andric         Pad = Padding.begin(), PadEnd = Padding.end();
6690b57cec5SDimitry Andric         Pad != PadEnd; ++Pad)
6700b57cec5SDimitry Andric     Members.push_back(StorageInfo(Pad->first, getByteArrayType(Pad->second)));
6710b57cec5SDimitry Andric   llvm::stable_sort(Members);
6720b57cec5SDimitry Andric }
6730b57cec5SDimitry Andric 
6740b57cec5SDimitry Andric void CGRecordLowering::fillOutputFields() {
6750b57cec5SDimitry Andric   for (std::vector<MemberInfo>::const_iterator Member = Members.begin(),
6760b57cec5SDimitry Andric                                                MemberEnd = Members.end();
6770b57cec5SDimitry Andric        Member != MemberEnd; ++Member) {
6780b57cec5SDimitry Andric     if (Member->Data)
6790b57cec5SDimitry Andric       FieldTypes.push_back(Member->Data);
6800b57cec5SDimitry Andric     if (Member->Kind == MemberInfo::Field) {
6810b57cec5SDimitry Andric       if (Member->FD)
6820b57cec5SDimitry Andric         Fields[Member->FD->getCanonicalDecl()] = FieldTypes.size() - 1;
6830b57cec5SDimitry Andric       // A field without storage must be a bitfield.
6840b57cec5SDimitry Andric       if (!Member->Data)
6850b57cec5SDimitry Andric         setBitFieldInfo(Member->FD, Member->Offset, FieldTypes.back());
6860b57cec5SDimitry Andric     } else if (Member->Kind == MemberInfo::Base)
6870b57cec5SDimitry Andric       NonVirtualBases[Member->RD] = FieldTypes.size() - 1;
6880b57cec5SDimitry Andric     else if (Member->Kind == MemberInfo::VBase)
6890b57cec5SDimitry Andric       VirtualBases[Member->RD] = FieldTypes.size() - 1;
6900b57cec5SDimitry Andric   }
6910b57cec5SDimitry Andric }
6920b57cec5SDimitry Andric 
6930b57cec5SDimitry Andric CGBitFieldInfo CGBitFieldInfo::MakeInfo(CodeGenTypes &Types,
6940b57cec5SDimitry Andric                                         const FieldDecl *FD,
6950b57cec5SDimitry Andric                                         uint64_t Offset, uint64_t Size,
6960b57cec5SDimitry Andric                                         uint64_t StorageSize,
6970b57cec5SDimitry Andric                                         CharUnits StorageOffset) {
6980b57cec5SDimitry Andric   // This function is vestigial from CGRecordLayoutBuilder days but is still
6990b57cec5SDimitry Andric   // used in GCObjCRuntime.cpp.  That usage has a "fixme" attached to it that
7000b57cec5SDimitry Andric   // when addressed will allow for the removal of this function.
7010b57cec5SDimitry Andric   llvm::Type *Ty = Types.ConvertTypeForMem(FD->getType());
7020b57cec5SDimitry Andric   CharUnits TypeSizeInBytes =
7030b57cec5SDimitry Andric     CharUnits::fromQuantity(Types.getDataLayout().getTypeAllocSize(Ty));
7040b57cec5SDimitry Andric   uint64_t TypeSizeInBits = Types.getContext().toBits(TypeSizeInBytes);
7050b57cec5SDimitry Andric 
7060b57cec5SDimitry Andric   bool IsSigned = FD->getType()->isSignedIntegerOrEnumerationType();
7070b57cec5SDimitry Andric 
7080b57cec5SDimitry Andric   if (Size > TypeSizeInBits) {
7090b57cec5SDimitry Andric     // We have a wide bit-field. The extra bits are only used for padding, so
7100b57cec5SDimitry Andric     // if we have a bitfield of type T, with size N:
7110b57cec5SDimitry Andric     //
7120b57cec5SDimitry Andric     // T t : N;
7130b57cec5SDimitry Andric     //
7140b57cec5SDimitry Andric     // We can just assume that it's:
7150b57cec5SDimitry Andric     //
7160b57cec5SDimitry Andric     // T t : sizeof(T);
7170b57cec5SDimitry Andric     //
7180b57cec5SDimitry Andric     Size = TypeSizeInBits;
7190b57cec5SDimitry Andric   }
7200b57cec5SDimitry Andric 
7210b57cec5SDimitry Andric   // Reverse the bit offsets for big endian machines. Because we represent
7220b57cec5SDimitry Andric   // a bitfield as a single large integer load, we can imagine the bits
7230b57cec5SDimitry Andric   // counting from the most-significant-bit instead of the
7240b57cec5SDimitry Andric   // least-significant-bit.
7250b57cec5SDimitry Andric   if (Types.getDataLayout().isBigEndian()) {
7260b57cec5SDimitry Andric     Offset = StorageSize - (Offset + Size);
7270b57cec5SDimitry Andric   }
7280b57cec5SDimitry Andric 
7290b57cec5SDimitry Andric   return CGBitFieldInfo(Offset, Size, IsSigned, StorageSize, StorageOffset);
7300b57cec5SDimitry Andric }
7310b57cec5SDimitry Andric 
CGRecordLayout *CodeGenTypes::ComputeRecordLayout(const RecordDecl *D,
                                                  llvm::StructType *Ty) {
  // Lower the AST layout of D into the (still-opaque) LLVM struct Ty and
  // build the CGRecordLayout that maps fields, bases, and bitfields to LLVM
  // struct indices.  In NDEBUG-less builds, cross-check the result against
  // the AST layout.
  CGRecordLowering Builder(*this, D, /*Packed=*/false);

  Builder.lower(/*NonVirtualBaseType=*/false);

  // If we're in C++, compute the base subobject type.
  llvm::StructType *BaseTy = nullptr;
  if (isa<CXXRecordDecl>(D) && !D->isUnion() && !D->hasAttr<FinalAttr>()) {
    BaseTy = Ty;
    if (Builder.Layout.getNonVirtualSize() != Builder.Layout.getSize()) {
      // The non-virtual sub-object differs from the complete object; lower
      // it separately into its own ".base" struct type.
      CGRecordLowering BaseBuilder(*this, D, /*Packed=*/Builder.Packed);
      BaseBuilder.lower(/*NonVirtualBaseType=*/true);
      BaseTy = llvm::StructType::create(
          getLLVMContext(), BaseBuilder.FieldTypes, "", BaseBuilder.Packed);
      addRecordTypeName(D, BaseTy, ".base");
      // BaseTy and Ty must agree on their packedness for getLLVMFieldNo to work
      // on both of them with the same index.
      assert(Builder.Packed == BaseBuilder.Packed &&
             "Non-virtual and complete types must agree on packedness");
    }
  }

  // Fill in the struct *after* computing the base type.  Filling in the body
  // signifies that the type is no longer opaque and record layout is complete,
  // but we may need to recursively layout D while laying D out as a base type.
  Ty->setBody(Builder.FieldTypes, Builder.Packed);

  CGRecordLayout *RL =
    new CGRecordLayout(Ty, BaseTy, Builder.IsZeroInitializable,
                        Builder.IsZeroInitializableAsBase);

  // Move the builder's maps into the result rather than copying them.
  RL->NonVirtualBases.swap(Builder.NonVirtualBases);
  RL->CompleteObjectVirtualBases.swap(Builder.VirtualBases);

  // Add all the field numbers.
  RL->FieldInfo.swap(Builder.Fields);

  // Add bitfield info.
  RL->BitFields.swap(Builder.BitFields);

  // Dump the layout, if requested.
  if (getContext().getLangOpts().DumpRecordLayouts) {
    llvm::outs() << "\n*** Dumping IRgen Record Layout\n";
    llvm::outs() << "Record: ";
    D->dump(llvm::outs());
    llvm::outs() << "\nLayout: ";
    RL->print(llvm::outs());
  }

#ifndef NDEBUG
  // Verify that the computed LLVM struct size matches the AST layout size.
  const ASTRecordLayout &Layout = getContext().getASTRecordLayout(D);

  uint64_t TypeSizeInBits = getContext().toBits(Layout.getSize());
  assert(TypeSizeInBits == getDataLayout().getTypeAllocSizeInBits(Ty) &&
         "Type size mismatch!");

  if (BaseTy) {
    CharUnits NonVirtualSize  = Layout.getNonVirtualSize();

    uint64_t AlignedNonVirtualTypeSizeInBits =
      getContext().toBits(NonVirtualSize);

    assert(AlignedNonVirtualTypeSizeInBits ==
           getDataLayout().getTypeAllocSizeInBits(BaseTy) &&
           "Type size mismatch!");
  }

  // Verify that the LLVM and AST field offsets agree.
  llvm::StructType *ST = RL->getLLVMType();
  const llvm::StructLayout *SL = getDataLayout().getStructLayout(ST);

  const ASTRecordLayout &AST_RL = getContext().getASTRecordLayout(D);
  RecordDecl::field_iterator it = D->field_begin();
  for (unsigned i = 0, e = AST_RL.getFieldCount(); i != e; ++i, ++it) {
    const FieldDecl *FD = *it;

    // Ignore zero-sized fields.
    if (FD->isZeroSize(getContext()))
      continue;

    // For non-bit-fields, just check that the LLVM struct offset matches the
    // AST offset.
    if (!FD->isBitField()) {
      unsigned FieldNo = RL->getLLVMFieldNo(FD);
      assert(AST_RL.getFieldOffset(i) == SL->getElementOffsetInBits(FieldNo) &&
             "Invalid field offset!");
      continue;
    }

    // Ignore unnamed bit-fields.
    if (!FD->getDeclName())
      continue;

    const CGBitFieldInfo &Info = RL->getBitFieldInfo(FD);
    llvm::Type *ElementTy = ST->getTypeAtIndex(RL->getLLVMFieldNo(FD));

    // Unions have overlapping elements dictating their layout, but for
    // non-unions we can verify that this section of the layout is the exact
    // expected size.
    if (D->isUnion()) {
      // For unions we verify that the start is zero and the size
      // is in-bounds. However, on BE systems, the offset may be non-zero, but
      // the size + offset should match the storage size in that case as it
      // "starts" at the back.
      if (getDataLayout().isBigEndian())
        assert(static_cast<unsigned>(Info.Offset + Info.Size) ==
               Info.StorageSize &&
               "Big endian union bitfield does not end at the back");
      else
        assert(Info.Offset == 0 &&
               "Little endian union bitfield with a non-zero offset");
      assert(Info.StorageSize <= SL->getSizeInBits() &&
             "Union not large enough for bitfield storage");
    } else {
      assert(Info.StorageSize ==
             getDataLayout().getTypeAllocSizeInBits(ElementTy) &&
             "Storage size does not match the element type size");
    }
    assert(Info.Size > 0 && "Empty bitfield!");
    assert(static_cast<unsigned>(Info.Offset) + Info.Size <= Info.StorageSize &&
           "Bitfield outside of its allocated storage");
  }
#endif

  return RL;
}
8600b57cec5SDimitry Andric 
8610b57cec5SDimitry Andric void CGRecordLayout::print(raw_ostream &OS) const {
8620b57cec5SDimitry Andric   OS << "<CGRecordLayout\n";
8630b57cec5SDimitry Andric   OS << "  LLVMType:" << *CompleteObjectType << "\n";
8640b57cec5SDimitry Andric   if (BaseSubobjectType)
8650b57cec5SDimitry Andric     OS << "  NonVirtualBaseLLVMType:" << *BaseSubobjectType << "\n";
8660b57cec5SDimitry Andric   OS << "  IsZeroInitializable:" << IsZeroInitializable << "\n";
8670b57cec5SDimitry Andric   OS << "  BitFields:[\n";
8680b57cec5SDimitry Andric 
8690b57cec5SDimitry Andric   // Print bit-field infos in declaration order.
8700b57cec5SDimitry Andric   std::vector<std::pair<unsigned, const CGBitFieldInfo*> > BFIs;
8710b57cec5SDimitry Andric   for (llvm::DenseMap<const FieldDecl*, CGBitFieldInfo>::const_iterator
8720b57cec5SDimitry Andric          it = BitFields.begin(), ie = BitFields.end();
8730b57cec5SDimitry Andric        it != ie; ++it) {
8740b57cec5SDimitry Andric     const RecordDecl *RD = it->first->getParent();
8750b57cec5SDimitry Andric     unsigned Index = 0;
8760b57cec5SDimitry Andric     for (RecordDecl::field_iterator
8770b57cec5SDimitry Andric            it2 = RD->field_begin(); *it2 != it->first; ++it2)
8780b57cec5SDimitry Andric       ++Index;
8790b57cec5SDimitry Andric     BFIs.push_back(std::make_pair(Index, &it->second));
8800b57cec5SDimitry Andric   }
8810b57cec5SDimitry Andric   llvm::array_pod_sort(BFIs.begin(), BFIs.end());
8820b57cec5SDimitry Andric   for (unsigned i = 0, e = BFIs.size(); i != e; ++i) {
8830b57cec5SDimitry Andric     OS.indent(4);
8840b57cec5SDimitry Andric     BFIs[i].second->print(OS);
8850b57cec5SDimitry Andric     OS << "\n";
8860b57cec5SDimitry Andric   }
8870b57cec5SDimitry Andric 
8880b57cec5SDimitry Andric   OS << "]>\n";
8890b57cec5SDimitry Andric }
8900b57cec5SDimitry Andric 
// Debugger convenience: print this layout to stderr.
LLVM_DUMP_METHOD void CGRecordLayout::dump() const {
  print(llvm::errs());
}
8940b57cec5SDimitry Andric 
8950b57cec5SDimitry Andric void CGBitFieldInfo::print(raw_ostream &OS) const {
8960b57cec5SDimitry Andric   OS << "<CGBitFieldInfo"
8970b57cec5SDimitry Andric      << " Offset:" << Offset
8980b57cec5SDimitry Andric      << " Size:" << Size
8990b57cec5SDimitry Andric      << " IsSigned:" << IsSigned
9000b57cec5SDimitry Andric      << " StorageSize:" << StorageSize
9010b57cec5SDimitry Andric      << " StorageOffset:" << StorageOffset.getQuantity() << ">";
9020b57cec5SDimitry Andric }
9030b57cec5SDimitry Andric 
// Debugger convenience: print this bitfield info to stderr.
LLVM_DUMP_METHOD void CGBitFieldInfo::dump() const {
  print(llvm::errs());
}
907