//===--- CGRecordLayoutBuilder.cpp - CGRecordLayout builder  ----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Builder implementation for CGRecordLayout objects.
//
//===----------------------------------------------------------------------===//

#include "CGRecordLayout.h"
#include "CGCXXABI.h"
#include "CodeGenTypes.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/CXXInheritance.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/Expr.h"
#include "clang/AST/RecordLayout.h"
#include "clang/Basic/CodeGenOptions.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
using namespace clang;
using namespace CodeGen;

namespace {
/// The CGRecordLowering is responsible for lowering an ASTRecordLayout to an
/// llvm::Type.  Some of the lowering is straightforward, some is not.  We
/// detail some of the complexities and weirdnesses here.
/// * LLVM does not have unions - Unions can, in theory, be represented by any
///   llvm::Type with the correct size.  We choose a field via a specific
///   heuristic and add padding if necessary.
/// * LLVM does not have bitfields - Bitfields are collected into contiguous
///   runs and allocated as a single storage type for the run.  ASTRecordLayout
///   contains enough information to determine where the runs break.  Microsoft
///   and Itanium follow different rules and use different codepaths.
/// * It is desired that, when possible, bitfields use the appropriate iN type
///   when lowered to llvm types.  For example unsigned x : 24 gets lowered to
///   i24.  This isn't always possible because i24 has a storage size of 32
///   bits, and if it is possible to use that extra byte of padding we must use
///   [i8 x 3] instead of i24.  The function clipTailPadding does this.
///   C++ examples that require clipping:
///   struct { int a : 24; char b; }; // a must be clipped, b goes at offset 3
///   struct A { int a : 24; }; // a must be clipped because a struct like B
///   could exist: struct B : A { char b; }; // b goes at offset 3
/// * Clang ignores 0 sized bitfields and 0 sized bases but *not* zero sized
///   fields.  The existing asserts suggest that LLVM assumes that *every* field
///   has an underlying storage type.  Therefore empty structures containing
///   zero sized subobjects such as empty records or zero sized arrays still get
///   a zero sized (empty struct) storage type.
/// * Clang reads the complete type rather than the base type when generating
///   code to access fields.  Bitfields in tail position with tail padding may
///   be clipped in the base class but not the complete class (we may discover
///   that the tail padding is not used in the complete class.) However,
///   because LLVM reads from the complete type it can generate incorrect code
///   if we do not clip the tail padding off of the bitfield in the complete
///   layout.  This introduces a somewhat awkward extra unnecessary clip stage.
///   The location of the clip is stored internally as a sentinel of type
///   SCISSOR.  If LLVM were updated to read base types (which it probably
///   should because locations of things such as VBases are bogus in the llvm
///   type anyway) then we could eliminate the SCISSOR.
/// * Itanium allows nearly empty primary virtual bases.  These bases don't
///   get their own storage because they're laid out as part of another base
///   or at the beginning of the structure.  Determining if a VBase actually
///   gets storage awkwardly involves a walk of all bases.
/// * VFPtrs and VBPtrs do *not* make a record NotZeroInitializable.
struct CGRecordLowering {
  // MemberInfo is a helper structure that contains information about a record
  // member.  In addition to the standard member types, there exists a
  // sentinel member type that ensures correct rounding.
  struct MemberInfo {
    CharUnits Offset;
    enum InfoKind { VFPtr, VBPtr, Field, Base, VBase, Scissor } Kind;
    llvm::Type *Data;
    union {
      const FieldDecl *FD;
      const CXXRecordDecl *RD;
    };
    MemberInfo(CharUnits Offset, InfoKind Kind, llvm::Type *Data,
               const FieldDecl *FD = nullptr)
      : Offset(Offset), Kind(Kind), Data(Data), FD(FD) {}
    MemberInfo(CharUnits Offset, InfoKind Kind, llvm::Type *Data,
               const CXXRecordDecl *RD)
      : Offset(Offset), Kind(Kind), Data(Data), RD(RD) {}
    // MemberInfos are sorted so we define a < operator.
    bool operator <(const MemberInfo& a) const { return Offset < a.Offset; }
  };
  // The constructor.
  CGRecordLowering(CodeGenTypes &Types, const RecordDecl *D, bool Packed);
  // Short helper routines.
  /// Constructs a MemberInfo instance from an offset and llvm::Type *.
  MemberInfo StorageInfo(CharUnits Offset, llvm::Type *Data) {
    return MemberInfo(Offset, MemberInfo::Field, Data);
  }

  /// The Microsoft bitfield layout rule allocates discrete storage
  /// units of the field's formal type and only combines adjacent
  /// fields of the same formal type.  We want to emit a layout with
  /// these discrete storage units instead of combining them into a
  /// continuous run.
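  /// As an illustrative sketch (not verified against actual IR output): given
  ///   struct S { short a : 4; int b : 4; };
  /// the Microsoft rule gives 'a' and 'b' separate storage units (an i16 and
  /// an i32) because their formal types differ, whereas Itanium would pack
  /// both into a single contiguous run.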
  bool isDiscreteBitFieldABI() {
    return Context.getTargetInfo().getCXXABI().isMicrosoft() ||
           D->isMsStruct(Context);
  }

  /// Helper function to check if we are targeting AAPCS.
  bool isAAPCS() const {
    return Context.getTargetInfo().getABI().startswith("aapcs");
  }

  /// Helper function to check if the target machine is BigEndian.
  bool isBE() const { return Context.getTargetInfo().isBigEndian(); }

  /// The Itanium base layout rule allows virtual bases to overlap
  /// other bases, which complicates layout in specific ways.
  ///
  /// Note specifically that the ms_struct attribute doesn't change this.
  bool isOverlappingVBaseABI() {
    return !Context.getTargetInfo().getCXXABI().isMicrosoft();
  }

  /// Wraps llvm::Type::getIntNTy with some implicit arguments.
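  /// For example, on a target with 8-bit chars, getIntNType(17) rounds the
  /// bit count up to the next byte boundary and returns an i24.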
  llvm::Type *getIntNType(uint64_t NumBits) {
    unsigned AlignedBits = llvm::alignTo(NumBits, Context.getCharWidth());
    return llvm::Type::getIntNTy(Types.getLLVMContext(), AlignedBits);
  }
  /// Get the LLVM type sized as one character unit.
  llvm::Type *getCharType() {
    return llvm::Type::getIntNTy(Types.getLLVMContext(),
                                 Context.getCharWidth());
  }
  /// Gets an llvm type of size NumChars and alignment 1.
  llvm::Type *getByteArrayType(CharUnits NumChars) {
    assert(!NumChars.isZero() && "Empty byte arrays aren't allowed.");
    llvm::Type *Type = getCharType();
    return NumChars == CharUnits::One() ? Type :
        (llvm::Type *)llvm::ArrayType::get(Type, NumChars.getQuantity());
  }
  /// Gets the storage type for a field decl and handles storage
  /// for itanium bitfields that are smaller than their declared type.
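  /// For example (a sketch): on Itanium, 'unsigned x : 3' yields an i8 here
  /// (the 3-bit width rounded up to a byte) rather than the i32 of its
  /// declared type; the Microsoft rule keeps the declared type's i32.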
  llvm::Type *getStorageType(const FieldDecl *FD) {
    llvm::Type *Type = Types.ConvertTypeForMem(FD->getType());
    if (!FD->isBitField()) return Type;
    if (isDiscreteBitFieldABI()) return Type;
    return getIntNType(std::min(FD->getBitWidthValue(Context),
                             (unsigned)Context.toBits(getSize(Type))));
  }
  /// Gets the llvm BaseSubobject type from a CXXRecordDecl.
  llvm::Type *getStorageType(const CXXRecordDecl *RD) {
    return Types.getCGRecordLayout(RD).getBaseSubobjectLLVMType();
  }
  CharUnits bitsToCharUnits(uint64_t BitOffset) {
    return Context.toCharUnitsFromBits(BitOffset);
  }
  CharUnits getSize(llvm::Type *Type) {
    return CharUnits::fromQuantity(DataLayout.getTypeAllocSize(Type));
  }
  CharUnits getAlignment(llvm::Type *Type) {
    return CharUnits::fromQuantity(DataLayout.getABITypeAlignment(Type));
  }
  bool isZeroInitializable(const FieldDecl *FD) {
    return Types.isZeroInitializable(FD->getType());
  }
  bool isZeroInitializable(const RecordDecl *RD) {
    return Types.isZeroInitializable(RD);
  }
  void appendPaddingBytes(CharUnits Size) {
    if (!Size.isZero())
      FieldTypes.push_back(getByteArrayType(Size));
  }
  uint64_t getFieldBitOffset(const FieldDecl *FD) {
    return Layout.getFieldOffset(FD->getFieldIndex());
  }
  // Layout routines.
  void setBitFieldInfo(const FieldDecl *FD, CharUnits StartOffset,
                       llvm::Type *StorageType);
  /// Lowers an ASTRecordLayout to an llvm type.
  void lower(bool NonVirtualBaseType);
  void lowerUnion();
  void accumulateFields();
  void accumulateBitFields(RecordDecl::field_iterator Field,
                           RecordDecl::field_iterator FieldEnd);
  void computeVolatileBitfields();
  void accumulateBases();
  void accumulateVPtrs();
  void accumulateVBases();
  /// Recursively searches all of the bases to find out if a vbase is
  /// not the primary vbase of some base class.
  bool hasOwnStorage(const CXXRecordDecl *Decl, const CXXRecordDecl *Query);
  void calculateZeroInit();
  /// Lowers bitfield storage types to I8 arrays for bitfields with tail
  /// padding that is or can potentially be used.
  void clipTailPadding();
  /// Determines if we need a packed llvm struct.
  void determinePacked(bool NVBaseType);
  /// Inserts padding everywhere it's needed.
  void insertPadding();
  /// Fills out the structures that are ultimately consumed.
  void fillOutputFields();
  // Input memoization fields.
  CodeGenTypes &Types;
  const ASTContext &Context;
  const RecordDecl *D;
  const CXXRecordDecl *RD;
  const ASTRecordLayout &Layout;
  const llvm::DataLayout &DataLayout;
  // Helpful intermediate data-structures.
  std::vector<MemberInfo> Members;
  // Output fields, consumed by CodeGenTypes::ComputeRecordLayout.
  SmallVector<llvm::Type *, 16> FieldTypes;
  llvm::DenseMap<const FieldDecl *, unsigned> Fields;
  llvm::DenseMap<const FieldDecl *, CGBitFieldInfo> BitFields;
  llvm::DenseMap<const CXXRecordDecl *, unsigned> NonVirtualBases;
  llvm::DenseMap<const CXXRecordDecl *, unsigned> VirtualBases;
  bool IsZeroInitializable : 1;
  bool IsZeroInitializableAsBase : 1;
  bool Packed : 1;
private:
  CGRecordLowering(const CGRecordLowering &) = delete;
  void operator =(const CGRecordLowering &) = delete;
};
} // namespace
CGRecordLowering::CGRecordLowering(CodeGenTypes &Types, const RecordDecl *D,
                                   bool Packed)
    : Types(Types), Context(Types.getContext()), D(D),
      RD(dyn_cast<CXXRecordDecl>(D)),
      Layout(Types.getContext().getASTRecordLayout(D)),
      DataLayout(Types.getDataLayout()), IsZeroInitializable(true),
      IsZeroInitializableAsBase(true), Packed(Packed) {}

void CGRecordLowering::setBitFieldInfo(
    const FieldDecl *FD, CharUnits StartOffset, llvm::Type *StorageType) {
  CGBitFieldInfo &Info = BitFields[FD->getCanonicalDecl()];
  Info.IsSigned = FD->getType()->isSignedIntegerOrEnumerationType();
  Info.Offset = (unsigned)(getFieldBitOffset(FD) - Context.toBits(StartOffset));
  Info.Size = FD->getBitWidthValue(Context);
  Info.StorageSize = (unsigned)DataLayout.getTypeAllocSizeInBits(StorageType);
  Info.StorageOffset = StartOffset;
  if (Info.Size > Info.StorageSize)
    Info.Size = Info.StorageSize;
  // Reverse the bit offsets for big endian machines. Because we represent
  // a bitfield as a single large integer load, we can imagine the bits
  // counting from the most-significant-bit instead of the
  // least-significant-bit.
  if (DataLayout.isBigEndian())
    Info.Offset = Info.StorageSize - (Info.Offset + Info.Size);

  Info.VolatileStorageSize = 0;
  Info.VolatileOffset = 0;
  Info.VolatileStorageOffset = CharUnits::Zero();
}

void CGRecordLowering::lower(bool NVBaseType) {
  // The lowering process implemented in this function takes a variety of
  // carefully ordered phases.
  // 1) Store all members (fields and bases) in a list and sort them by offset.
  // 2) Add a 1-byte capstone member at the Size of the structure.
  // 3) Clip bitfield storage members if their tail padding is or might be
  //    used by another field or base.  The clipping process uses the capstone
  //    by treating it as another object that occurs after the record.
  // 4) Determine if the llvm-struct requires packing.  It's important that this
  //    phase occur after clipping, because clipping changes the llvm type.
  //    This phase reads the offset of the capstone when determining packedness
  //    and updates the alignment of the capstone to be equal to the alignment
  //    of the record after doing so.
  // 5) Insert padding everywhere it is needed.  This phase requires 'Packed' to
  //    have been computed and needs to know the alignment of the record in
  //    order to understand if explicit tail padding is needed.
  // 6) Remove the capstone, we don't need it anymore.
  // 7) Determine if this record can be zero-initialized.  This phase could have
  //    been placed anywhere after phase 1.
  // 8) Format the complete list of members in a way that can be consumed by
  //    CodeGenTypes::ComputeRecordLayout.
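  // As an illustrative sketch of these phases (not verified against actual
  // output): lowering
  //   struct S { char a; int b : 24; };
  // sorts the members as {i8 @ 0, i24 storage @ 1}, adds an i8 capstone at
  // offset 4 (the AST size), and clipTailPadding then turns the i24 into
  // [3 x i8] because the i24's 4-byte alloc size would overlap the capstone.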
  CharUnits Size = NVBaseType ? Layout.getNonVirtualSize() : Layout.getSize();
  if (D->isUnion()) {
    lowerUnion();
    computeVolatileBitfields();
    return;
  }
  accumulateFields();
  // RD implies C++.
  if (RD) {
    accumulateVPtrs();
    accumulateBases();
    if (Members.empty()) {
      appendPaddingBytes(Size);
      computeVolatileBitfields();
      return;
    }
    if (!NVBaseType)
      accumulateVBases();
  }
  llvm::stable_sort(Members);
  Members.push_back(StorageInfo(Size, getIntNType(8)));
  clipTailPadding();
  determinePacked(NVBaseType);
  insertPadding();
  Members.pop_back();
  calculateZeroInit();
  fillOutputFields();
  computeVolatileBitfields();
}

void CGRecordLowering::lowerUnion() {
  CharUnits LayoutSize = Layout.getSize();
  llvm::Type *StorageType = nullptr;
  bool SeenNamedMember = false;
  // Iterate through the fields setting bitFieldInfo and the Fields array. Also
  // locate the "most appropriate" storage type.  The heuristic for finding the
  // storage type isn't necessary; the first (non-0-length-bitfield) field's
  // type would work fine and be simpler, but it would differ from what we've
  // been doing and cause lit tests to change.
  for (const auto *Field : D->fields()) {
    if (Field->isBitField()) {
      if (Field->isZeroLengthBitField(Context))
        continue;
      llvm::Type *FieldType = getStorageType(Field);
      if (LayoutSize < getSize(FieldType))
        FieldType = getByteArrayType(LayoutSize);
      setBitFieldInfo(Field, CharUnits::Zero(), FieldType);
    }
    Fields[Field->getCanonicalDecl()] = 0;
    llvm::Type *FieldType = getStorageType(Field);
    // Compute zero-initializable status.
    // This union might not be zero initialized: it may contain a pointer to
    // data member which might have some exotic initialization sequence.
    // If this is the case, then we ought not to try and come up with a "better"
    // type, it might not be very easy to come up with a Constant which
    // correctly initializes it.
    if (!SeenNamedMember) {
      SeenNamedMember = Field->getIdentifier();
      if (!SeenNamedMember)
        if (const auto *FieldRD = Field->getType()->getAsRecordDecl())
          SeenNamedMember = FieldRD->findFirstNamedDataMember();
      if (SeenNamedMember && !isZeroInitializable(Field)) {
        IsZeroInitializable = IsZeroInitializableAsBase = false;
        StorageType = FieldType;
      }
    }
    // Because our union isn't zero initializable, we won't be getting a better
    // storage type.
    if (!IsZeroInitializable)
      continue;
    // Conditionally update our storage type if we've got a new "better" one.
    if (!StorageType ||
        getAlignment(FieldType) >  getAlignment(StorageType) ||
        (getAlignment(FieldType) == getAlignment(StorageType) &&
        getSize(FieldType) > getSize(StorageType)))
      StorageType = FieldType;
  }
  // If we have no storage type just pad to the appropriate size and return.
  if (!StorageType)
    return appendPaddingBytes(LayoutSize);
  // If our storage size was bigger than our required size (can happen in the
  // case of packed bitfields on Itanium) then just use an I8 array.
  if (LayoutSize < getSize(StorageType))
    StorageType = getByteArrayType(LayoutSize);
  FieldTypes.push_back(StorageType);
  appendPaddingBytes(LayoutSize - getSize(StorageType));
  // Set packed if we need it.
  if (LayoutSize % getAlignment(StorageType))
    Packed = true;
}
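
// As an illustrative sketch: for 'union U { char c; int i; };' the heuristic
// above picks i32 as the storage type (it has the stricter alignment), so the
// union lowers to { i32 } with no padding; had the chosen storage been smaller
// than the layout size, trailing byte-array padding would have been appended.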

void CGRecordLowering::accumulateFields() {
  for (RecordDecl::field_iterator Field = D->field_begin(),
                                  FieldEnd = D->field_end();
    Field != FieldEnd;) {
    if (Field->isBitField()) {
      RecordDecl::field_iterator Start = Field;
      // Iterate to gather the list of bitfields.
      for (++Field; Field != FieldEnd && Field->isBitField(); ++Field);
      accumulateBitFields(Start, Field);
    } else if (!Field->isZeroSize(Context)) {
      Members.push_back(MemberInfo(
          bitsToCharUnits(getFieldBitOffset(*Field)), MemberInfo::Field,
          getStorageType(*Field), *Field));
      ++Field;
    } else {
      ++Field;
    }
  }
}

void
CGRecordLowering::accumulateBitFields(RecordDecl::field_iterator Field,
                                      RecordDecl::field_iterator FieldEnd) {
  // Run stores the first element of the current run of bitfields.  FieldEnd is
  // used as a special value to note that we don't have a current run.  A
  // bitfield run is a contiguous collection of bitfields that can be stored in
  // the same storage block.  Zero-sized bitfields and bitfields that would
  // cross an alignment boundary break a run and start a new one.
  RecordDecl::field_iterator Run = FieldEnd;
  // Tail is the offset of the first bit off the end of the current run.  It's
  // used to determine if the ASTRecordLayout is treating these two bitfields as
  // contiguous.  StartBitOffset is the offset of the beginning of the Run.
  uint64_t StartBitOffset, Tail = 0;
  if (isDiscreteBitFieldABI()) {
    for (; Field != FieldEnd; ++Field) {
      uint64_t BitOffset = getFieldBitOffset(*Field);
      // Zero-width bitfields end runs.
      if (Field->isZeroLengthBitField(Context)) {
        Run = FieldEnd;
        continue;
      }
      llvm::Type *Type =
          Types.ConvertTypeForMem(Field->getType(), /*ForBitFields=*/true);
      // If we don't have a run yet, or don't live within the previous run's
      // allocated storage then we allocate some storage and start a new run.
      if (Run == FieldEnd || BitOffset >= Tail) {
        Run = Field;
        StartBitOffset = BitOffset;
        Tail = StartBitOffset + DataLayout.getTypeAllocSizeInBits(Type);
        // Add the storage member to the record.  This must be added to the
        // record before the bitfield members so that it gets laid out before
        // the bitfields it contains get laid out.
        Members.push_back(StorageInfo(bitsToCharUnits(StartBitOffset), Type));
      }
      // Bitfields get the offset of their storage but come afterward and remain
      // there after a stable sort.
      Members.push_back(MemberInfo(bitsToCharUnits(StartBitOffset),
                                   MemberInfo::Field, nullptr, *Field));
    }
    return;
  }

  // Check if OffsetInRecord (the size in bits of the current run) is better
  // as a single field run. When OffsetInRecord has legal integer width, and
  // its bitfield offset is naturally aligned, it is better to make the
  // bitfield a separate storage component so that it can be accessed directly
  // with lower cost.
  auto IsBetterAsSingleFieldRun = [&](uint64_t OffsetInRecord,
                                      uint64_t StartBitOffset) {
    if (!Types.getCodeGenOpts().FineGrainedBitfieldAccesses)
      return false;
    if (OffsetInRecord < 8 || !llvm::isPowerOf2_64(OffsetInRecord) ||
        !DataLayout.fitsInLegalInteger(OffsetInRecord))
      return false;
    // Make sure StartBitOffset is naturally aligned if it is treated as an
    // IType integer.
    if (StartBitOffset %
            Context.toBits(getAlignment(getIntNType(OffsetInRecord))) !=
        0)
      return false;
    return true;
  };

  // The start field is better as a single field run.
  bool StartFieldAsSingleRun = false;
  for (;;) {
    // Check to see if we need to start a new run.
    if (Run == FieldEnd) {
      // If we're out of fields, return.
      if (Field == FieldEnd)
        break;
      // Any non-zero-length bitfield can start a new run.
      if (!Field->isZeroLengthBitField(Context)) {
        Run = Field;
        StartBitOffset = getFieldBitOffset(*Field);
        Tail = StartBitOffset + Field->getBitWidthValue(Context);
        StartFieldAsSingleRun = IsBetterAsSingleFieldRun(Tail - StartBitOffset,
                                                         StartBitOffset);
      }
      ++Field;
      continue;
    }

    // If the start field of a new run is better as a single run, or
    // if the current field (or consecutive fields) is better as a single run,
    // or if the current field is a zero-width bitfield and either
    // UseZeroLengthBitfieldAlignment or UseBitFieldTypeAlignment is set to
    // true, or
    // if the offset of the current field is inconsistent with the offset of
    // the previous field plus its width,
    // skip the block below and go ahead to emit the storage.
    // Otherwise, try to add bitfields to the run.
    if (!StartFieldAsSingleRun && Field != FieldEnd &&
        !IsBetterAsSingleFieldRun(Tail - StartBitOffset, StartBitOffset) &&
        (!Field->isZeroLengthBitField(Context) ||
         (!Context.getTargetInfo().useZeroLengthBitfieldAlignment() &&
          !Context.getTargetInfo().useBitFieldTypeAlignment())) &&
        Tail == getFieldBitOffset(*Field)) {
      Tail += Field->getBitWidthValue(Context);
      ++Field;
      continue;
    }

    // We've hit a break-point in the run and need to emit a storage field.
    llvm::Type *Type = getIntNType(Tail - StartBitOffset);
    // Add the storage member to the record and set the bitfield info for all of
    // the bitfields in the run.  Bitfields get the offset of their storage but
    // come afterward and remain there after a stable sort.
    Members.push_back(StorageInfo(bitsToCharUnits(StartBitOffset), Type));
    for (; Run != Field; ++Run)
      Members.push_back(MemberInfo(bitsToCharUnits(StartBitOffset),
                                   MemberInfo::Field, nullptr, *Run));
    Run = FieldEnd;
    StartFieldAsSingleRun = false;
  }
}
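
// As an illustrative sketch (with the default, non-fine-grained options): in
//   struct S { unsigned a : 3; unsigned b : 5; unsigned c : 10; };
// the three bitfields are contiguous, so they accumulate into a single 18-bit
// run that is emitted as one i24 storage member at offset 0.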

void CGRecordLowering::accumulateBases() {
  // If we've got a primary virtual base, we need to add it with the bases.
  if (Layout.isPrimaryBaseVirtual()) {
    const CXXRecordDecl *BaseDecl = Layout.getPrimaryBase();
    Members.push_back(MemberInfo(CharUnits::Zero(), MemberInfo::Base,
                                 getStorageType(BaseDecl), BaseDecl));
  }
  // Accumulate the non-virtual bases.
  for (const auto &Base : RD->bases()) {
    if (Base.isVirtual())
      continue;

    // Bases can be zero-sized even if not technically empty if they
    // contain only a trailing array member.
    const CXXRecordDecl *BaseDecl = Base.getType()->getAsCXXRecordDecl();
    if (!BaseDecl->isEmpty() &&
        !Context.getASTRecordLayout(BaseDecl).getNonVirtualSize().isZero())
      Members.push_back(MemberInfo(Layout.getBaseClassOffset(BaseDecl),
          MemberInfo::Base, getStorageType(BaseDecl), BaseDecl));
  }
}

/// The AAPCS defines that, when possible, bit-fields should
/// be accessed using containers of the declared type width:
/// When a volatile bit-field is read, and its container does not overlap with
/// any non-bit-field member or any zero length bit-field member, its container
/// must be read exactly once using the access width appropriate to the type of
/// the container. When a volatile bit-field is written, and its container does
/// not overlap with any non-bit-field member or any zero-length bit-field
/// member, its container must be read exactly once and written exactly once
/// using the access width appropriate to the type of the container. The two
/// accesses are not atomic.
///
/// Enforcing the width restriction can be disabled using
/// -fno-aapcs-bitfield-width.
void CGRecordLowering::computeVolatileBitfields() {
  if (!isAAPCS() || !Types.getCodeGenOpts().AAPCSBitfieldWidth)
    return;

  for (auto &I : BitFields) {
    const FieldDecl *Field = I.first;
    CGBitFieldInfo &Info = I.second;
    llvm::Type *ResLTy = Types.ConvertTypeForMem(Field->getType());
    // If the record alignment is less than the type width, we can't enforce an
    // aligned load, bail out.
    if ((uint64_t)(Context.toBits(Layout.getAlignment())) <
        ResLTy->getPrimitiveSizeInBits())
      continue;
    // CGRecordLowering::setBitFieldInfo() pre-adjusts the bit-field offsets
    // for big-endian targets, but it assumes a container of width
    // Info.StorageSize. Since AAPCS uses a different container size (width
    // of the type), we first undo that calculation here and redo it once
    // the bit-field offset within the new container is calculated.
    const unsigned OldOffset =
        isBE() ? Info.StorageSize - (Info.Offset + Info.Size) : Info.Offset;
    // Offset to the bit-field from the beginning of the struct.
    const unsigned AbsoluteOffset =
        Context.toBits(Info.StorageOffset) + OldOffset;

    // Container size is the width of the bit-field type.
    const unsigned StorageSize = ResLTy->getPrimitiveSizeInBits();
    // Nothing to do if the access uses the desired
    // container width and is naturally aligned.
    if (Info.StorageSize == StorageSize && (OldOffset % StorageSize == 0))
      continue;

    // Offset within the container.
    unsigned Offset = AbsoluteOffset & (StorageSize - 1);
    // Bail out if an aligned load of the container cannot cover the entire
    // bit-field. This can happen, for example, if the bit-field is part of a
    // packed struct. AAPCS does not define access rules for such cases, so we
    // let clang follow its own rules.
    if (Offset + Info.Size > StorageSize)
      continue;

    // Re-adjust offsets for big-endian targets.
    if (isBE())
      Offset = StorageSize - (Offset + Info.Size);

    const CharUnits StorageOffset =
        Context.toCharUnitsFromBits(AbsoluteOffset & ~(StorageSize - 1));
    const CharUnits End = StorageOffset +
                          Context.toCharUnitsFromBits(StorageSize) -
                          CharUnits::One();

    const ASTRecordLayout &Layout =
        Context.getASTRecordLayout(Field->getParent());
    // If the access would touch memory outside the record, bail out.
    const CharUnits RecordSize = Layout.getSize();
    if (End >= RecordSize)
      continue;

    // Bail out if performing this load would access non-bit-field members.
    bool Conflict = false;
    for (const auto *F : D->fields()) {
      // Allow overlap with sized bit-fields.
      if (F->isBitField() && !F->isZeroLengthBitField(Context))
        continue;

      const CharUnits FOffset = Context.toCharUnitsFromBits(
          Layout.getFieldOffset(F->getFieldIndex()));

      // As C11 defines, a zero sized bit-field defines a barrier, so
      // fields after and before it should be race condition free.
      // The AAPCS acknowledges it and imposes no restrictions when the
      // natural container overlaps a zero-length bit-field.
      if (F->isZeroLengthBitField(Context)) {
        if (End > FOffset && StorageOffset < FOffset) {
          Conflict = true;
          break;
        }
      }

      const CharUnits FEnd =
          FOffset +
          Context.toCharUnitsFromBits(
              Types.ConvertTypeForMem(F->getType())->getPrimitiveSizeInBits()) -
          CharUnits::One();
      // If no overlap, continue.
      if (End < FOffset || FEnd < StorageOffset)
        continue;

      // The desired load overlaps a non-bit-field member, bail out.
      Conflict = true;
      break;
    }

    if (Conflict)
      continue;
    // Write the new bit-field access parameters.
    // As the storage offset now is defined as the number of elements from the
    // start of the structure, we should divide the Offset by the element size.
    Info.VolatileStorageOffset =
        StorageOffset / Context.toCharUnitsFromBits(StorageSize).getQuantity();
    Info.VolatileStorageSize = StorageSize;
    Info.VolatileOffset = Offset;
  }
}
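
// As an illustrative sketch (assuming an AAPCS, little-endian target):
//   struct S { volatile int a : 3; volatile int b : 5; };
// setBitFieldInfo() packs both bit-fields into an i8 container, but the pass
// above widens the volatile access parameters to a 32-bit container at offset
// 0, since an aligned i32 load stays inside the record and overlaps no
// non-bit-field member.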

void CGRecordLowering::accumulateVPtrs() {
  if (Layout.hasOwnVFPtr())
    Members.push_back(MemberInfo(CharUnits::Zero(), MemberInfo::VFPtr,
        llvm::FunctionType::get(getIntNType(32), /*isVarArg=*/true)->
            getPointerTo()->getPointerTo()));
  if (Layout.hasOwnVBPtr())
    Members.push_back(MemberInfo(Layout.getVBPtrOffset(), MemberInfo::VBPtr,
        llvm::Type::getInt32PtrTy(Types.getLLVMContext())));
}

void CGRecordLowering::accumulateVBases() {
  CharUnits ScissorOffset = Layout.getNonVirtualSize();
  // In the itanium ABI, it's possible to place a vbase at a dsize that is
  // smaller than the nvsize.  Here we check to see if such a base is placed
  // before the nvsize and set the scissor offset to that, instead of the
  // nvsize.
  if (isOverlappingVBaseABI())
    for (const auto &Base : RD->vbases()) {
      const CXXRecordDecl *BaseDecl = Base.getType()->getAsCXXRecordDecl();
      if (BaseDecl->isEmpty())
        continue;
      // If the vbase is a primary virtual base of some base, then it doesn't
      // get its own storage location but instead lives inside of that base.
      if (Context.isNearlyEmpty(BaseDecl) && !hasOwnStorage(RD, BaseDecl))
        continue;
      ScissorOffset = std::min(ScissorOffset,
                               Layout.getVBaseClassOffset(BaseDecl));
    }
  Members.push_back(MemberInfo(ScissorOffset, MemberInfo::Scissor, nullptr,
                               RD));
  for (const auto &Base : RD->vbases()) {
    const CXXRecordDecl *BaseDecl = Base.getType()->getAsCXXRecordDecl();
    if (BaseDecl->isEmpty())
      continue;
    CharUnits Offset = Layout.getVBaseClassOffset(BaseDecl);
    // If the vbase is a primary virtual base of some base, then it doesn't
    // get its own storage location but instead lives inside of that base.
    if (isOverlappingVBaseABI() &&
        Context.isNearlyEmpty(BaseDecl) &&
        !hasOwnStorage(RD, BaseDecl)) {
      Members.push_back(MemberInfo(Offset, MemberInfo::VBase, nullptr,
                                   BaseDecl));
      continue;
    }
    // If we've got a vtordisp, add it as a storage type.
    if (Layout.getVBaseOffsetsMap().find(BaseDecl)->second.hasVtorDisp())
      Members.push_back(StorageInfo(Offset - CharUnits::fromQuantity(4),
                                    getIntNType(32)));
    Members.push_back(MemberInfo(Offset, MemberInfo::VBase,
                                 getStorageType(BaseDecl), BaseDecl));
  }
}

bool CGRecordLowering::hasOwnStorage(const CXXRecordDecl *Decl,
                                     const CXXRecordDecl *Query) {
  const ASTRecordLayout &DeclLayout = Context.getASTRecordLayout(Decl);
  if (DeclLayout.isPrimaryBaseVirtual() && DeclLayout.getPrimaryBase() == Query)
    return false;
  for (const auto &Base : Decl->bases())
    if (!hasOwnStorage(Base.getType()->getAsCXXRecordDecl(), Query))
      return false;
  return true;
}

void CGRecordLowering::calculateZeroInit() {
  for (std::vector<MemberInfo>::const_iterator Member = Members.begin(),
                                               MemberEnd = Members.end();
       IsZeroInitializableAsBase && Member != MemberEnd; ++Member) {
    if (Member->Kind == MemberInfo::Field) {
      if (!Member->FD || isZeroInitializable(Member->FD))
        continue;
      IsZeroInitializable = IsZeroInitializableAsBase = false;
    } else if (Member->Kind == MemberInfo::Base ||
               Member->Kind == MemberInfo::VBase) {
      if (isZeroInitializable(Member->RD))
        continue;
      IsZeroInitializable = false;
      if (Member->Kind == MemberInfo::Base)
        IsZeroInitializableAsBase = false;
    }
  }
}
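// For example, a field whose type is a pointer to data member is not zero
// initializable in the Itanium ABI, where the null value of such a pointer is
// represented as -1 rather than all-zero bits.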

void CGRecordLowering::clipTailPadding() {
  std::vector<MemberInfo>::iterator Prior = Members.begin();
  CharUnits Tail = getSize(Prior->Data);
  for (std::vector<MemberInfo>::iterator Member = Prior + 1,
                                         MemberEnd = Members.end();
       Member != MemberEnd; ++Member) {
    // Only members with data and the scissor can cut into tail padding.
    if (!Member->Data && Member->Kind != MemberInfo::Scissor)
      continue;
    if (Member->Offset < Tail) {
      assert(Prior->Kind == MemberInfo::Field &&
             "Only storage fields have tail padding!");
      if (!Prior->FD || Prior->FD->isBitField())
        Prior->Data = getByteArrayType(bitsToCharUnits(llvm::alignTo(
            cast<llvm::IntegerType>(Prior->Data)->getIntegerBitWidth(), 8)));
      else {
        assert(Prior->FD->hasAttr<NoUniqueAddressAttr>() &&
               "should not have reused this field's tail padding");
        Prior->Data = getByteArrayType(
            Context.getTypeInfoDataSizeInChars(Prior->FD->getType()).Width);
      }
    }
    if (Member->Data)
      Prior = Member;
    Tail = Prior->Offset + getSize(Prior->Data);
  }
}

void CGRecordLowering::determinePacked(bool NVBaseType) {
  if (Packed)
    return;
  CharUnits Alignment = CharUnits::One();
  CharUnits NVAlignment = CharUnits::One();
  CharUnits NVSize =
      !NVBaseType && RD ? Layout.getNonVirtualSize() : CharUnits::Zero();
  for (std::vector<MemberInfo>::const_iterator Member = Members.begin(),
                                               MemberEnd = Members.end();
       Member != MemberEnd; ++Member) {
    if (!Member->Data)
      continue;
    // If any member falls at an offset that is not a multiple of its
    // alignment, then the entire record must be packed.
    if (Member->Offset % getAlignment(Member->Data))
      Packed = true;
    if (Member->Offset < NVSize)
      NVAlignment = std::max(NVAlignment, getAlignment(Member->Data));
    Alignment = std::max(Alignment, getAlignment(Member->Data));
  }
  // If the size of the record (the capstone's offset) is not a multiple of the
  // record's alignment, it must be packed.
  if (Members.back().Offset % Alignment)
    Packed = true;
  // If the non-virtual sub-object is not a multiple of the non-virtual
  // sub-object's alignment, it must be packed.  We cannot have a packed
  // non-virtual sub-object and an unpacked complete object or vice versa.
  if (NVSize % NVAlignment)
    Packed = true;
  // Update the alignment of the sentinel.
  if (!Packed)
    Members.back().Data = getIntNType(Context.toBits(Alignment));
}
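
// As an illustrative sketch: with '#pragma pack(1)' in effect,
//   struct S { char a; int b; };
// places 'b' at offset 1, which is not a multiple of i32's alignment, so the
// record is lowered as a packed LLVM struct <{ i8, i32 }>.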

void CGRecordLowering::insertPadding() {
  std::vector<std::pair<CharUnits, CharUnits> > Padding;
  CharUnits Size = CharUnits::Zero();
  for (std::vector<MemberInfo>::const_iterator Member = Members.begin(),
                                               MemberEnd = Members.end();
       Member != MemberEnd; ++Member) {
    if (!Member->Data)
      continue;
    CharUnits Offset = Member->Offset;
    assert(Offset >= Size);
    // Insert padding if we need to.
    if (Offset !=
        Size.alignTo(Packed ? CharUnits::One() : getAlignment(Member->Data)))
      Padding.push_back(std::make_pair(Size, Offset - Size));
    Size = Offset + getSize(Member->Data);
  }
  if (Padding.empty())
    return;
  // Add the padding to the Members list and sort it.
  for (std::vector<std::pair<CharUnits, CharUnits> >::const_iterator
        Pad = Padding.begin(), PadEnd = Padding.end();
        Pad != PadEnd; ++Pad)
    Members.push_back(StorageInfo(Pad->first, getByteArrayType(Pad->second)));
  llvm::stable_sort(Members);
}
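
// For example (a sketch): 'struct alignas(8) S { int a; };' has an AST size
// of 8 bytes, but its only member ends at offset 4, so explicit tail padding
// is inserted and the record lowers to { i32, [4 x i8] }.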

void CGRecordLowering::fillOutputFields() {
  for (std::vector<MemberInfo>::const_iterator Member = Members.begin(),
                                               MemberEnd = Members.end();
       Member != MemberEnd; ++Member) {
    if (Member->Data)
      FieldTypes.push_back(Member->Data);
    if (Member->Kind == MemberInfo::Field) {
      if (Member->FD)
        Fields[Member->FD->getCanonicalDecl()] = FieldTypes.size() - 1;
      // A field without storage must be a bitfield.
      if (!Member->Data)
        setBitFieldInfo(Member->FD, Member->Offset, FieldTypes.back());
    } else if (Member->Kind == MemberInfo::Base)
      NonVirtualBases[Member->RD] = FieldTypes.size() - 1;
    else if (Member->Kind == MemberInfo::VBase)
      VirtualBases[Member->RD] = FieldTypes.size() - 1;
  }
}

CGBitFieldInfo CGBitFieldInfo::MakeInfo(CodeGenTypes &Types,
                                        const FieldDecl *FD,
                                        uint64_t Offset, uint64_t Size,
                                        uint64_t StorageSize,
                                        CharUnits StorageOffset) {
  // This function is vestigial from CGRecordLayoutBuilder days but is still
  // used in GCObjCRuntime.cpp.  That usage has a "fixme" attached to it that
  // when addressed will allow for the removal of this function.
  llvm::Type *Ty = Types.ConvertTypeForMem(FD->getType());
  CharUnits TypeSizeInBytes =
    CharUnits::fromQuantity(Types.getDataLayout().getTypeAllocSize(Ty));
  uint64_t TypeSizeInBits = Types.getContext().toBits(TypeSizeInBytes);

  bool IsSigned = FD->getType()->isSignedIntegerOrEnumerationType();

  if (Size > TypeSizeInBits) {
    // We have a wide bit-field. The extra bits are only used for padding, so
    // if we have a bitfield of type T, with size N:
    //
    // T t : N;
    //
    // We can just assume that it's:
    //
    // T t : sizeof(T);
    //
    Size = TypeSizeInBits;
  }

  // Reverse the bit offsets for big endian machines. Because we represent
  // a bitfield as a single large integer load, we can imagine the bits
  // counting from the most-significant-bit instead of the
  // least-significant-bit.
  if (Types.getDataLayout().isBigEndian()) {
    Offset = StorageSize - (Offset + Size);
  }

  return CGBitFieldInfo(Offset, Size, IsSigned, StorageSize, StorageOffset);
}
87606f32e7eSjoerg 
877*13fbcb42Sjoerg std::unique_ptr<CGRecordLayout>
ComputeRecordLayout(const RecordDecl * D,llvm::StructType * Ty)878*13fbcb42Sjoerg CodeGenTypes::ComputeRecordLayout(const RecordDecl *D, llvm::StructType *Ty) {
87906f32e7eSjoerg   CGRecordLowering Builder(*this, D, /*Packed=*/false);
88006f32e7eSjoerg 
88106f32e7eSjoerg   Builder.lower(/*NonVirtualBaseType=*/false);
88206f32e7eSjoerg 
88306f32e7eSjoerg   // If we're in C++, compute the base subobject type.
  llvm::StructType *BaseTy = nullptr;
  if (isa<CXXRecordDecl>(D) && !D->isUnion() && !D->hasAttr<FinalAttr>()) {
    BaseTy = Ty;
    if (Builder.Layout.getNonVirtualSize() != Builder.Layout.getSize()) {
      CGRecordLowering BaseBuilder(*this, D, /*Packed=*/Builder.Packed);
      BaseBuilder.lower(/*NonVirtualBaseType=*/true);
      BaseTy = llvm::StructType::create(
          getLLVMContext(), BaseBuilder.FieldTypes, "", BaseBuilder.Packed);
      addRecordTypeName(D, BaseTy, ".base");
      // BaseTy and Ty must agree on their packedness for getLLVMFieldNo to
      // work on both of them with the same index.
      assert(Builder.Packed == BaseBuilder.Packed &&
             "Non-virtual and complete types must agree on packedness");
    }
  }

  // Fill in the struct *after* computing the base type.  Filling in the body
  // signifies that the type is no longer opaque and record layout is complete,
  // but we may need to recursively lay out D while laying D out as a base type.
  Ty->setBody(Builder.FieldTypes, Builder.Packed);

  auto RL = std::make_unique<CGRecordLayout>(
      Ty, BaseTy, (bool)Builder.IsZeroInitializable,
      (bool)Builder.IsZeroInitializableAsBase);

  RL->NonVirtualBases.swap(Builder.NonVirtualBases);
  RL->CompleteObjectVirtualBases.swap(Builder.VirtualBases);

  // Add all the field numbers.
  RL->FieldInfo.swap(Builder.Fields);

  // Add bitfield info.
  RL->BitFields.swap(Builder.BitFields);

  // Dump the layout, if requested.
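  // (This corresponds to the cc1 option -fdump-record-layouts.)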
  if (getContext().getLangOpts().DumpRecordLayouts) {
    llvm::outs() << "\n*** Dumping IRgen Record Layout\n";
    llvm::outs() << "Record: ";
    D->dump(llvm::outs());
    llvm::outs() << "\nLayout: ";
    RL->print(llvm::outs());
  }

#ifndef NDEBUG
  // Verify that the computed LLVM struct size matches the AST layout size.
  const ASTRecordLayout &Layout = getContext().getASTRecordLayout(D);

  uint64_t TypeSizeInBits = getContext().toBits(Layout.getSize());
  assert(TypeSizeInBits == getDataLayout().getTypeAllocSizeInBits(Ty) &&
         "Type size mismatch!");

  if (BaseTy) {
    CharUnits NonVirtualSize = Layout.getNonVirtualSize();

    uint64_t AlignedNonVirtualTypeSizeInBits =
      getContext().toBits(NonVirtualSize);

    assert(AlignedNonVirtualTypeSizeInBits ==
           getDataLayout().getTypeAllocSizeInBits(BaseTy) &&
           "Type size mismatch!");
  }

  // Verify that the LLVM and AST field offsets agree.
  llvm::StructType *ST = RL->getLLVMType();
  const llvm::StructLayout *SL = getDataLayout().getStructLayout(ST);

  const ASTRecordLayout &AST_RL = getContext().getASTRecordLayout(D);
  RecordDecl::field_iterator it = D->field_begin();
  for (unsigned i = 0, e = AST_RL.getFieldCount(); i != e; ++i, ++it) {
    const FieldDecl *FD = *it;

    // Ignore zero-sized fields.
    if (FD->isZeroSize(getContext()))
      continue;
    // For non-bit-fields, just check that the LLVM struct offset matches the
    // AST offset.
    if (!FD->isBitField()) {
      unsigned FieldNo = RL->getLLVMFieldNo(FD);
      assert(AST_RL.getFieldOffset(i) == SL->getElementOffsetInBits(FieldNo) &&
             "Invalid field offset!");
      continue;
    }

    // Ignore unnamed bit-fields.
    if (!FD->getDeclName())
      continue;

    const CGBitFieldInfo &Info = RL->getBitFieldInfo(FD);
    llvm::Type *ElementTy = ST->getTypeAtIndex(RL->getLLVMFieldNo(FD));

    // Unions have overlapping elements dictating their layout, but for
    // non-unions we can verify that this section of the layout is the exact
    // expected size.
    if (D->isUnion()) {
      // For unions we verify that the start is zero and the size
      // is in-bounds. However, on BE systems, the offset may be non-zero, but
      // the size + offset should match the storage size in that case as it
      // "starts" at the back.
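      // Hypothetical illustration: an 8-bit field in a 32-bit storage unit
      // has Offset = 0 on little-endian, but after the big-endian offset
      // reversal it has Offset = 32 - (0 + 8) = 24, so Offset + Size equals
      // the StorageSize of 32.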
      if (getDataLayout().isBigEndian())
        assert(static_cast<unsigned>(Info.Offset + Info.Size) ==
               Info.StorageSize &&
               "Big endian union bitfield does not end at the back");
      else
        assert(Info.Offset == 0 &&
               "Little endian union bitfield with a non-zero offset");
      assert(Info.StorageSize <= SL->getSizeInBits() &&
             "Union not large enough for bitfield storage");
    } else {
      assert((Info.StorageSize ==
                  getDataLayout().getTypeAllocSizeInBits(ElementTy) ||
              Info.VolatileStorageSize ==
                  getDataLayout().getTypeAllocSizeInBits(ElementTy)) &&
             "Storage size does not match the element type size");
    }
    assert(Info.Size > 0 && "Empty bitfield!");
    assert(static_cast<unsigned>(Info.Offset) + Info.Size <= Info.StorageSize &&
           "Bitfield outside of its allocated storage");
  }
#endif

  return RL;
}

void CGRecordLayout::print(raw_ostream &OS) const {
  OS << "<CGRecordLayout\n";
  OS << "  LLVMType:" << *CompleteObjectType << "\n";
  if (BaseSubobjectType)
    OS << "  NonVirtualBaseLLVMType:" << *BaseSubobjectType << "\n";
  OS << "  IsZeroInitializable:" << IsZeroInitializable << "\n";
  OS << "  BitFields:[\n";

  // Print bit-field infos in declaration order.
  std::vector<std::pair<unsigned, const CGBitFieldInfo *>> BFIs;
  for (const auto &BitField : BitFields) {
    const RecordDecl *RD = BitField.first->getParent();
    unsigned Index = 0;
    for (RecordDecl::field_iterator it2 = RD->field_begin();
         *it2 != BitField.first; ++it2)
      ++Index;
    BFIs.push_back(std::make_pair(Index, &BitField.second));
  }
  llvm::array_pod_sort(BFIs.begin(), BFIs.end());
  for (unsigned i = 0, e = BFIs.size(); i != e; ++i) {
    OS.indent(4);
    BFIs[i].second->print(OS);
    OS << "\n";
  }

  OS << "]>\n";
}

LLVM_DUMP_METHOD void CGRecordLayout::dump() const {
  print(llvm::errs());
}

void CGBitFieldInfo::print(raw_ostream &OS) const {
  OS << "<CGBitFieldInfo"
     << " Offset:" << Offset << " Size:" << Size << " IsSigned:" << IsSigned
     << " StorageSize:" << StorageSize
     << " StorageOffset:" << StorageOffset.getQuantity()
     << " VolatileOffset:" << VolatileOffset
     << " VolatileStorageSize:" << VolatileStorageSize
     << " VolatileStorageOffset:" << VolatileStorageOffset.getQuantity() << ">";
}

LLVM_DUMP_METHOD void CGBitFieldInfo::dump() const {
  print(llvm::errs());
}