//===--- CGRecordLayoutBuilder.cpp - CGRecordLayout builder  ----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Builder implementation for CGRecordLayout objects.
//
//===----------------------------------------------------------------------===//

#include "CGRecordLayout.h"
#include "CGCXXABI.h"
#include "CodeGenTypes.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/CXXInheritance.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/Expr.h"
#include "clang/AST/RecordLayout.h"
#include "clang/Basic/CodeGenOptions.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
using namespace clang;
using namespace CodeGen;

namespace {
/// The CGRecordLowering is responsible for lowering an ASTRecordLayout to an
/// llvm::Type.  Some of the lowering is straightforward, some is not.  Here we
/// detail some of the complexities and weirdnesses.
/// * LLVM does not have unions - Unions can, in theory, be represented by any
///   llvm::Type with the correct size.  We choose a field via a specific
///   heuristic and add padding if necessary.
/// * LLVM does not have bitfields - Bitfields are collected into contiguous
///   runs and allocated as a single storage type for the run.  ASTRecordLayout
///   contains enough information to determine where the runs break.  Microsoft
///   and Itanium follow different rules and use different codepaths.
/// * It is desired that, when possible, bitfields use the appropriate iN type
///   when lowered to llvm types.  For example unsigned x : 24 gets lowered to
///   i24.  This isn't always possible because i24 has a storage size of 32
///   bits, and if that extra byte of padding can be used we must use
///   [i8 x 3] instead of i24.  The function clipTailPadding does this.
///   C++ examples that require clipping:
///   struct { int a : 24; char b; }; // a must be clipped, b goes at offset 3
///   struct A { int a : 24; }; // a must be clipped because a struct like B
///   could exist: struct B : A { char b; }; // b goes at offset 3
/// * Clang ignores 0 sized bitfields and 0 sized bases but *not* zero sized
///   fields.  The existing asserts suggest that LLVM assumes that *every* field
///   has an underlying storage type.  Therefore empty structures containing
///   zero sized subobjects such as empty records or zero sized arrays still get
///   a zero sized (empty struct) storage type.
/// * Clang reads the complete type rather than the base type when generating
///   code to access fields.  Bitfields in tail position with tail padding may
///   be clipped in the base class but not the complete class (we may discover
///   that the tail padding is not used in the complete class.) However,
///   because LLVM reads from the complete type it can generate incorrect code
///   if we do not clip the tail padding off of the bitfield in the complete
///   layout.  This introduces a somewhat awkward extra unnecessary clip stage.
///   The location of the clip is stored internally as a sentinel of type
///   SCISSOR.  If LLVM were updated to read base types (which it probably
///   should because locations of things such as VBases are bogus in the llvm
///   type anyway) then we could eliminate the SCISSOR.
/// * Itanium allows nearly empty primary virtual bases.  These bases don't
///   get their own storage because they're laid out as part of another base
///   or at the beginning of the structure.  Determining if a VBase actually
///   gets storage awkwardly involves a walk of all bases.
/// * VFPtrs and VBPtrs do *not* make a record NotZeroInitializable.
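/// * As an illustrative sketch of the net effect (exact choices are target-
///   and ABI-dependent, so this is not normative):
///     struct S { char a; int b : 20; };
///   is typically lowered to
///     %struct.S = type { i8, [3 x i8] }
///   where the bitfield run first receives an i24 storage member at offset 1
///   and is then clipped to a byte array because the capstone overlaps its
///   tail padding.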
struct CGRecordLowering {
  // MemberInfo is a helper structure that contains information about a record
  // member.  In addition to the standard member types, there exists a
  // sentinel member type that ensures correct rounding.
  struct MemberInfo {
    CharUnits Offset;
    enum InfoKind { VFPtr, VBPtr, Field, Base, VBase, Scissor } Kind;
    llvm::Type *Data;
    union {
      const FieldDecl *FD;
      const CXXRecordDecl *RD;
    };
    MemberInfo(CharUnits Offset, InfoKind Kind, llvm::Type *Data,
               const FieldDecl *FD = nullptr)
      : Offset(Offset), Kind(Kind), Data(Data), FD(FD) {}
    MemberInfo(CharUnits Offset, InfoKind Kind, llvm::Type *Data,
               const CXXRecordDecl *RD)
      : Offset(Offset), Kind(Kind), Data(Data), RD(RD) {}
    // MemberInfos are sorted so we define a < operator.
    bool operator <(const MemberInfo& a) const { return Offset < a.Offset; }
  };
  // The constructor.
  CGRecordLowering(CodeGenTypes &Types, const RecordDecl *D, bool Packed);
  // Short helper routines.
  /// Constructs a MemberInfo instance from an offset and llvm::Type *.
  MemberInfo StorageInfo(CharUnits Offset, llvm::Type *Data) {
    return MemberInfo(Offset, MemberInfo::Field, Data);
  }

  /// The Microsoft bitfield layout rule allocates discrete storage
  /// units of the field's formal type and only combines adjacent
  /// fields of the same formal type.  We want to emit a layout with
  /// these discrete storage units instead of combining them into a
  /// continuous run.
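  /// For example (illustrative): in struct { short a : 3; short b : 3;
  /// int c : 3; }, 'a' and 'b' share one i16 storage unit, while 'c' starts
  /// a new i32 unit because its formal type differs from the previous run's.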
  bool isDiscreteBitFieldABI() {
    return Context.getTargetInfo().getCXXABI().isMicrosoft() ||
           D->isMsStruct(Context);
  }

  /// Helper function to check if we are targeting AAPCS.
  bool isAAPCS() const {
    return Context.getTargetInfo().getABI().starts_with("aapcs");
  }

  /// Helper function to check if the target machine is BigEndian.
  bool isBE() const { return Context.getTargetInfo().isBigEndian(); }

  /// The Itanium base layout rule allows virtual bases to overlap
  /// other bases, which complicates layout in specific ways.
  ///
  /// Note specifically that the ms_struct attribute doesn't change this.
  bool isOverlappingVBaseABI() {
    return !Context.getTargetInfo().getCXXABI().isMicrosoft();
  }

  /// Wraps llvm::Type::getIntNTy with some implicit arguments.
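  /// For example (illustrative), getIntNType(20) yields i24: bit widths are
  /// rounded up to a whole number of char units.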
  llvm::Type *getIntNType(uint64_t NumBits) {
    unsigned AlignedBits = llvm::alignTo(NumBits, Context.getCharWidth());
    return llvm::Type::getIntNTy(Types.getLLVMContext(), AlignedBits);
  }
  /// Gets the LLVM type sized as one character unit.
  llvm::Type *getCharType() {
    return llvm::Type::getIntNTy(Types.getLLVMContext(),
                                 Context.getCharWidth());
  }
  /// Gets an llvm type of size NumChars and alignment 1.
  llvm::Type *getByteArrayType(CharUnits NumChars) {
    assert(!NumChars.isZero() && "Empty byte arrays aren't allowed.");
    llvm::Type *Type = getCharType();
    return NumChars == CharUnits::One() ? Type :
        (llvm::Type *)llvm::ArrayType::get(Type, NumChars.getQuantity());
  }
  /// Gets the storage type for a field decl and handles storage
  /// for Itanium bitfields that are smaller than their declared type.
  llvm::Type *getStorageType(const FieldDecl *FD) {
    llvm::Type *Type = Types.ConvertTypeForMem(FD->getType());
    if (!FD->isBitField()) return Type;
    if (isDiscreteBitFieldABI()) return Type;
    return getIntNType(std::min(FD->getBitWidthValue(Context),
                                (unsigned)Context.toBits(getSize(Type))));
  }
  /// Gets the llvm BaseSubobject type from a CXXRecordDecl.
  llvm::Type *getStorageType(const CXXRecordDecl *RD) {
    return Types.getCGRecordLayout(RD).getBaseSubobjectLLVMType();
  }
  CharUnits bitsToCharUnits(uint64_t BitOffset) {
    return Context.toCharUnitsFromBits(BitOffset);
  }
  CharUnits getSize(llvm::Type *Type) {
    return CharUnits::fromQuantity(DataLayout.getTypeAllocSize(Type));
  }
  CharUnits getAlignment(llvm::Type *Type) {
    return CharUnits::fromQuantity(DataLayout.getABITypeAlign(Type));
  }
  bool isZeroInitializable(const FieldDecl *FD) {
    return Types.isZeroInitializable(FD->getType());
  }
  bool isZeroInitializable(const RecordDecl *RD) {
    return Types.isZeroInitializable(RD);
  }
  void appendPaddingBytes(CharUnits Size) {
    if (!Size.isZero())
      FieldTypes.push_back(getByteArrayType(Size));
  }
  uint64_t getFieldBitOffset(const FieldDecl *FD) {
    return Layout.getFieldOffset(FD->getFieldIndex());
  }
  // Layout routines.
  void setBitFieldInfo(const FieldDecl *FD, CharUnits StartOffset,
                       llvm::Type *StorageType);
  /// Lowers an ASTRecordLayout to an llvm type.
  void lower(bool NonVirtualBaseType);
  void lowerUnion(bool isNoUniqueAddress);
  void accumulateFields();
  void accumulateBitFields(RecordDecl::field_iterator Field,
                           RecordDecl::field_iterator FieldEnd);
  void computeVolatileBitfields();
  void accumulateBases();
  void accumulateVPtrs();
  void accumulateVBases();
  /// Recursively searches all of the bases to find out if a vbase is
  /// not the primary vbase of some base class.
  bool hasOwnStorage(const CXXRecordDecl *Decl, const CXXRecordDecl *Query);
  void calculateZeroInit();
  /// Lowers bitfield storage types to I8 arrays for bitfields with tail
  /// padding that is or can potentially be used.
  void clipTailPadding();
  /// Determines if we need a packed llvm struct.
  void determinePacked(bool NVBaseType);
  /// Inserts padding everywhere it's needed.
  void insertPadding();
  /// Fills out the structures that are ultimately consumed.
  void fillOutputFields();
  // Input memoization fields.
  CodeGenTypes &Types;
  const ASTContext &Context;
  const RecordDecl *D;
  const CXXRecordDecl *RD;
  const ASTRecordLayout &Layout;
  const llvm::DataLayout &DataLayout;
  // Helpful intermediate data-structures.
  std::vector<MemberInfo> Members;
  // Output fields, consumed by CodeGenTypes::ComputeRecordLayout.
  SmallVector<llvm::Type *, 16> FieldTypes;
  llvm::DenseMap<const FieldDecl *, unsigned> Fields;
  llvm::DenseMap<const FieldDecl *, CGBitFieldInfo> BitFields;
  llvm::DenseMap<const CXXRecordDecl *, unsigned> NonVirtualBases;
  llvm::DenseMap<const CXXRecordDecl *, unsigned> VirtualBases;
  bool IsZeroInitializable : 1;
  bool IsZeroInitializableAsBase : 1;
  bool Packed : 1;
private:
  CGRecordLowering(const CGRecordLowering &) = delete;
  void operator =(const CGRecordLowering &) = delete;
};
} // namespace

CGRecordLowering::CGRecordLowering(CodeGenTypes &Types, const RecordDecl *D,
                                   bool Packed)
    : Types(Types), Context(Types.getContext()), D(D),
      RD(dyn_cast<CXXRecordDecl>(D)),
      Layout(Types.getContext().getASTRecordLayout(D)),
      DataLayout(Types.getDataLayout()), IsZeroInitializable(true),
      IsZeroInitializableAsBase(true), Packed(Packed) {}

void CGRecordLowering::setBitFieldInfo(
    const FieldDecl *FD, CharUnits StartOffset, llvm::Type *StorageType) {
  CGBitFieldInfo &Info = BitFields[FD->getCanonicalDecl()];
  Info.IsSigned = FD->getType()->isSignedIntegerOrEnumerationType();
  Info.Offset = (unsigned)(getFieldBitOffset(FD) - Context.toBits(StartOffset));
  Info.Size = FD->getBitWidthValue(Context);
  Info.StorageSize = (unsigned)DataLayout.getTypeAllocSizeInBits(StorageType);
  Info.StorageOffset = StartOffset;
  if (Info.Size > Info.StorageSize)
    Info.Size = Info.StorageSize;
  // Reverse the bit offsets for big endian machines. Because we represent
  // a bitfield as a single large integer load, we can imagine the bits
  // counting from the most-significant-bit instead of the
  // least-significant-bit.
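  // For example (illustrative): with StorageSize == 32, Size == 8, and a
  // little-endian Offset of 0, the big-endian Offset becomes
  // 32 - (0 + 8) == 24.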
  if (DataLayout.isBigEndian())
    Info.Offset = Info.StorageSize - (Info.Offset + Info.Size);

  Info.VolatileStorageSize = 0;
  Info.VolatileOffset = 0;
  Info.VolatileStorageOffset = CharUnits::Zero();
}

void CGRecordLowering::lower(bool NVBaseType) {
  // The lowering process implemented in this function takes a variety of
  // carefully ordered phases.
  // 1) Store all members (fields and bases) in a list and sort them by offset.
  // 2) Add a 1-byte capstone member at the Size of the structure.
  // 3) Clip bitfield storage members if their tail padding is or might be
  //    used by another field or base.  The clipping process uses the capstone
  //    by treating it as another object that occurs after the record.
  // 4) Determine if the llvm-struct requires packing.  It's important that this
  //    phase occur after clipping, because clipping changes the llvm type.
  //    This phase reads the offset of the capstone when determining packedness
  //    and updates the alignment of the capstone to be equal to the alignment
  //    of the record after doing so.
  // 5) Insert padding everywhere it is needed.  This phase requires 'Packed' to
  //    have been computed and needs to know the alignment of the record in
  //    order to understand if explicit tail padding is needed.
  // 6) Remove the capstone, we don't need it anymore.
  // 7) Determine if this record can be zero-initialized.  This phase could have
  //    been placed anywhere after phase 1.
  // 8) Format the complete list of members in a way that can be consumed by
  //    CodeGenTypes::ComputeRecordLayout.
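  // As a sketch (illustrative): for struct { char a; int b : 20; }, phase 1
  // yields members { i8 @0, i24 @1 }, phase 2 appends an i8 capstone at
  // offset 4, phase 3 clips the i24 to [3 x i8] because the capstone lands
  // inside its tail padding, and phases 4 and 5 then find that no packing or
  // extra padding is required.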
  CharUnits Size = NVBaseType ? Layout.getNonVirtualSize() : Layout.getSize();
  if (D->isUnion()) {
    lowerUnion(NVBaseType);
    computeVolatileBitfields();
    return;
  }
  accumulateFields();
  // RD implies C++.
  if (RD) {
    accumulateVPtrs();
    accumulateBases();
    if (Members.empty()) {
      appendPaddingBytes(Size);
      computeVolatileBitfields();
      return;
    }
    if (!NVBaseType)
      accumulateVBases();
  }
  llvm::stable_sort(Members);
  Members.push_back(StorageInfo(Size, getIntNType(8)));
  clipTailPadding();
  determinePacked(NVBaseType);
  insertPadding();
  Members.pop_back();
  calculateZeroInit();
  fillOutputFields();
  computeVolatileBitfields();
}

void CGRecordLowering::lowerUnion(bool isNoUniqueAddress) {
  CharUnits LayoutSize =
      isNoUniqueAddress ? Layout.getDataSize() : Layout.getSize();
  llvm::Type *StorageType = nullptr;
  bool SeenNamedMember = false;
  // Iterate through the fields setting bitFieldInfo and the Fields array. Also
  // locate the "most appropriate" storage type.  The heuristic for finding the
  // storage type isn't necessary; the first (non-0-length-bitfield) field's
  // type would work fine and be simpler, but it would differ from what we've
  // been doing and cause lit tests to change.
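  // For example (illustrative): in union { char c; double d; int i; }, 'd' is
  // chosen as the storage type because it has the strictest alignment; among
  // equally aligned candidates, the larger type wins.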
  for (const auto *Field : D->fields()) {
    if (Field->isBitField()) {
      if (Field->isZeroLengthBitField(Context))
        continue;
      llvm::Type *FieldType = getStorageType(Field);
      if (LayoutSize < getSize(FieldType))
        FieldType = getByteArrayType(LayoutSize);
      setBitFieldInfo(Field, CharUnits::Zero(), FieldType);
    }
    Fields[Field->getCanonicalDecl()] = 0;
    llvm::Type *FieldType = getStorageType(Field);
    // Compute zero-initializable status.
    // This union might not be zero initialized: it may contain a pointer to
    // data member which might have some exotic initialization sequence.
    // If this is the case, then we ought not to try and come up with a "better"
    // type, it might not be very easy to come up with a Constant which
    // correctly initializes it.
    if (!SeenNamedMember) {
      SeenNamedMember = Field->getIdentifier();
      if (!SeenNamedMember)
        if (const auto *FieldRD = Field->getType()->getAsRecordDecl())
          SeenNamedMember = FieldRD->findFirstNamedDataMember();
      if (SeenNamedMember && !isZeroInitializable(Field)) {
        IsZeroInitializable = IsZeroInitializableAsBase = false;
        StorageType = FieldType;
      }
    }
    // Because our union isn't zero initializable, we won't be getting a better
    // storage type.
    if (!IsZeroInitializable)
      continue;
    // Conditionally update our storage type if we've got a new "better" one.
    if (!StorageType ||
        getAlignment(FieldType) >  getAlignment(StorageType) ||
        (getAlignment(FieldType) == getAlignment(StorageType) &&
        getSize(FieldType) > getSize(StorageType)))
      StorageType = FieldType;
  }
  // If we have no storage type just pad to the appropriate size and return.
  if (!StorageType)
    return appendPaddingBytes(LayoutSize);
  // If our storage size was bigger than our required size (can happen in the
  // case of packed bitfields on Itanium) then just use an I8 array.
  if (LayoutSize < getSize(StorageType))
    StorageType = getByteArrayType(LayoutSize);
  FieldTypes.push_back(StorageType);
  appendPaddingBytes(LayoutSize - getSize(StorageType));
  // Set packed if we need it.
  const auto StorageAlignment = getAlignment(StorageType);
  assert((Layout.getSize() % StorageAlignment == 0 ||
          Layout.getDataSize() % StorageAlignment) &&
         "Union's standard layout and no_unique_address layout must agree on "
         "packedness");
  if (Layout.getDataSize() % StorageAlignment)
    Packed = true;
}

void CGRecordLowering::accumulateFields() {
  for (RecordDecl::field_iterator Field = D->field_begin(),
                                  FieldEnd = D->field_end();
    Field != FieldEnd;) {
    if (Field->isBitField()) {
      RecordDecl::field_iterator Start = Field;
      // Iterate to gather the list of bitfields.
      for (++Field; Field != FieldEnd && Field->isBitField(); ++Field);
      accumulateBitFields(Start, Field);
    } else if (!Field->isZeroSize(Context)) {
      // Use base subobject layout for the potentially-overlapping field,
      // as is done in RecordLayoutBuilder.
      Members.push_back(MemberInfo(
          bitsToCharUnits(getFieldBitOffset(*Field)), MemberInfo::Field,
          Field->isPotentiallyOverlapping()
              ? getStorageType(Field->getType()->getAsCXXRecordDecl())
              : getStorageType(*Field),
          *Field));
      ++Field;
    } else {
      ++Field;
    }
  }
}

void
CGRecordLowering::accumulateBitFields(RecordDecl::field_iterator Field,
                                      RecordDecl::field_iterator FieldEnd) {
  // Run stores the first element of the current run of bitfields.  FieldEnd is
  // used as a special value to note that we don't have a current run.  A
  // bitfield run is a contiguous collection of bitfields that can be stored in
  // the same storage block.  Zero-sized bitfields and bitfields that would
  // cross an alignment boundary break a run and start a new one.
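  // For example (illustrative): in struct { int a : 3; int b : 5; int : 0;
  // int c : 7; }, the zero-width bitfield ends the first run, so 'a' and 'b'
  // share one storage unit and 'c' starts another.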
  RecordDecl::field_iterator Run = FieldEnd;
  // Tail is the offset of the first bit off the end of the current run.  It's
  // used to determine if the ASTRecordLayout is treating these two bitfields as
  // contiguous.  StartBitOffset is the offset of the beginning of the Run.
  uint64_t StartBitOffset, Tail = 0;
  if (isDiscreteBitFieldABI()) {
    for (; Field != FieldEnd; ++Field) {
      uint64_t BitOffset = getFieldBitOffset(*Field);
      // Zero-width bitfields end runs.
      if (Field->isZeroLengthBitField(Context)) {
        Run = FieldEnd;
        continue;
      }
      llvm::Type *Type =
          Types.ConvertTypeForMem(Field->getType(), /*ForBitField=*/true);
      // If we don't have a run yet, or don't live within the previous run's
      // allocated storage then we allocate some storage and start a new run.
      if (Run == FieldEnd || BitOffset >= Tail) {
        Run = Field;
        StartBitOffset = BitOffset;
        Tail = StartBitOffset + DataLayout.getTypeAllocSizeInBits(Type);
        // Add the storage member to the record.  This must be added to the
        // record before the bitfield members so that it gets laid out before
        // the bitfields it contains get laid out.
        Members.push_back(StorageInfo(bitsToCharUnits(StartBitOffset), Type));
      }
      // Bitfields get the offset of their storage but come afterward and remain
      // there after a stable sort.
      Members.push_back(MemberInfo(bitsToCharUnits(StartBitOffset),
                                   MemberInfo::Field, nullptr, *Field));
    }
    return;
  }

  // Check if OffsetInRecord (the size in bits of the current run) is better
  // as a single field run. When OffsetInRecord has legal integer width, and
  // its bitfield offset is naturally aligned, it is better to make the
  // bitfield a separate storage component so that it can be accessed directly
  // with lower cost.
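  // For example (illustrative): with -ffine-grained-bitfield-accesses, a
  // 32-bit run that starts on a 32-bit boundary is kept as its own i32
  // storage unit instead of being merged with neighboring runs.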
  auto IsBetterAsSingleFieldRun = [&](uint64_t OffsetInRecord,
                                      uint64_t StartBitOffset) {
    if (!Types.getCodeGenOpts().FineGrainedBitfieldAccesses)
      return false;
    if (OffsetInRecord < 8 || !llvm::isPowerOf2_64(OffsetInRecord) ||
        !DataLayout.fitsInLegalInteger(OffsetInRecord))
      return false;
    // Make sure StartBitOffset is naturally aligned if it is treated as an
    // IType integer.
    if (StartBitOffset %
            Context.toBits(getAlignment(getIntNType(OffsetInRecord))) !=
        0)
      return false;
    return true;
  };

  // The start field is better as a single field run.
  bool StartFieldAsSingleRun = false;
  for (;;) {
    // Check to see if we need to start a new run.
    if (Run == FieldEnd) {
      // If we're out of fields, return.
      if (Field == FieldEnd)
        break;
      // Any non-zero-length bitfield can start a new run.
      if (!Field->isZeroLengthBitField(Context)) {
        Run = Field;
        StartBitOffset = getFieldBitOffset(*Field);
        Tail = StartBitOffset + Field->getBitWidthValue(Context);
        StartFieldAsSingleRun = IsBetterAsSingleFieldRun(Tail - StartBitOffset,
                                                         StartBitOffset);
      }
      ++Field;
      continue;
    }

    // If the start field of a new run is better as a single run, or
    // if the current field (or consecutive fields) is better as a single run,
    // or if the current field is a zero-width bitfield and either
    // UseZeroLengthBitfieldAlignment or UseBitFieldTypeAlignment is set to
    // true, or
    // if the offset of the current field is inconsistent with the offset of
    // the previous field plus its width,
    // skip the block below and go ahead to emit the storage.
    // Otherwise, try to add bitfields to the run.
    if (!StartFieldAsSingleRun && Field != FieldEnd &&
        !IsBetterAsSingleFieldRun(Tail - StartBitOffset, StartBitOffset) &&
        (!Field->isZeroLengthBitField(Context) ||
         (!Context.getTargetInfo().useZeroLengthBitfieldAlignment() &&
          !Context.getTargetInfo().useBitFieldTypeAlignment())) &&
        Tail == getFieldBitOffset(*Field)) {
      Tail += Field->getBitWidthValue(Context);
      ++Field;
      continue;
    }

    // We've hit a break-point in the run and need to emit a storage field.
    llvm::Type *Type = getIntNType(Tail - StartBitOffset);
    // Add the storage member to the record and set the bitfield info for all of
    // the bitfields in the run.  Bitfields get the offset of their storage but
    // come afterward and remain there after a stable sort.
    Members.push_back(StorageInfo(bitsToCharUnits(StartBitOffset), Type));
    for (; Run != Field; ++Run)
      Members.push_back(MemberInfo(bitsToCharUnits(StartBitOffset),
                                   MemberInfo::Field, nullptr, *Run));
    Run = FieldEnd;
    StartFieldAsSingleRun = false;
  }
}

void CGRecordLowering::accumulateBases() {
  // If we've got a primary virtual base, we need to add it with the bases.
  if (Layout.isPrimaryBaseVirtual()) {
    const CXXRecordDecl *BaseDecl = Layout.getPrimaryBase();
    Members.push_back(MemberInfo(CharUnits::Zero(), MemberInfo::Base,
                                 getStorageType(BaseDecl), BaseDecl));
  }
  // Accumulate the non-virtual bases.
  for (const auto &Base : RD->bases()) {
    if (Base.isVirtual())
      continue;

    // Bases can be zero-sized even if not technically empty if they
    // contain only a trailing array member.
    const CXXRecordDecl *BaseDecl = Base.getType()->getAsCXXRecordDecl();
    if (!BaseDecl->isEmpty() &&
        !Context.getASTRecordLayout(BaseDecl).getNonVirtualSize().isZero())
      Members.push_back(MemberInfo(Layout.getBaseClassOffset(BaseDecl),
          MemberInfo::Base, getStorageType(BaseDecl), BaseDecl));
  }
}

/// The AAPCS requires that, when possible, bit-fields should
/// be accessed using containers of the declared type width:
/// When a volatile bit-field is read, and its container does not overlap with
/// any non-bit-field member or any zero length bit-field member, its container
/// must be read exactly once using the access width appropriate to the type of
/// the container. When a volatile bit-field is written, and its container does
/// not overlap with any non-bit-field member or any zero-length bit-field
/// member, its container must be read exactly once and written exactly once
/// using the access width appropriate to the type of the container. The two
/// accesses are not atomic.
///
/// Enforcing the width restriction can be disabled using
/// -fno-aapcs-bitfield-width.
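/// For example (illustrative): in
///   struct S { volatile short a : 3; int b; };
/// 'a' is normally given an i8 storage unit, but under AAPCS a volatile
/// access to it uses the full 16-bit container of its declared type, since
/// that container does not overlap 'b'.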
void CGRecordLowering::computeVolatileBitfields() {
  if (!isAAPCS() || !Types.getCodeGenOpts().AAPCSBitfieldWidth)
    return;

  for (auto &I : BitFields) {
    const FieldDecl *Field = I.first;
    CGBitFieldInfo &Info = I.second;
    llvm::Type *ResLTy = Types.ConvertTypeForMem(Field->getType());
    // If the record alignment is less than the type width, we can't enforce an
    // aligned load, bail out.
    if ((uint64_t)(Context.toBits(Layout.getAlignment())) <
        ResLTy->getPrimitiveSizeInBits())
      continue;
    // CGRecordLowering::setBitFieldInfo() pre-adjusts the bit-field offsets
    // for big-endian targets, but it assumes a container of width
    // Info.StorageSize. Since AAPCS uses a different container size (width
    // of the type), we first undo that calculation here and redo it once
    // the bit-field offset within the new container is calculated.
    const unsigned OldOffset =
        isBE() ? Info.StorageSize - (Info.Offset + Info.Size) : Info.Offset;
    // Offset to the bit-field from the beginning of the struct.
    const unsigned AbsoluteOffset =
        Context.toBits(Info.StorageOffset) + OldOffset;

    // Container size is the width of the bit-field type.
    const unsigned StorageSize = ResLTy->getPrimitiveSizeInBits();
    // Nothing to do if the access uses the desired
    // container width and is naturally aligned.
    if (Info.StorageSize == StorageSize && (OldOffset % StorageSize == 0))
      continue;

    // Offset within the container.
    unsigned Offset = AbsoluteOffset & (StorageSize - 1);
    // Bail out if an aligned load of the container cannot cover the entire
    // bit-field. This can happen for example, if the bit-field is part of a
    // packed struct. AAPCS does not define access rules for such cases, so we
    // let clang follow its own rules.
    if (Offset + Info.Size > StorageSize)
      continue;

    // Re-adjust offsets for big-endian targets.
    if (isBE())
      Offset = StorageSize - (Offset + Info.Size);

    const CharUnits StorageOffset =
        Context.toCharUnitsFromBits(AbsoluteOffset & ~(StorageSize - 1));
    const CharUnits End = StorageOffset +
                          Context.toCharUnitsFromBits(StorageSize) -
                          CharUnits::One();

    const ASTRecordLayout &Layout =
        Context.getASTRecordLayout(Field->getParent());
    // If the access would touch memory outside the record, bail out.
    const CharUnits RecordSize = Layout.getSize();
    if (End >= RecordSize)
      continue;

    // Bail out if performing this load would access non-bit-field members.
    bool Conflict = false;
    for (const auto *F : D->fields()) {
      // Allow overlap with sized bit-fields.
      if (F->isBitField() && !F->isZeroLengthBitField(Context))
        continue;

      const CharUnits FOffset = Context.toCharUnitsFromBits(
          Layout.getFieldOffset(F->getFieldIndex()));

      // As C11 defines, a zero sized bit-field defines a barrier, so
      // fields after and before it should be race condition free.
      // The AAPCS acknowledges it and imposes no restrictions when the
      // natural container overlaps a zero-length bit-field.
      if (F->isZeroLengthBitField(Context)) {
        if (End > FOffset && StorageOffset < FOffset) {
          Conflict = true;
          break;
        }
      }

      const CharUnits FEnd =
          FOffset +
          Context.toCharUnitsFromBits(
              Types.ConvertTypeForMem(F->getType())->getPrimitiveSizeInBits()) -
          CharUnits::One();
      // If no overlap, continue.
      if (End < FOffset || FEnd < StorageOffset)
        continue;

      // The desired load overlaps a non-bit-field member, bail out.
      Conflict = true;
      break;
    }

    if (Conflict)
      continue;
    // Write the new bit-field access parameters.
    // As the storage offset now is defined as the number of elements from the
    // start of the structure, we should divide the Offset by the element size.
    Info.VolatileStorageOffset =
        StorageOffset / Context.toCharUnitsFromBits(StorageSize).getQuantity();
    Info.VolatileStorageSize = StorageSize;
    Info.VolatileOffset = Offset;
  }
}

void CGRecordLowering::accumulateVPtrs() {
  if (Layout.hasOwnVFPtr())
    Members.push_back(
        MemberInfo(CharUnits::Zero(), MemberInfo::VFPtr,
                   llvm::PointerType::getUnqual(Types.getLLVMContext())));
  if (Layout.hasOwnVBPtr())
    Members.push_back(
        MemberInfo(Layout.getVBPtrOffset(), MemberInfo::VBPtr,
                   llvm::PointerType::getUnqual(Types.getLLVMContext())));
}

void CGRecordLowering::accumulateVBases() {
  CharUnits ScissorOffset = Layout.getNonVirtualSize();
  // In the Itanium ABI, it's possible to place a vbase at a dsize that is
  // smaller than the nvsize.  Here we check to see if such a base is placed
  // before the nvsize and set the scissor offset to that, instead of the
  // nvsize.
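  // For example (illustrative): with nvsize == 12 but a virtual base placed
  // at offset 8, the scissor must move back to 8 so that any bitfield storage
  // in front of it is clipped before the vbase begins.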
  if (isOverlappingVBaseABI())
    for (const auto &Base : RD->vbases()) {
      const CXXRecordDecl *BaseDecl = Base.getType()->getAsCXXRecordDecl();
      if (BaseDecl->isEmpty())
        continue;
      // If the vbase is a primary virtual base of some base, then it doesn't
      // get its own storage location but instead lives inside of that base.
      if (Context.isNearlyEmpty(BaseDecl) && !hasOwnStorage(RD, BaseDecl))
        continue;
      ScissorOffset = std::min(ScissorOffset,
                               Layout.getVBaseClassOffset(BaseDecl));
    }
  Members.push_back(MemberInfo(ScissorOffset, MemberInfo::Scissor, nullptr,
                               RD));
  for (const auto &Base : RD->vbases()) {
    const CXXRecordDecl *BaseDecl = Base.getType()->getAsCXXRecordDecl();
    if (BaseDecl->isEmpty())
      continue;
    CharUnits Offset = Layout.getVBaseClassOffset(BaseDecl);
    // If the vbase is a primary virtual base of some base, then it doesn't
    // get its own storage location but instead lives inside of that base.
    if (isOverlappingVBaseABI() &&
        Context.isNearlyEmpty(BaseDecl) &&
        !hasOwnStorage(RD, BaseDecl)) {
      Members.push_back(MemberInfo(Offset, MemberInfo::VBase, nullptr,
                                   BaseDecl));
      continue;
    }
    // If we've got a vtordisp, add it as a storage type.
    if (Layout.getVBaseOffsetsMap().find(BaseDecl)->second.hasVtorDisp())
      Members.push_back(StorageInfo(Offset - CharUnits::fromQuantity(4),
                                    getIntNType(32)));
    Members.push_back(MemberInfo(Offset, MemberInfo::VBase,
                                 getStorageType(BaseDecl), BaseDecl));
  }
}

bool CGRecordLowering::hasOwnStorage(const CXXRecordDecl *Decl,
                                     const CXXRecordDecl *Query) {
  const ASTRecordLayout &DeclLayout = Context.getASTRecordLayout(Decl);
  if (DeclLayout.isPrimaryBaseVirtual() && DeclLayout.getPrimaryBase() == Query)
    return false;
  for (const auto &Base : Decl->bases())
    if (!hasOwnStorage(Base.getType()->getAsCXXRecordDecl(), Query))
      return false;
  return true;
}

void CGRecordLowering::calculateZeroInit() {
  for (std::vector<MemberInfo>::const_iterator Member = Members.begin(),
                                               MemberEnd = Members.end();
       IsZeroInitializableAsBase && Member != MemberEnd; ++Member) {
    if (Member->Kind == MemberInfo::Field) {
      if (!Member->FD || isZeroInitializable(Member->FD))
        continue;
      IsZeroInitializable = IsZeroInitializableAsBase = false;
    } else if (Member->Kind == MemberInfo::Base ||
               Member->Kind == MemberInfo::VBase) {
      if (isZeroInitializable(Member->RD))
        continue;
      IsZeroInitializable = false;
      if (Member->Kind == MemberInfo::Base)
        IsZeroInitializableAsBase = false;
    }
  }
}

void CGRecordLowering::clipTailPadding() {
  std::vector<MemberInfo>::iterator Prior = Members.begin();
  CharUnits Tail = getSize(Prior->Data);
  for (std::vector<MemberInfo>::iterator Member = Prior + 1,
                                         MemberEnd = Members.end();
       Member != MemberEnd; ++Member) {
    // Only members with data and the scissor can cut into tail padding.
    if (!Member->Data && Member->Kind != MemberInfo::Scissor)
      continue;
    if (Member->Offset < Tail) {
      assert(Prior->Kind == MemberInfo::Field &&
             "Only storage fields have tail padding!");
      if (!Prior->FD || Prior->FD->isBitField())
        Prior->Data = getByteArrayType(bitsToCharUnits(llvm::alignTo(
            cast<llvm::IntegerType>(Prior->Data)->getIntegerBitWidth(), 8)));
      else {
        assert(Prior->FD->hasAttr<NoUniqueAddressAttr>() &&
               "should not have reused this field's tail padding");
        Prior->Data = getByteArrayType(
            Context.getTypeInfoDataSizeInChars(Prior->FD->getType()).Width);
      }
    }
    if (Member->Data)
      Prior = Member;
    Tail = Prior->Offset + getSize(Prior->Data);
  }
}

void CGRecordLowering::determinePacked(bool NVBaseType) {
  if (Packed)
    return;
  CharUnits Alignment = CharUnits::One();
  CharUnits NVAlignment = CharUnits::One();
  CharUnits NVSize =
      !NVBaseType && RD ? Layout.getNonVirtualSize() : CharUnits::Zero();
  for (std::vector<MemberInfo>::const_iterator Member = Members.begin(),
                                               MemberEnd = Members.end();
       Member != MemberEnd; ++Member) {
    if (!Member->Data)
      continue;
    // If any member falls at an offset that is not a multiple of its
    // alignment, then the entire record must be packed.
7850b57cec5SDimitry Andric     if (Member->Offset % getAlignment(Member->Data))
7860b57cec5SDimitry Andric       Packed = true;
7870b57cec5SDimitry Andric     if (Member->Offset < NVSize)
7880b57cec5SDimitry Andric       NVAlignment = std::max(NVAlignment, getAlignment(Member->Data));
7890b57cec5SDimitry Andric     Alignment = std::max(Alignment, getAlignment(Member->Data));
7900b57cec5SDimitry Andric   }
7910b57cec5SDimitry Andric   // If the size of the record (the capstone's offset) is not a multiple of the
7920b57cec5SDimitry Andric   // record's alignment, it must be packed.
7930b57cec5SDimitry Andric   if (Members.back().Offset % Alignment)
7940b57cec5SDimitry Andric     Packed = true;
7950b57cec5SDimitry Andric   // If the non-virtual sub-object is not a multiple of the non-virtual
7960b57cec5SDimitry Andric   // sub-object's alignment, it must be packed.  We cannot have a packed
7970b57cec5SDimitry Andric   // non-virtual sub-object and an unpacked complete object or vise versa.
7980b57cec5SDimitry Andric   if (NVSize % NVAlignment)
7990b57cec5SDimitry Andric     Packed = true;
8000b57cec5SDimitry Andric   // Update the alignment of the sentinel.
8010b57cec5SDimitry Andric   if (!Packed)
8020b57cec5SDimitry Andric     Members.back().Data = getIntNType(Context.toBits(Alignment));
8030b57cec5SDimitry Andric }
8040b57cec5SDimitry Andric 
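/// Scan the members in offset order and record every gap between the end of
/// one storage element and the (alignment-adjusted) start of the next, then
/// materialize each gap as an explicit byte-array member and re-sort the
/// member list by offset.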
void CGRecordLowering::insertPadding() {
  std::vector<std::pair<CharUnits, CharUnits> > Padding;
  CharUnits Size = CharUnits::Zero();
  for (std::vector<MemberInfo>::const_iterator Member = Members.begin(),
                                               MemberEnd = Members.end();
       Member != MemberEnd; ++Member) {
    if (!Member->Data)
      continue;
    CharUnits Offset = Member->Offset;
    assert(Offset >= Size);
    // Insert padding if we need to.
    if (Offset !=
        Size.alignTo(Packed ? CharUnits::One() : getAlignment(Member->Data)))
      Padding.push_back(std::make_pair(Size, Offset - Size));
    Size = Offset + getSize(Member->Data);
  }
  if (Padding.empty())
    return;
  // Add the padding to the Members list and sort it.
  for (std::vector<std::pair<CharUnits, CharUnits> >::const_iterator
        Pad = Padding.begin(), PadEnd = Padding.end();
        Pad != PadEnd; ++Pad)
    Members.push_back(StorageInfo(Pad->first, getByteArrayType(Pad->second)));
  llvm::stable_sort(Members);
}

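/// Copy the final member list into the output structures: each storage type
/// goes into FieldTypes in order, and fields, bases, and virtual bases are
/// mapped to the LLVM field index they ended up at.  A bitfield without its
/// own storage shares the index of the preceding storage member.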
void CGRecordLowering::fillOutputFields() {
  for (std::vector<MemberInfo>::const_iterator Member = Members.begin(),
                                               MemberEnd = Members.end();
       Member != MemberEnd; ++Member) {
    if (Member->Data)
      FieldTypes.push_back(Member->Data);
    if (Member->Kind == MemberInfo::Field) {
      if (Member->FD)
        Fields[Member->FD->getCanonicalDecl()] = FieldTypes.size() - 1;
      // A field without storage must be a bitfield.
      if (!Member->Data)
        setBitFieldInfo(Member->FD, Member->Offset, FieldTypes.back());
    } else if (Member->Kind == MemberInfo::Base)
      NonVirtualBases[Member->RD] = FieldTypes.size() - 1;
    else if (Member->Kind == MemberInfo::VBase)
      VirtualBases[Member->RD] = FieldTypes.size() - 1;
  }
}

CGBitFieldInfo CGBitFieldInfo::MakeInfo(CodeGenTypes &Types,
                                        const FieldDecl *FD,
                                        uint64_t Offset, uint64_t Size,
                                        uint64_t StorageSize,
                                        CharUnits StorageOffset) {
  // This function is vestigial from CGRecordLayoutBuilder days but is still
  // used in GCObjCRuntime.cpp.  That usage has a "fixme" attached to it that,
  // when addressed, will allow for the removal of this function.
  llvm::Type *Ty = Types.ConvertTypeForMem(FD->getType());
  CharUnits TypeSizeInBytes =
    CharUnits::fromQuantity(Types.getDataLayout().getTypeAllocSize(Ty));
  uint64_t TypeSizeInBits = Types.getContext().toBits(TypeSizeInBytes);

  bool IsSigned = FD->getType()->isSignedIntegerOrEnumerationType();

  if (Size > TypeSizeInBits) {
    // We have a wide bit-field. The extra bits are only used for padding, so
    // if we have a bitfield of type T, with size N:
    //
    // T t : N;
    //
    // We can just assume that it's:
    //
    // T t : sizeof(T);
    //
    Size = TypeSizeInBits;
  }

  // Reverse the bit offsets for big endian machines. Because we represent
  // a bitfield as a single large integer load, we can imagine the bits
  // counting from the most-significant-bit instead of the
  // least-significant-bit.
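  // For example, a bitfield with Offset 0 and Size 8 in a 32-bit storage
  // unit becomes Offset 32 - (0 + 8) = 24 on a big-endian target.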
  if (Types.getDataLayout().isBigEndian()) {
    Offset = StorageSize - (Offset + Size);
  }

  return CGBitFieldInfo(Offset, Size, IsSigned, StorageSize, StorageOffset);
}

std::unique_ptr<CGRecordLayout>
CodeGenTypes::ComputeRecordLayout(const RecordDecl *D, llvm::StructType *Ty) {
  CGRecordLowering Builder(*this, D, /*Packed=*/false);

  Builder.lower(/*NonVirtualBaseType=*/false);

  // If we're in C++, compute the base subobject type.
  llvm::StructType *BaseTy = nullptr;
  if (isa<CXXRecordDecl>(D)) {
    BaseTy = Ty;
    if (Builder.Layout.getNonVirtualSize() != Builder.Layout.getSize()) {
      CGRecordLowering BaseBuilder(*this, D, /*Packed=*/Builder.Packed);
      BaseBuilder.lower(/*NonVirtualBaseType=*/true);
      BaseTy = llvm::StructType::create(
          getLLVMContext(), BaseBuilder.FieldTypes, "", BaseBuilder.Packed);
      addRecordTypeName(D, BaseTy, ".base");
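      // The base subobject type is named with a ".base" suffix, e.g. a
      // record lowered as %struct.S gets a companion %struct.S.base.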
      // BaseTy and Ty must agree on their packedness for getLLVMFieldNo to
      // work on both of them with the same index.
      assert(Builder.Packed == BaseBuilder.Packed &&
             "Non-virtual and complete types must agree on packedness");
    }
  }

  // Fill in the struct *after* computing the base type.  Filling in the body
  // signifies that the type is no longer opaque and record layout is complete,
  // but we may need to recursively layout D while laying D out as a base type.
  Ty->setBody(Builder.FieldTypes, Builder.Packed);

  auto RL = std::make_unique<CGRecordLayout>(
      Ty, BaseTy, (bool)Builder.IsZeroInitializable,
      (bool)Builder.IsZeroInitializableAsBase);

  RL->NonVirtualBases.swap(Builder.NonVirtualBases);
  RL->CompleteObjectVirtualBases.swap(Builder.VirtualBases);

  // Add all the field numbers.
  RL->FieldInfo.swap(Builder.Fields);

  // Add bitfield info.
  RL->BitFields.swap(Builder.BitFields);

  // Dump the layout, if requested.
  if (getContext().getLangOpts().DumpRecordLayouts) {
    llvm::outs() << "\n*** Dumping IRgen Record Layout\n";
    llvm::outs() << "Record: ";
    D->dump(llvm::outs());
    llvm::outs() << "\nLayout: ";
    RL->print(llvm::outs());
  }

#ifndef NDEBUG
  // Verify that the computed LLVM struct size matches the AST layout size.
  const ASTRecordLayout &Layout = getContext().getASTRecordLayout(D);

  uint64_t TypeSizeInBits = getContext().toBits(Layout.getSize());
  assert(TypeSizeInBits == getDataLayout().getTypeAllocSizeInBits(Ty) &&
         "Type size mismatch!");

  if (BaseTy) {
    CharUnits NonVirtualSize = Layout.getNonVirtualSize();

    uint64_t AlignedNonVirtualTypeSizeInBits =
      getContext().toBits(NonVirtualSize);

    assert(AlignedNonVirtualTypeSizeInBits ==
           getDataLayout().getTypeAllocSizeInBits(BaseTy) &&
           "Type size mismatch!");
  }

  // Verify that the LLVM and AST field offsets agree.
  llvm::StructType *ST = RL->getLLVMType();
  const llvm::StructLayout *SL = getDataLayout().getStructLayout(ST);

  const ASTRecordLayout &AST_RL = getContext().getASTRecordLayout(D);
  RecordDecl::field_iterator it = D->field_begin();
  for (unsigned i = 0, e = AST_RL.getFieldCount(); i != e; ++i, ++it) {
    const FieldDecl *FD = *it;

    // Ignore zero-sized fields.
    if (FD->isZeroSize(getContext()))
      continue;

    // For non-bit-fields, just check that the LLVM struct offset matches the
    // AST offset.
    if (!FD->isBitField()) {
      unsigned FieldNo = RL->getLLVMFieldNo(FD);
      assert(AST_RL.getFieldOffset(i) == SL->getElementOffsetInBits(FieldNo) &&
             "Invalid field offset!");
      continue;
    }

    // Ignore unnamed bit-fields.
    if (!FD->getDeclName())
      continue;

    const CGBitFieldInfo &Info = RL->getBitFieldInfo(FD);
    llvm::Type *ElementTy = ST->getTypeAtIndex(RL->getLLVMFieldNo(FD));

    // Unions have overlapping elements dictating their layout, but for
    // non-unions we can verify that this section of the layout is the exact
    // expected size.
    if (D->isUnion()) {
      // For unions we verify that the start is zero and the size
      // is in-bounds. However, on BE systems, the offset may be non-zero, but
      // the size + offset should match the storage size in that case as it
      // "starts" at the back.
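      // For example, a 3-bit field in 8-bit storage sits at Offset 5 on a
      // big-endian target, and 5 + 3 == 8 == StorageSize.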
      if (getDataLayout().isBigEndian())
        assert(static_cast<unsigned>(Info.Offset + Info.Size) ==
               Info.StorageSize &&
               "Big endian union bitfield does not end at the back");
      else
        assert(Info.Offset == 0 &&
               "Little endian union bitfield with a non-zero offset");
      assert(Info.StorageSize <= SL->getSizeInBits() &&
             "Union not large enough for bitfield storage");
    } else {
      assert((Info.StorageSize ==
                  getDataLayout().getTypeAllocSizeInBits(ElementTy) ||
              Info.VolatileStorageSize ==
                  getDataLayout().getTypeAllocSizeInBits(ElementTy)) &&
             "Storage size does not match the element type size");
    }
    assert(Info.Size > 0 && "Empty bitfield!");
    assert(static_cast<unsigned>(Info.Offset) + Info.Size <= Info.StorageSize &&
           "Bitfield outside of its allocated storage");
  }
#endif

  return RL;
}

void CGRecordLayout::print(raw_ostream &OS) const {
  OS << "<CGRecordLayout\n";
  OS << "  LLVMType:" << *CompleteObjectType << "\n";
  if (BaseSubobjectType)
    OS << "  NonVirtualBaseLLVMType:" << *BaseSubobjectType << "\n";
  OS << "  IsZeroInitializable:" << IsZeroInitializable << "\n";
  OS << "  BitFields:[\n";

  // Print bit-field infos in declaration order.
  std::vector<std::pair<unsigned, const CGBitFieldInfo*> > BFIs;
  for (llvm::DenseMap<const FieldDecl*, CGBitFieldInfo>::const_iterator
         it = BitFields.begin(), ie = BitFields.end();
       it != ie; ++it) {
    const RecordDecl *RD = it->first->getParent();
    unsigned Index = 0;
    for (RecordDecl::field_iterator
           it2 = RD->field_begin(); *it2 != it->first; ++it2)
      ++Index;
    BFIs.push_back(std::make_pair(Index, &it->second));
  }
  llvm::array_pod_sort(BFIs.begin(), BFIs.end());
  for (unsigned i = 0, e = BFIs.size(); i != e; ++i) {
    OS.indent(4);
    BFIs[i].second->print(OS);
    OS << "\n";
  }

  OS << "]>\n";
}

LLVM_DUMP_METHOD void CGRecordLayout::dump() const {
  print(llvm::errs());
}

void CGBitFieldInfo::print(raw_ostream &OS) const {
  OS << "<CGBitFieldInfo"
     << " Offset:" << Offset << " Size:" << Size << " IsSigned:" << IsSigned
     << " StorageSize:" << StorageSize
     << " StorageOffset:" << StorageOffset.getQuantity()
     << " VolatileOffset:" << VolatileOffset
     << " VolatileStorageSize:" << VolatileStorageSize
     << " VolatileStorageOffset:" << VolatileStorageOffset.getQuantity() << ">";
}

LLVM_DUMP_METHOD void CGBitFieldInfo::dump() const {
  print(llvm::errs());
}