//===--- CGRecordLayoutBuilder.cpp - CGRecordLayout builder  ----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Builder implementation for CGRecordLayout objects.
//
//===----------------------------------------------------------------------===//

#include "CGRecordLayout.h"
#include "CGCXXABI.h"
#include "CodeGenTypes.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/CXXInheritance.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/Expr.h"
#include "clang/AST/RecordLayout.h"
#include "clang/Basic/CodeGenOptions.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
using namespace clang;
using namespace CodeGen;

namespace {
/// The CGRecordLowering is responsible for lowering an ASTRecordLayout to an
/// llvm::Type.  Some of the lowering is straightforward, some is not.  Here we
/// detail some of the complexities and weirdnesses.
/// * LLVM does not have unions - Unions can, in theory, be represented by any
///   llvm::Type with the correct size.  We choose a field via a specific
///   heuristic and add padding if necessary.
/// * LLVM does not have bitfields - Bitfields are collected into contiguous
///   runs and allocated as a single storage type for the run.  ASTRecordLayout
///   contains enough information to determine where the runs break.  Microsoft
///   and Itanium follow different rules and use different codepaths.
/// * It is desired that, when possible, bitfields use the appropriate iN type
///   when lowered to llvm types.  For example, unsigned x : 24 gets lowered to
///   i24.  This isn't always possible because i24 has a storage size of 32
///   bits, and if it is possible to use that extra byte of padding we must use
///   [i8 x 3] instead of i24.  The function clipTailPadding does this.
///   C++ examples that require clipping:
///   struct { int a : 24; char b; }; // a must be clipped, b goes at offset 3
///   struct A { int a : 24; }; // a must be clipped because a struct like B
///   could exist: struct B : A { char b; }; // b goes at offset 3
/// * Clang ignores 0 sized bitfields and 0 sized bases but *not* zero sized
///   fields.  The existing asserts suggest that LLVM assumes that *every* field
///   has an underlying storage type.  Therefore empty structures containing
///   zero sized subobjects such as empty records or zero sized arrays still get
///   a zero sized (empty struct) storage type.
/// * Clang reads the complete type rather than the base type when generating
///   code to access fields.  Bitfields in tail position with tail padding may
///   be clipped in the base class but not the complete class (we may discover
///   that the tail padding is not used in the complete class.) However,
///   because LLVM reads from the complete type it can generate incorrect code
///   if we do not clip the tail padding off of the bitfield in the complete
///   layout.  This introduces a somewhat awkward extra unnecessary clip stage.
///   The location of the clip is stored internally as a sentinel of type
///   SCISSOR.  If LLVM were updated to read base types (which it probably
///   should because locations of things such as VBases are bogus in the llvm
///   type anyway) then we could eliminate the SCISSOR.
/// * Itanium allows nearly empty primary virtual bases.  These bases don't
///   get their own storage because they're laid out as part of another base
///   or at the beginning of the structure.  Determining if a VBase actually
///   gets storage awkwardly involves a walk of all bases.
/// * VFPtrs and VBPtrs do *not* make a record NotZeroInitializable.
struct CGRecordLowering {
  // MemberInfo is a helper structure that contains information about a record
  // member.  In addition to the standard member types, there exists a
  // sentinel member type that ensures correct rounding.
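  // For example, the 1-byte "capstone" appended by lower() at the record's
  // Size is such a sentinel: it exists only so that clipping and padding
  // treat the end of the record like just another member (see lower()).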
  struct MemberInfo {
    CharUnits Offset;
    enum InfoKind { VFPtr, VBPtr, Field, Base, VBase, Scissor } Kind;
    llvm::Type *Data;
    union {
      const FieldDecl *FD;
      const CXXRecordDecl *RD;
    };
    MemberInfo(CharUnits Offset, InfoKind Kind, llvm::Type *Data,
               const FieldDecl *FD = nullptr)
      : Offset(Offset), Kind(Kind), Data(Data), FD(FD) {}
    MemberInfo(CharUnits Offset, InfoKind Kind, llvm::Type *Data,
               const CXXRecordDecl *RD)
      : Offset(Offset), Kind(Kind), Data(Data), RD(RD) {}
    // MemberInfos are sorted so we define a < operator.
    bool operator <(const MemberInfo& a) const { return Offset < a.Offset; }
  };
  // The constructor.
  CGRecordLowering(CodeGenTypes &Types, const RecordDecl *D, bool Packed);
  // Short helper routines.
  /// Constructs a MemberInfo instance from an offset and llvm::Type *.
  MemberInfo StorageInfo(CharUnits Offset, llvm::Type *Data) {
    return MemberInfo(Offset, MemberInfo::Field, Data);
  }

  /// The Microsoft bitfield layout rule allocates discrete storage
  /// units of the field's formal type and only combines adjacent
  /// fields of the same formal type.  We want to emit a layout with
  /// these discrete storage units instead of combining them into a
  /// continuous run.
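  /// For example (an illustrative sketch, not from the original comments):
  /// given 'struct S { short a : 3; short b : 4; int c : 5; };', this rule
  /// lets a and b share one i16 storage unit (same formal type) while c gets
  /// its own i32 unit; the Itanium path would instead pack all three
  /// bitfields into a single contiguous run.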
  bool isDiscreteBitFieldABI() {
    return Context.getTargetInfo().getCXXABI().isMicrosoft() ||
           D->isMsStruct(Context);
  }

  /// Helper function to check if we are targeting AAPCS.
  bool isAAPCS() const {
    return Context.getTargetInfo().getABI().startswith("aapcs");
  }

  /// Helper function to check if the target machine is BigEndian.
  bool isBE() const { return Context.getTargetInfo().isBigEndian(); }

  /// The Itanium base layout rule allows virtual bases to overlap
  /// other bases, which complicates layout in specific ways.
  ///
  /// Note specifically that the ms_struct attribute doesn't change this.
  bool isOverlappingVBaseABI() {
    return !Context.getTargetInfo().getCXXABI().isMicrosoft();
  }

  /// Wraps llvm::Type::getIntNTy with some implicit arguments.
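  /// For example, getIntNType(17) rounds 17 up to the next multiple of the
  /// char width and returns i24 (a sketch of the behavior, assuming the
  /// usual 8-bit char).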
  llvm::Type *getIntNType(uint64_t NumBits) {
    unsigned AlignedBits = llvm::alignTo(NumBits, Context.getCharWidth());
    return llvm::Type::getIntNTy(Types.getLLVMContext(), AlignedBits);
  }
  /// Get the LLVM type sized as one character unit.
  llvm::Type *getCharType() {
    return llvm::Type::getIntNTy(Types.getLLVMContext(),
                                 Context.getCharWidth());
  }
  /// Gets an llvm type of size NumChars and alignment 1.
  llvm::Type *getByteArrayType(CharUnits NumChars) {
    assert(!NumChars.isZero() && "Empty byte arrays aren't allowed.");
    llvm::Type *Type = getCharType();
    return NumChars == CharUnits::One() ? Type :
        (llvm::Type *)llvm::ArrayType::get(Type, NumChars.getQuantity());
  }
  /// Gets the storage type for a field decl and handles storage
  /// for Itanium bitfields that are smaller than their declared type.
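  /// For example, for 'unsigned x : 24' on Itanium this returns i24 rather
  /// than the i32 memory type of the declared type (an illustrative sketch;
  /// the discrete/Microsoft path keeps the declared type's storage).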
  llvm::Type *getStorageType(const FieldDecl *FD) {
    llvm::Type *Type = Types.ConvertTypeForMem(FD->getType());
    if (!FD->isBitField()) return Type;
    if (isDiscreteBitFieldABI()) return Type;
    return getIntNType(std::min(FD->getBitWidthValue(Context),
                             (unsigned)Context.toBits(getSize(Type))));
  }
  /// Gets the llvm BaseSubobject type from a CXXRecordDecl.
  llvm::Type *getStorageType(const CXXRecordDecl *RD) {
    return Types.getCGRecordLayout(RD).getBaseSubobjectLLVMType();
  }
  CharUnits bitsToCharUnits(uint64_t BitOffset) {
    return Context.toCharUnitsFromBits(BitOffset);
  }
  CharUnits getSize(llvm::Type *Type) {
    return CharUnits::fromQuantity(DataLayout.getTypeAllocSize(Type));
  }
  CharUnits getAlignment(llvm::Type *Type) {
    return CharUnits::fromQuantity(DataLayout.getABITypeAlign(Type));
  }
  bool isZeroInitializable(const FieldDecl *FD) {
    return Types.isZeroInitializable(FD->getType());
  }
  bool isZeroInitializable(const RecordDecl *RD) {
    return Types.isZeroInitializable(RD);
  }
  void appendPaddingBytes(CharUnits Size) {
    if (!Size.isZero())
      FieldTypes.push_back(getByteArrayType(Size));
  }
  uint64_t getFieldBitOffset(const FieldDecl *FD) {
    return Layout.getFieldOffset(FD->getFieldIndex());
  }
  // Layout routines.
  void setBitFieldInfo(const FieldDecl *FD, CharUnits StartOffset,
                       llvm::Type *StorageType);
  /// Lowers an ASTRecordLayout to an llvm type.
  void lower(bool NonVirtualBaseType);
  void lowerUnion(bool isNoUniqueAddress);
  void accumulateFields();
  void accumulateBitFields(RecordDecl::field_iterator Field,
                           RecordDecl::field_iterator FieldEnd);
  void computeVolatileBitfields();
  void accumulateBases();
  void accumulateVPtrs();
  void accumulateVBases();
  /// Recursively searches all of the bases to find out if a vbase is
  /// not the primary vbase of some base class.
  bool hasOwnStorage(const CXXRecordDecl *Decl, const CXXRecordDecl *Query);
  void calculateZeroInit();
  /// Lowers bitfield storage types to I8 arrays for bitfields with tail
  /// padding that is or can potentially be used.
  void clipTailPadding();
  /// Determines if we need a packed llvm struct.
  void determinePacked(bool NVBaseType);
  /// Inserts padding everywhere it's needed.
  void insertPadding();
  /// Fills out the structures that are ultimately consumed.
  void fillOutputFields();
  // Input memoization fields.
  CodeGenTypes &Types;
  const ASTContext &Context;
  const RecordDecl *D;
  const CXXRecordDecl *RD;
  const ASTRecordLayout &Layout;
  const llvm::DataLayout &DataLayout;
  // Helpful intermediate data-structures.
  std::vector<MemberInfo> Members;
  // Output fields, consumed by CodeGenTypes::ComputeRecordLayout.
  SmallVector<llvm::Type *, 16> FieldTypes;
  llvm::DenseMap<const FieldDecl *, unsigned> Fields;
  llvm::DenseMap<const FieldDecl *, CGBitFieldInfo> BitFields;
  llvm::DenseMap<const CXXRecordDecl *, unsigned> NonVirtualBases;
  llvm::DenseMap<const CXXRecordDecl *, unsigned> VirtualBases;
  bool IsZeroInitializable : 1;
  bool IsZeroInitializableAsBase : 1;
  bool Packed : 1;
private:
  CGRecordLowering(const CGRecordLowering &) = delete;
  void operator =(const CGRecordLowering &) = delete;
};
} // namespace

CGRecordLowering::CGRecordLowering(CodeGenTypes &Types, const RecordDecl *D,
                                   bool Packed)
    : Types(Types), Context(Types.getContext()), D(D),
      RD(dyn_cast<CXXRecordDecl>(D)),
      Layout(Types.getContext().getASTRecordLayout(D)),
      DataLayout(Types.getDataLayout()), IsZeroInitializable(true),
      IsZeroInitializableAsBase(true), Packed(Packed) {}

void CGRecordLowering::setBitFieldInfo(
    const FieldDecl *FD, CharUnits StartOffset, llvm::Type *StorageType) {
  CGBitFieldInfo &Info = BitFields[FD->getCanonicalDecl()];
  Info.IsSigned = FD->getType()->isSignedIntegerOrEnumerationType();
  Info.Offset = (unsigned)(getFieldBitOffset(FD) - Context.toBits(StartOffset));
  Info.Size = FD->getBitWidthValue(Context);
  Info.StorageSize = (unsigned)DataLayout.getTypeAllocSizeInBits(StorageType);
  Info.StorageOffset = StartOffset;
  if (Info.Size > Info.StorageSize)
    Info.Size = Info.StorageSize;
  // Reverse the bit offsets for big endian machines. Because we represent
  // a bitfield as a single large integer load, we can imagine the bits
  // counting from the most-significant-bit instead of the
  // least-significant-bit.
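  // For example (a sketch): with StorageSize = 32, a bitfield at
  // little-endian Offset 0 with Size 24 ends up at big-endian
  // Offset 32 - (0 + 24) = 8.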
  if (DataLayout.isBigEndian())
    Info.Offset = Info.StorageSize - (Info.Offset + Info.Size);

  Info.VolatileStorageSize = 0;
  Info.VolatileOffset = 0;
  Info.VolatileStorageOffset = CharUnits::Zero();
}

void CGRecordLowering::lower(bool NVBaseType) {
  // The lowering process implemented in this function takes a variety of
  // carefully ordered phases.
  // 1) Store all members (fields and bases) in a list and sort them by offset.
  // 2) Add a 1-byte capstone member at the Size of the structure.
  // 3) Clip bitfield storage members if their tail padding is or might be
  //    used by another field or base.  The clipping process uses the capstone
  //    by treating it as another object that occurs after the record.
  // 4) Determine if the llvm-struct requires packing.  It's important that
  //    this phase occur after clipping, because clipping changes the llvm
  //    type.  This phase reads the offset of the capstone when determining
  //    packedness and updates the alignment of the capstone to be equal to
  //    the alignment of the record after doing so.
  // 5) Insert padding everywhere it is needed.  This phase requires 'Packed'
  //    to have been computed and needs to know the alignment of the record in
  //    order to understand if explicit tail padding is needed.
  // 6) Remove the capstone; we don't need it anymore.
  // 7) Determine if this record can be zero-initialized.  This phase could
  //    have been placed anywhere after phase 1.
  // 8) Format the complete list of members in a way that can be consumed by
  //    CodeGenTypes::ComputeRecordLayout.
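  // As a sketch of phases 2-3 (not from the original comments): for
  // 'struct { int a : 24; char b; }' the capstone lands at offset 4; clipping
  // sees that b, at offset 3, starts inside the 4-byte allocation of a's i24
  // storage, so that storage becomes [3 x i8] and b can occupy offset 3.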
  CharUnits Size = NVBaseType ? Layout.getNonVirtualSize() : Layout.getSize();
  if (D->isUnion()) {
    lowerUnion(NVBaseType);
    computeVolatileBitfields();
    return;
  }
  accumulateFields();
  // RD implies C++.
  if (RD) {
    accumulateVPtrs();
    accumulateBases();
    if (Members.empty()) {
      appendPaddingBytes(Size);
      computeVolatileBitfields();
      return;
    }
    if (!NVBaseType)
      accumulateVBases();
  }
  llvm::stable_sort(Members);
  Members.push_back(StorageInfo(Size, getIntNType(8)));
  clipTailPadding();
  determinePacked(NVBaseType);
  insertPadding();
  Members.pop_back();
  calculateZeroInit();
  fillOutputFields();
  computeVolatileBitfields();
}

void CGRecordLowering::lowerUnion(bool isNoUniqueAddress) {
  CharUnits LayoutSize =
      isNoUniqueAddress ? Layout.getDataSize() : Layout.getSize();
  llvm::Type *StorageType = nullptr;
  bool SeenNamedMember = false;
  // Iterate through the fields setting bitFieldInfo and the Fields array. Also
  // locate the "most appropriate" storage type.  The heuristic for finding the
  // storage type isn't necessary; the first (non-0-length-bitfield) field's
  // type would work fine and be simpler, but it would differ from what we've
  // been doing and cause lit tests to change.
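  // A sketch of the heuristic below: for 'union { char c; double d; }' the
  // double wins because it has the larger alignment; ties on alignment go to
  // the larger size.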
  for (const auto *Field : D->fields()) {
    if (Field->isBitField()) {
      if (Field->isZeroLengthBitField(Context))
        continue;
      llvm::Type *FieldType = getStorageType(Field);
      if (LayoutSize < getSize(FieldType))
        FieldType = getByteArrayType(LayoutSize);
      setBitFieldInfo(Field, CharUnits::Zero(), FieldType);
    }
    Fields[Field->getCanonicalDecl()] = 0;
    llvm::Type *FieldType = getStorageType(Field);
    // Compute zero-initializable status.
    // This union might not be zero initialized: it may contain a pointer to
    // data member which might have some exotic initialization sequence.
    // If this is the case, then we ought not to try to come up with a "better"
    // type; it might not be very easy to come up with a Constant which
    // correctly initializes it.
    if (!SeenNamedMember) {
      SeenNamedMember = Field->getIdentifier();
      if (!SeenNamedMember)
        if (const auto *FieldRD = Field->getType()->getAsRecordDecl())
          SeenNamedMember = FieldRD->findFirstNamedDataMember();
      if (SeenNamedMember && !isZeroInitializable(Field)) {
        IsZeroInitializable = IsZeroInitializableAsBase = false;
        StorageType = FieldType;
      }
    }
    // Because our union isn't zero initializable, we won't be getting a better
    // storage type.
    if (!IsZeroInitializable)
      continue;
    // Conditionally update our storage type if we've got a new "better" one.
    if (!StorageType ||
        getAlignment(FieldType) >  getAlignment(StorageType) ||
        (getAlignment(FieldType) == getAlignment(StorageType) &&
        getSize(FieldType) > getSize(StorageType)))
      StorageType = FieldType;
  }
  // If we have no storage type just pad to the appropriate size and return.
  if (!StorageType)
    return appendPaddingBytes(LayoutSize);
  // If our storage size was bigger than our required size (can happen in the
  // case of packed bitfields on Itanium) then just use an I8 array.
  if (LayoutSize < getSize(StorageType))
    StorageType = getByteArrayType(LayoutSize);
  FieldTypes.push_back(StorageType);
  appendPaddingBytes(LayoutSize - getSize(StorageType));
  // Set packed if we need it.
  const auto StorageAlignment = getAlignment(StorageType);
  assert((Layout.getSize() % StorageAlignment == 0 ||
          Layout.getDataSize() % StorageAlignment) &&
         "Union's standard layout and no_unique_address layout must agree on "
         "packedness");
  if (Layout.getDataSize() % StorageAlignment)
    Packed = true;
}

void CGRecordLowering::accumulateFields() {
  for (RecordDecl::field_iterator Field = D->field_begin(),
                                  FieldEnd = D->field_end();
    Field != FieldEnd;) {
    if (Field->isBitField()) {
      RecordDecl::field_iterator Start = Field;
      // Iterate to gather the list of bitfields.
      for (++Field; Field != FieldEnd && Field->isBitField(); ++Field);
      accumulateBitFields(Start, Field);
    } else if (!Field->isZeroSize(Context)) {
      // Use base subobject layout for the potentially-overlapping field,
      // as it is done in RecordLayoutBuilder.
      Members.push_back(MemberInfo(
          bitsToCharUnits(getFieldBitOffset(*Field)), MemberInfo::Field,
          Field->isPotentiallyOverlapping()
              ? getStorageType(Field->getType()->getAsCXXRecordDecl())
              : getStorageType(*Field),
          *Field));
      ++Field;
    } else {
      ++Field;
    }
  }
}

void
CGRecordLowering::accumulateBitFields(RecordDecl::field_iterator Field,
                                      RecordDecl::field_iterator FieldEnd) {
  // Run stores the first element of the current run of bitfields.  FieldEnd is
  // used as a special value to note that we don't have a current run.  A
  // bitfield run is a contiguous collection of bitfields that can be stored in
  // the same storage block.  Zero-sized bitfields and bitfields that would
  // cross an alignment boundary break a run and start a new one.
  RecordDecl::field_iterator Run = FieldEnd;
  // Tail is the offset of the first bit off the end of the current run.  It's
  // used to determine if the ASTRecordLayout is treating these two bitfields
  // as contiguous.  StartBitOffset is the offset of the beginning of the Run.
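  // A sketch of the Run/Tail bookkeeping in the discrete path below: for
  // 'short a : 3; short b : 4;' field a starts a run with i16 storage and
  // Tail = 16, and b (bit offset 3, which is < Tail) joins that run instead
  // of allocating new storage.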
  uint64_t StartBitOffset, Tail = 0;
  if (isDiscreteBitFieldABI()) {
    for (; Field != FieldEnd; ++Field) {
      uint64_t BitOffset = getFieldBitOffset(*Field);
      // Zero-width bitfields end runs.
      if (Field->isZeroLengthBitField(Context)) {
        Run = FieldEnd;
        continue;
      }
      llvm::Type *Type =
          Types.ConvertTypeForMem(Field->getType(), /*ForBitField=*/true);
      // If we don't have a run yet, or don't live within the previous run's
      // allocated storage, then we allocate some storage and start a new run.
      if (Run == FieldEnd || BitOffset >= Tail) {
        Run = Field;
        StartBitOffset = BitOffset;
        Tail = StartBitOffset + DataLayout.getTypeAllocSizeInBits(Type);
        // Add the storage member to the record.  This must be added to the
        // record before the bitfield members so that it gets laid out before
        // the bitfields it contains get laid out.
        Members.push_back(StorageInfo(bitsToCharUnits(StartBitOffset), Type));
      }
      // Bitfields get the offset of their storage but come afterward and
      // remain there after a stable sort.
      Members.push_back(MemberInfo(bitsToCharUnits(StartBitOffset),
                                   MemberInfo::Field, nullptr, *Field));
    }
    return;
  }

  // Check if OffsetInRecord (the size in bits of the current run) is better
  // as a single field run. When OffsetInRecord has legal integer width, and
  // its bitfield offset is naturally aligned, it is better to make the
  // bitfield a separate storage component so that it can be accessed directly
  // with lower cost.
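  // For example (a sketch, assuming -ffine-grained-bitfield-accesses is
  // enabled): a 32-bit run starting at a 32-bit-aligned offset becomes its
  // own i32 storage unit, while a 24-bit run does not qualify because 24 is
  // not a power of 2.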
  auto IsBetterAsSingleFieldRun = [&](uint64_t OffsetInRecord,
                                      uint64_t StartBitOffset) {
    if (!Types.getCodeGenOpts().FineGrainedBitfieldAccesses)
      return false;
    if (OffsetInRecord < 8 || !llvm::isPowerOf2_64(OffsetInRecord) ||
        !DataLayout.fitsInLegalInteger(OffsetInRecord))
      return false;
    // Make sure StartBitOffset is naturally aligned if it is treated as an
    // IType integer.
    if (StartBitOffset %
            Context.toBits(getAlignment(getIntNType(OffsetInRecord))) !=
        0)
      return false;
    return true;
  };

  // True if the start field of the current run is better as a single field
  // run.
  bool StartFieldAsSingleRun = false;
  for (;;) {
    // Check to see if we need to start a new run.
    if (Run == FieldEnd) {
      // If we're out of fields, return.
      if (Field == FieldEnd)
        break;
      // Any non-zero-length bitfield can start a new run.
      if (!Field->isZeroLengthBitField(Context)) {
        Run = Field;
        StartBitOffset = getFieldBitOffset(*Field);
        Tail = StartBitOffset + Field->getBitWidthValue(Context);
        StartFieldAsSingleRun = IsBetterAsSingleFieldRun(Tail - StartBitOffset,
                                                         StartBitOffset);
      }
      ++Field;
      continue;
    }

    // If the start field of a new run is better as a single run, or
    // if the current field (or consecutive fields) is better as a single run,
    // or
    // if the current field is a zero-width bitfield and either
    // UseZeroLengthBitfieldAlignment or UseBitFieldTypeAlignment is set to
    // true, or
    // if the offset of the current field is inconsistent with the offset of
    // the previous field plus its width,
    // skip the block below and go ahead to emit the storage.
    // Otherwise, try to add bitfields to the run.
    if (!StartFieldAsSingleRun && Field != FieldEnd &&
        !IsBetterAsSingleFieldRun(Tail - StartBitOffset, StartBitOffset) &&
        (!Field->isZeroLengthBitField(Context) ||
         (!Context.getTargetInfo().useZeroLengthBitfieldAlignment() &&
          !Context.getTargetInfo().useBitFieldTypeAlignment())) &&
        Tail == getFieldBitOffset(*Field)) {
      Tail += Field->getBitWidthValue(Context);
      ++Field;
      continue;
    }

    // We've hit a break-point in the run and need to emit a storage field.
    llvm::Type *Type = getIntNType(Tail - StartBitOffset);
    // Add the storage member to the record and set the bitfield info for all
    // of the bitfields in the run.  Bitfields get the offset of their storage
    // but come afterward and remain there after a stable sort.
    Members.push_back(StorageInfo(bitsToCharUnits(StartBitOffset), Type));
    for (; Run != Field; ++Run)
      Members.push_back(MemberInfo(bitsToCharUnits(StartBitOffset),
                                   MemberInfo::Field, nullptr, *Run));
    Run = FieldEnd;
    StartFieldAsSingleRun = false;
  }
}

void CGRecordLowering::accumulateBases() {
  // If we've got a primary virtual base, we need to add it with the bases.
  if (Layout.isPrimaryBaseVirtual()) {
    const CXXRecordDecl *BaseDecl = Layout.getPrimaryBase();
    Members.push_back(MemberInfo(CharUnits::Zero(), MemberInfo::Base,
                                 getStorageType(BaseDecl), BaseDecl));
  }
  // Accumulate the non-virtual bases.
  for (const auto &Base : RD->bases()) {
    if (Base.isVirtual())
      continue;

    // Bases can be zero-sized even if not technically empty if they
    // contain only a trailing array member.
    const CXXRecordDecl *BaseDecl = Base.getType()->getAsCXXRecordDecl();
    if (!BaseDecl->isEmpty() &&
        !Context.getASTRecordLayout(BaseDecl).getNonVirtualSize().isZero())
      Members.push_back(MemberInfo(Layout.getBaseClassOffset(BaseDecl),
          MemberInfo::Base, getStorageType(BaseDecl), BaseDecl));
  }
}

/// The AAPCS defines that, when possible, bit-fields should
/// be accessed using containers of the declared type width:
/// When a volatile bit-field is read, and its container does not overlap with
/// any non-bit-field member or any zero length bit-field member, its container
/// must be read exactly once using the access width appropriate to the type of
/// the container. When a volatile bit-field is written, and its container does
/// not overlap with any non-bit-field member or any zero-length bit-field
/// member, its container must be read exactly once and written exactly once
/// using the access width appropriate to the type of the container. The two
/// accesses are not atomic.
///
/// Enforcing the width restriction can be disabled using
/// -fno-aapcs-bitfield-width.
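///
/// For example (an illustrative sketch): for
///   struct S { volatile int x : 8; };
/// the bit-field would otherwise get i8 storage, but under AAPCS it is
/// accessed through its declared 32-bit container, which here fits entirely
/// within the record and overlaps no other member, so the volatile access
/// parameters below are widened accordingly.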
void CGRecordLowering::computeVolatileBitfields() {
  if (!isAAPCS() || !Types.getCodeGenOpts().AAPCSBitfieldWidth)
    return;

  for (auto &I : BitFields) {
    const FieldDecl *Field = I.first;
    CGBitFieldInfo &Info = I.second;
    llvm::Type *ResLTy = Types.ConvertTypeForMem(Field->getType());
    // If the record alignment is less than the type width, we can't enforce
    // an aligned load, bail out.
    if ((uint64_t)(Context.toBits(Layout.getAlignment())) <
        ResLTy->getPrimitiveSizeInBits())
      continue;
    // CGRecordLowering::setBitFieldInfo() pre-adjusts the bit-field offsets
    // for big-endian targets, but it assumes a container of width
    // Info.StorageSize. Since AAPCS uses a different container size (width
    // of the type), we first undo that calculation here and redo it once
    // the bit-field offset within the new container is calculated.
    const unsigned OldOffset =
        isBE() ? Info.StorageSize - (Info.Offset + Info.Size) : Info.Offset;
    // Offset to the bit-field from the beginning of the struct.
    const unsigned AbsoluteOffset =
        Context.toBits(Info.StorageOffset) + OldOffset;

    // Container size is the width of the bit-field type.
    const unsigned StorageSize = ResLTy->getPrimitiveSizeInBits();
    // Nothing to do if the access uses the desired
    // container width and is naturally aligned.
    if (Info.StorageSize == StorageSize && (OldOffset % StorageSize == 0))
      continue;

    // Offset within the container.
    unsigned Offset = AbsoluteOffset & (StorageSize - 1);
    // Bail out if an aligned load of the container cannot cover the entire
    // bit-field. This can happen, for example, if the bit-field is part of a
    // packed struct. AAPCS does not define access rules for such cases, so we
    // let clang follow its own rules.
    if (Offset + Info.Size > StorageSize)
      continue;

    // Re-adjust offsets for big-endian targets.
    if (isBE())
      Offset = StorageSize - (Offset + Info.Size);

    const CharUnits StorageOffset =
        Context.toCharUnitsFromBits(AbsoluteOffset & ~(StorageSize - 1));
    const CharUnits End = StorageOffset +
                          Context.toCharUnitsFromBits(StorageSize) -
                          CharUnits::One();

    const ASTRecordLayout &Layout =
        Context.getASTRecordLayout(Field->getParent());
    // If the access would touch memory outside the record, bail out.
    const CharUnits RecordSize = Layout.getSize();
    if (End >= RecordSize)
      continue;

    // Bail out if performing this load would access non-bit-field members.
    bool Conflict = false;
    for (const auto *F : D->fields()) {
      // Allow overlap with sized bit-fields.
      if (F->isBitField() && !F->isZeroLengthBitField(Context))
        continue;

      const CharUnits FOffset = Context.toCharUnitsFromBits(
          Layout.getFieldOffset(F->getFieldIndex()));

      // As C11 defines it, a zero-sized bit-field defines a barrier, so
      // fields before and after it should be free of race conditions.
      // The AAPCS acknowledges this and imposes no restrictions when the
      // natural container overlaps a zero-length bit-field.
      if (F->isZeroLengthBitField(Context)) {
        if (End > FOffset && StorageOffset < FOffset) {
          Conflict = true;
          break;
        }
      }

      const CharUnits FEnd =
          FOffset +
          Context.toCharUnitsFromBits(
              Types.ConvertTypeForMem(F->getType())->getPrimitiveSizeInBits()) -
          CharUnits::One();
      // If no overlap, continue.
      if (End < FOffset || FEnd < StorageOffset)
        continue;

      // The desired load overlaps a non-bit-field member, bail out.
      Conflict = true;
      break;
    }

    if (Conflict)
      continue;
    // Write the new bit-field access parameters.
    // As the storage offset is now defined as the number of elements from the
    // start of the structure, we should divide the Offset by the element size.
    Info.VolatileStorageOffset =
        StorageOffset / Context.toCharUnitsFromBits(StorageSize).getQuantity();
    Info.VolatileStorageSize = StorageSize;
    Info.VolatileOffset = Offset;
  }
}

void CGRecordLowering::accumulateVPtrs() {
  if (Layout.hasOwnVFPtr())
    Members.push_back(MemberInfo(CharUnits::Zero(), MemberInfo::VFPtr,
        llvm::FunctionType::get(getIntNType(32), /*isVarArg=*/true)->
            getPointerTo()->getPointerTo()));
  if (Layout.hasOwnVBPtr())
    Members.push_back(MemberInfo(Layout.getVBPtrOffset(), MemberInfo::VBPtr,
        llvm::Type::getInt32PtrTy(Types.getLLVMContext())));
}

void CGRecordLowering::accumulateVBases() {
  CharUnits ScissorOffset = Layout.getNonVirtualSize();
  // In the Itanium ABI, it's possible to place a vbase at a dsize that is
  // smaller than the nvsize.  Here we check to see if such a base is placed
  // before the nvsize and set the scissor offset to that, instead of the
  // nvsize.
  if (isOverlappingVBaseABI())
    for (const auto &Base : RD->vbases()) {
      const CXXRecordDecl *BaseDecl = Base.getType()->getAsCXXRecordDecl();
      if (BaseDecl->isEmpty())
        continue;
      // If the vbase is a primary virtual base of some base, then it doesn't
      // get its own storage location but instead lives inside of that base.
      if (Context.isNearlyEmpty(BaseDecl) && !hasOwnStorage(RD, BaseDecl))
        continue;
      ScissorOffset = std::min(ScissorOffset,
                               Layout.getVBaseClassOffset(BaseDecl));
    }
  Members.push_back(MemberInfo(ScissorOffset, MemberInfo::Scissor, nullptr,
                               RD));
  for (const auto &Base : RD->vbases()) {
    const CXXRecordDecl *BaseDecl = Base.getType()->getAsCXXRecordDecl();
    if (BaseDecl->isEmpty())
      continue;
    CharUnits Offset = Layout.getVBaseClassOffset(BaseDecl);
    // If the vbase is a primary virtual base of some base, then it doesn't
    // get its own storage location but instead lives inside of that base.
    if (isOverlappingVBaseABI() &&
        Context.isNearlyEmpty(BaseDecl) &&
        !hasOwnStorage(RD, BaseDecl)) {
      Members.push_back(MemberInfo(Offset, MemberInfo::VBase, nullptr,
                                   BaseDecl));
      continue;
    }
    // If we've got a vtordisp, add it as a storage type.
    if (Layout.getVBaseOffsetsMap().find(BaseDecl)->second.hasVtorDisp())
      Members.push_back(StorageInfo(Offset - CharUnits::fromQuantity(4),
                                    getIntNType(32)));
    Members.push_back(MemberInfo(Offset, MemberInfo::VBase,
                                 getStorageType(BaseDecl), BaseDecl));
  }
}

bool CGRecordLowering::hasOwnStorage(const CXXRecordDecl *Decl,
                                     const CXXRecordDecl *Query) {
  const ASTRecordLayout &DeclLayout = Context.getASTRecordLayout(Decl);
  if (DeclLayout.isPrimaryBaseVirtual() && DeclLayout.getPrimaryBase() == Query)
    return false;
  for (const auto &Base : Decl->bases())
    if (!hasOwnStorage(Base.getType()->getAsCXXRecordDecl(), Query))
      return false;
  return true;
}

void CGRecordLowering::calculateZeroInit() {
  for (std::vector<MemberInfo>::const_iterator Member = Members.begin(),
                                               MemberEnd = Members.end();
       IsZeroInitializableAsBase && Member != MemberEnd; ++Member) {
    if (Member->Kind == MemberInfo::Field) {
      if (!Member->FD || isZeroInitializable(Member->FD))
        continue;
      IsZeroInitializable = IsZeroInitializableAsBase = false;
    } else if (Member->Kind == MemberInfo::Base ||
               Member->Kind == MemberInfo::VBase) {
      if (isZeroInitializable(Member->RD))
        continue;
      IsZeroInitializable = false;
      if (Member->Kind == MemberInfo::Base)
        IsZeroInitializableAsBase = false;
    }
  }
}

void CGRecordLowering::clipTailPadding() {
  std::vector<MemberInfo>::iterator Prior = Members.begin();
  CharUnits Tail = getSize(Prior->Data);
  for (std::vector<MemberInfo>::iterator Member = Prior + 1,
                                         MemberEnd = Members.end();
       Member != MemberEnd; ++Member) {
    // Only members with data and the scissor can cut into tail padding.
    if (!Member->Data && Member->Kind != MemberInfo::Scissor)
      continue;
    if (Member->Offset < Tail) {
      assert(Prior->Kind == MemberInfo::Field &&
             "Only storage fields have tail padding!");
      if (!Prior->FD || Prior->FD->isBitField())
        Prior->Data = getByteArrayType(bitsToCharUnits(llvm::alignTo(
            cast<llvm::IntegerType>(Prior->Data)->getIntegerBitWidth(), 8)));
      else {
        assert(Prior->FD->hasAttr<NoUniqueAddressAttr>() &&
               "should not have reused this field's tail padding");
        Prior->Data = getByteArrayType(
            Context.getTypeInfoDataSizeInChars(Prior->FD->getType()).Width);
      }
    }
    if (Member->Data)
      Prior = Member;
    Tail = Prior->Offset + getSize(Prior->Data);
  }
}

void CGRecordLowering::determinePacked(bool NVBaseType) {
  if (Packed)
    return;
  CharUnits Alignment = CharUnits::One();
  CharUnits NVAlignment = CharUnits::One();
  CharUnits NVSize =
      !NVBaseType && RD ? Layout.getNonVirtualSize() : CharUnits::Zero();
  for (std::vector<MemberInfo>::const_iterator Member = Members.begin(),
                                               MemberEnd = Members.end();
       Member != MemberEnd; ++Member) {
    if (!Member->Data)
      continue;
    // If any member falls at an offset that is not a multiple of its
    // alignment, then the entire record must be packed.
    if (Member->Offset % getAlignment(Member->Data))
      Packed = true;
    if (Member->Offset < NVSize)
      NVAlignment = std::max(NVAlignment, getAlignment(Member->Data));
    Alignment = std::max(Alignment, getAlignment(Member->Data));
  }
  // If the size of the record (the capstone's offset) is not a multiple of the
  // record's alignment, it must be packed.
  if (Members.back().Offset % Alignment)
    Packed = true;
  // If the non-virtual sub-object is not a multiple of the non-virtual
  // sub-object's alignment, it must be packed.  We cannot have a packed
  // non-virtual sub-object and an unpacked complete object or vice versa.
  if (NVSize % NVAlignment)
    Packed = true;
  // Update the alignment of the sentinel.
  if (!Packed)
    Members.back().Data = getIntNType(Context.toBits(Alignment));
}

void CGRecordLowering::insertPadding() {
  std::vector<std::pair<CharUnits, CharUnits> > Padding;
  CharUnits Size = CharUnits::Zero();
  for (std::vector<MemberInfo>::const_iterator Member = Members.begin(),
                                               MemberEnd = Members.end();
       Member != MemberEnd; ++Member) {
    if (!Member->Data)
      continue;
    CharUnits Offset = Member->Offset;
    assert(Offset >= Size);
    // Insert padding if we need to.
    if (Offset !=
        Size.alignTo(Packed ? CharUnits::One() : getAlignment(Member->Data)))
      Padding.push_back(std::make_pair(Size, Offset - Size));
    Size = Offset + getSize(Member->Data);
  }
  if (Padding.empty())
    return;
  // Add the padding to the Members list and sort it.
  for (std::vector<std::pair<CharUnits, CharUnits> >::const_iterator
        Pad = Padding.begin(), PadEnd = Padding.end();
        Pad != PadEnd; ++Pad)
    Members.push_back(StorageInfo(Pad->first, getByteArrayType(Pad->second)));
  llvm::stable_sort(Members);
}

void CGRecordLowering::fillOutputFields() {
  for (std::vector<MemberInfo>::const_iterator Member = Members.begin(),
                                               MemberEnd = Members.end();
       Member != MemberEnd; ++Member) {
    if (Member->Data)
      FieldTypes.push_back(Member->Data);
    if (Member->Kind == MemberInfo::Field) {
      if (Member->FD)
        Fields[Member->FD->getCanonicalDecl()] = FieldTypes.size() - 1;
      // A field without storage must be a bitfield.
      if (!Member->Data)
        setBitFieldInfo(Member->FD, Member->Offset, FieldTypes.back());
    } else if (Member->Kind == MemberInfo::Base)
      NonVirtualBases[Member->RD] = FieldTypes.size() - 1;
    else if (Member->Kind == MemberInfo::VBase)
      VirtualBases[Member->RD] = FieldTypes.size() - 1;
  }
}

CGBitFieldInfo CGBitFieldInfo::MakeInfo(CodeGenTypes &Types,
                                        const FieldDecl *FD,
                                        uint64_t Offset, uint64_t Size,
                                        uint64_t StorageSize,
                                        CharUnits StorageOffset) {
  // This function is vestigial from CGRecordLayoutBuilder days but is still
  // used in GCObjCRuntime.cpp.  That usage has a "fixme" attached to it that
  // when addressed will allow for the removal of this function.
  llvm::Type *Ty = Types.ConvertTypeForMem(FD->getType());
  CharUnits TypeSizeInBytes =
    CharUnits::fromQuantity(Types.getDataLayout().getTypeAllocSize(Ty));
  uint64_t TypeSizeInBits = Types.getContext().toBits(TypeSizeInBytes);

  bool IsSigned = FD->getType()->isSignedIntegerOrEnumerationType();

  if (Size > TypeSizeInBits) {
    // We have a wide bit-field. The extra bits are only used for padding, so
    // if we have a bitfield of type T, with size N:
    //
    // T t : N;
    //
    // We can just assume that it's:
    //
    // T t : sizeof(T);
    //
    Size = TypeSizeInBits;
  }

  // Reverse the bit offsets for big endian machines. Because we represent
  // a bitfield as a single large integer load, we can imagine the bits
  // counting from the most-significant-bit instead of the
  // least-significant-bit.
  if (Types.getDataLayout().isBigEndian()) {
    Offset = StorageSize - (Offset + Size);
  }

  return CGBitFieldInfo(Offset, Size, IsSigned, StorageSize, StorageOffset);
}

std::unique_ptr<CGRecordLayout>
CodeGenTypes::ComputeRecordLayout(const RecordDecl *D, llvm::StructType *Ty) {
  CGRecordLowering Builder(*this, D, /*Packed=*/false);

  Builder.lower(/*NonVirtualBaseType=*/false);

  // If we're in C++, compute the base subobject type.
  llvm::StructType *BaseTy = nullptr;
  if (isa<CXXRecordDecl>(D)) {
    BaseTy = Ty;
    if (Builder.Layout.getNonVirtualSize() != Builder.Layout.getSize()) {
      CGRecordLowering BaseBuilder(*this, D, /*Packed=*/Builder.Packed);
      BaseBuilder.lower(/*NonVirtualBaseType=*/true);
      BaseTy = llvm::StructType::create(
          getLLVMContext(), BaseBuilder.FieldTypes, "", BaseBuilder.Packed);
      addRecordTypeName(D, BaseTy, ".base");
      // BaseTy and Ty must agree on their packedness for getLLVMFieldNo to
      // work on both of them with the same index.
      assert(Builder.Packed == BaseBuilder.Packed &&
             "Non-virtual and complete types must agree on packedness");
    }
  }

  // Fill in the struct *after* computing the base type.  Filling in the body
  // signifies that the type is no longer opaque and record layout is complete,
  // but we may need to recursively layout D while laying D out as a base type.
  Ty->setBody(Builder.FieldTypes, Builder.Packed);

  auto RL = std::make_unique<CGRecordLayout>(
      Ty, BaseTy, (bool)Builder.IsZeroInitializable,
      (bool)Builder.IsZeroInitializableAsBase);

  RL->NonVirtualBases.swap(Builder.NonVirtualBases);
  RL->CompleteObjectVirtualBases.swap(Builder.VirtualBases);

  // Add all the field numbers.
  RL->FieldInfo.swap(Builder.Fields);

  // Add bitfield info.
  RL->BitFields.swap(Builder.BitFields);

  // Dump the layout, if requested.
  if (getContext().getLangOpts().DumpRecordLayouts) {
    llvm::outs() << "\n*** Dumping IRgen Record Layout\n";
    llvm::outs() << "Record: ";
    D->dump(llvm::outs());
    llvm::outs() << "\nLayout: ";
    RL->print(llvm::outs());
  }

#ifndef NDEBUG
  // Verify that the computed LLVM struct size matches the AST layout size.
  const ASTRecordLayout &Layout = getContext().getASTRecordLayout(D);

  uint64_t TypeSizeInBits = getContext().toBits(Layout.getSize());
  assert(TypeSizeInBits == getDataLayout().getTypeAllocSizeInBits(Ty) &&
         "Type size mismatch!");

  if (BaseTy) {
    CharUnits NonVirtualSize  = Layout.getNonVirtualSize();

    uint64_t AlignedNonVirtualTypeSizeInBits =
      getContext().toBits(NonVirtualSize);

    assert(AlignedNonVirtualTypeSizeInBits ==
           getDataLayout().getTypeAllocSizeInBits(BaseTy) &&
           "Type size mismatch!");
  }

  // Verify that the LLVM and AST field offsets agree.
  llvm::StructType *ST = RL->getLLVMType();
  const llvm::StructLayout *SL = getDataLayout().getStructLayout(ST);

  const ASTRecordLayout &AST_RL = getContext().getASTRecordLayout(D);
  RecordDecl::field_iterator it = D->field_begin();
  for (unsigned i = 0, e = AST_RL.getFieldCount(); i != e; ++i, ++it) {
    const FieldDecl *FD = *it;

    // Ignore zero-sized fields.
    if (FD->isZeroSize(getContext()))
      continue;

    // For non-bit-fields, just check that the LLVM struct offset matches the
    // AST offset.
    if (!FD->isBitField()) {
      unsigned FieldNo = RL->getLLVMFieldNo(FD);
      assert(AST_RL.getFieldOffset(i) == SL->getElementOffsetInBits(FieldNo) &&
             "Invalid field offset!");
      continue;
    }

    // Ignore unnamed bit-fields.
    if (!FD->getDeclName())
      continue;

    const CGBitFieldInfo &Info = RL->getBitFieldInfo(FD);
    llvm::Type *ElementTy = ST->getTypeAtIndex(RL->getLLVMFieldNo(FD));

    // Unions have overlapping elements dictating their layout, but for
    // non-unions we can verify that this section of the layout is the exact
    // expected size.
    if (D->isUnion()) {
      // For unions we verify that the start is zero and the size
      // is in-bounds. However, on BE systems, the offset may be non-zero, but
      // the size + offset should match the storage size in that case as it
      // "starts" at the back.
      if (getDataLayout().isBigEndian())
        assert(static_cast<unsigned>(Info.Offset + Info.Size) ==
               Info.StorageSize &&
               "Big endian union bitfield does not end at the back");
      else
        assert(Info.Offset == 0 &&
               "Little endian union bitfield with a non-zero offset");
      assert(Info.StorageSize <= SL->getSizeInBits() &&
             "Union not large enough for bitfield storage");
    } else {
      assert((Info.StorageSize ==
                  getDataLayout().getTypeAllocSizeInBits(ElementTy) ||
              Info.VolatileStorageSize ==
                  getDataLayout().getTypeAllocSizeInBits(ElementTy)) &&
             "Storage size does not match the element type size");
    }
    assert(Info.Size > 0 && "Empty bitfield!");
    assert(static_cast<unsigned>(Info.Offset) + Info.Size <= Info.StorageSize &&
           "Bitfield outside of its allocated storage");
  }
#endif

  return RL;
}

void CGRecordLayout::print(raw_ostream &OS) const {
  OS << "<CGRecordLayout\n";
  OS << "  LLVMType:" << *CompleteObjectType << "\n";
  if (BaseSubobjectType)
    OS << "  NonVirtualBaseLLVMType:" << *BaseSubobjectType << "\n";
  OS << "  IsZeroInitializable:" << IsZeroInitializable << "\n";
  OS << "  BitFields:[\n";

  // Print bit-field infos in declaration order.
  std::vector<std::pair<unsigned, const CGBitFieldInfo*> > BFIs;
  for (llvm::DenseMap<const FieldDecl*, CGBitFieldInfo>::const_iterator
         it = BitFields.begin(), ie = BitFields.end();
       it != ie; ++it) {
    const RecordDecl *RD = it->first->getParent();
    unsigned Index = 0;
    for (RecordDecl::field_iterator
           it2 = RD->field_begin(); *it2 != it->first; ++it2)
      ++Index;
    BFIs.push_back(std::make_pair(Index, &it->second));
  }
  llvm::array_pod_sort(BFIs.begin(), BFIs.end());
  for (unsigned i = 0, e = BFIs.size(); i != e; ++i) {
    OS.indent(4);
    BFIs[i].second->print(OS);
    OS << "\n";
  }

  OS << "]>\n";
}

LLVM_DUMP_METHOD void CGRecordLayout::dump() const {
  print(llvm::errs());
}

void CGBitFieldInfo::print(raw_ostream &OS) const {
  OS << "<CGBitFieldInfo"
     << " Offset:" << Offset << " Size:" << Size << " IsSigned:" << IsSigned
     << " StorageSize:" << StorageSize
     << " StorageOffset:" << StorageOffset.getQuantity()
     << " VolatileOffset:" << VolatileOffset
     << " VolatileStorageSize:" << VolatileStorageSize
     << " VolatileStorageOffset:" << VolatileStorageOffset.getQuantity() << ">";
}

LLVM_DUMP_METHOD void CGBitFieldInfo::dump() const {
  print(llvm::errs());
}
1066