//===--- CGRecordLayoutBuilder.cpp - CGRecordLayout builder ----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Builder implementation for CGRecordLayout objects.
//
//===----------------------------------------------------------------------===//

#include "CGRecordLayout.h"
#include "CGCXXABI.h"
#include "CodeGenTypes.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/CXXInheritance.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/Expr.h"
#include "clang/AST/RecordLayout.h"
#include "clang/Basic/CodeGenOptions.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
using namespace clang;
using namespace CodeGen;

namespace {
/// The CGRecordLowering is responsible for lowering an ASTRecordLayout to an
/// llvm::Type. Some of the lowering is straightforward, some is not. Here we
/// detail some of the complexities and weirdnesses.
/// * LLVM does not have unions - Unions can, in theory, be represented by any
///   llvm::Type with the correct size. We choose a field via a specific
///   heuristic and add padding if necessary.
/// * LLVM does not have bitfields - Bitfields are collected into contiguous
///   runs and allocated as a single storage type for the run. ASTRecordLayout
///   contains enough information to determine where the runs break. Microsoft
///   and Itanium follow different rules and use different codepaths.
/// * It is desired that, when possible, bitfields use the appropriate iN type
///   when lowered to llvm types. For example unsigned x : 24 gets lowered to
///   i24. This isn't always possible because i24 has a storage size of 32 bits
///   and if that extra byte of padding might be used we must instead use
///   [3 x i8]. The function clipTailPadding does this.
///   C++ examples that require clipping:
///   struct { int a : 24; char b; }; // a must be clipped, b goes at offset 3
///   struct A { int a : 24; }; // a must be clipped because a struct like B
///   could exist: struct B : A { char b; }; // b goes at offset 3
/// * Clang ignores 0 sized bitfields and 0 sized bases but *not* zero sized
///   fields. The existing asserts suggest that LLVM assumes that *every* field
///   has an underlying storage type. Therefore empty structures containing
///   zero sized subobjects such as empty records or zero sized arrays still
///   get a zero sized (empty struct) storage type.
/// * Clang reads the complete type rather than the base type when generating
///   code to access fields. Bitfields in tail position with tail padding may
///   be clipped in the base class but not the complete class (we may discover
///   that the tail padding is not used in the complete class.) However,
///   because LLVM reads from the complete type it can generate incorrect code
///   if we do not clip the tail padding off of the bitfield in the complete
///   layout. This introduces a somewhat awkward extra unnecessary clip stage.
///   The location of the clip is stored internally as a sentinel of type
///   SCISSOR. If LLVM were updated to read base types (which it probably
///   should because locations of things such as VBases are bogus in the llvm
///   type anyway) then we could eliminate the SCISSOR.
/// * Itanium allows nearly empty primary virtual bases. These bases don't
///   get their own storage because they're laid out as part of another base
///   or at the beginning of the structure. Determining if a VBase actually
///   gets storage awkwardly involves a walk of all bases.
/// * VFPtrs and VBPtrs do *not* make a record NotZeroInitializable.
struct CGRecordLowering {
  // MemberInfo is a helper structure that contains information about a record
  // member. In addition to the standard member types, there exists a
  // sentinel member type that ensures correct rounding.
  struct MemberInfo {
    CharUnits Offset;
    enum InfoKind { VFPtr, VBPtr, Field, Base, VBase, Scissor } Kind;
    llvm::Type *Data;
    union {
      const FieldDecl *FD;
      const CXXRecordDecl *RD;
    };
    MemberInfo(CharUnits Offset, InfoKind Kind, llvm::Type *Data,
               const FieldDecl *FD = nullptr)
        : Offset(Offset), Kind(Kind), Data(Data), FD(FD) {}
    MemberInfo(CharUnits Offset, InfoKind Kind, llvm::Type *Data,
               const CXXRecordDecl *RD)
        : Offset(Offset), Kind(Kind), Data(Data), RD(RD) {}
    // MemberInfos are sorted so we define a < operator.
    bool operator <(const MemberInfo& a) const { return Offset < a.Offset; }
  };
  // The constructor.
  CGRecordLowering(CodeGenTypes &Types, const RecordDecl *D, bool Packed);
  // Short helper routines.
  /// Constructs a MemberInfo instance from an offset and llvm::Type *.
  MemberInfo StorageInfo(CharUnits Offset, llvm::Type *Data) {
    return MemberInfo(Offset, MemberInfo::Field, Data);
  }

  /// The Microsoft bitfield layout rule allocates discrete storage
  /// units of the field's formal type and only combines adjacent
  /// fields of the same formal type. We want to emit a layout with
  /// these discrete storage units instead of combining them into a
  /// continuous run.
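  /// For example (illustrative): under the Microsoft ABI,
  /// 'struct { int a : 3; short b : 4; }' keeps two discrete storage units
  /// (an i32 for 'a', then an i16 for 'b') rather than merging the bitfields
  /// into one run.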
  bool isDiscreteBitFieldABI() {
    return Context.getTargetInfo().getCXXABI().isMicrosoft() ||
           D->isMsStruct(Context);
  }

  /// Helper function to check if we are targeting AAPCS.
  bool isAAPCS() const {
    return Context.getTargetInfo().getABI().startswith("aapcs");
  }

  /// Helper function to check if the target machine is BigEndian.
  bool isBE() const { return Context.getTargetInfo().isBigEndian(); }

  /// The Itanium base layout rule allows virtual bases to overlap
  /// other bases, which complicates layout in specific ways.
  ///
  /// Note specifically that the ms_struct attribute doesn't change this.
  bool isOverlappingVBaseABI() {
    return !Context.getTargetInfo().getCXXABI().isMicrosoft();
  }

  /// Wraps llvm::Type::getIntNTy with some implicit arguments.
  llvm::Type *getIntNType(uint64_t NumBits) {
    return llvm::Type::getIntNTy(Types.getLLVMContext(),
                                 (unsigned)llvm::alignTo(NumBits, 8));
  }
  /// Gets an llvm type of size NumBytes and alignment 1.
  llvm::Type *getByteArrayType(CharUnits NumBytes) {
    assert(!NumBytes.isZero() && "Empty byte arrays aren't allowed.");
    llvm::Type *Type = llvm::Type::getInt8Ty(Types.getLLVMContext());
    return NumBytes == CharUnits::One() ? Type :
        (llvm::Type *)llvm::ArrayType::get(Type, NumBytes.getQuantity());
  }
  /// Gets the storage type for a field decl and handles storage
  /// for Itanium bitfields that are smaller than their declared type.
  llvm::Type *getStorageType(const FieldDecl *FD) {
    llvm::Type *Type = Types.ConvertTypeForMem(FD->getType());
    if (!FD->isBitField()) return Type;
    if (isDiscreteBitFieldABI()) return Type;
    return getIntNType(std::min(FD->getBitWidthValue(Context),
                                (unsigned)Context.toBits(getSize(Type))));
  }
  /// Gets the llvm base subobject type from a CXXRecordDecl.
  llvm::Type *getStorageType(const CXXRecordDecl *RD) {
    return Types.getCGRecordLayout(RD).getBaseSubobjectLLVMType();
  }
  CharUnits bitsToCharUnits(uint64_t BitOffset) {
    return Context.toCharUnitsFromBits(BitOffset);
  }
  CharUnits getSize(llvm::Type *Type) {
    return CharUnits::fromQuantity(DataLayout.getTypeAllocSize(Type));
  }
  CharUnits getAlignment(llvm::Type *Type) {
    return CharUnits::fromQuantity(DataLayout.getABITypeAlignment(Type));
  }
  bool isZeroInitializable(const FieldDecl *FD) {
    return Types.isZeroInitializable(FD->getType());
  }
  bool isZeroInitializable(const RecordDecl *RD) {
    return Types.isZeroInitializable(RD);
  }
  void appendPaddingBytes(CharUnits Size) {
    if (!Size.isZero())
      FieldTypes.push_back(getByteArrayType(Size));
  }
  uint64_t getFieldBitOffset(const FieldDecl *FD) {
    return Layout.getFieldOffset(FD->getFieldIndex());
  }
  // Layout routines.
  void setBitFieldInfo(const FieldDecl *FD, CharUnits StartOffset,
                       llvm::Type *StorageType);
  /// Lowers an ASTRecordLayout to an llvm type.
  void lower(bool NonVirtualBaseType);
  void lowerUnion();
  void accumulateFields();
  void accumulateBitFields(RecordDecl::field_iterator Field,
                           RecordDecl::field_iterator FieldEnd);
  void computeVolatileBitfields();
  void accumulateBases();
  void accumulateVPtrs();
  void accumulateVBases();
  /// Recursively searches all of the bases to find out if a vbase is
  /// not the primary vbase of some base class.
  bool hasOwnStorage(const CXXRecordDecl *Decl, const CXXRecordDecl *Query);
  void calculateZeroInit();
  /// Lowers bitfield storage types to I8 arrays for bitfields with tail
  /// padding that is or can potentially be used.
  void clipTailPadding();
  /// Determines if we need a packed llvm struct.
  void determinePacked(bool NVBaseType);
  /// Inserts padding everywhere it's needed.
  void insertPadding();
  /// Fills out the structures that are ultimately consumed.
  void fillOutputFields();
  // Input memoization fields.
  CodeGenTypes &Types;
  const ASTContext &Context;
  const RecordDecl *D;
  const CXXRecordDecl *RD;
  const ASTRecordLayout &Layout;
  const llvm::DataLayout &DataLayout;
  // Helpful intermediate data-structures.
  std::vector<MemberInfo> Members;
  // Output fields, consumed by CodeGenTypes::ComputeRecordLayout.
  SmallVector<llvm::Type *, 16> FieldTypes;
  llvm::DenseMap<const FieldDecl *, unsigned> Fields;
  llvm::DenseMap<const FieldDecl *, CGBitFieldInfo> BitFields;
  llvm::DenseMap<const CXXRecordDecl *, unsigned> NonVirtualBases;
  llvm::DenseMap<const CXXRecordDecl *, unsigned> VirtualBases;
  bool IsZeroInitializable : 1;
  bool IsZeroInitializableAsBase : 1;
  bool Packed : 1;
private:
  CGRecordLowering(const CGRecordLowering &) = delete;
  void operator =(const CGRecordLowering &) = delete;
};
} // namespace

CGRecordLowering::CGRecordLowering(CodeGenTypes &Types, const RecordDecl *D,
                                   bool Packed)
    : Types(Types), Context(Types.getContext()), D(D),
      RD(dyn_cast<CXXRecordDecl>(D)),
      Layout(Types.getContext().getASTRecordLayout(D)),
      DataLayout(Types.getDataLayout()), IsZeroInitializable(true),
      IsZeroInitializableAsBase(true), Packed(Packed) {}

void CGRecordLowering::setBitFieldInfo(
    const FieldDecl *FD, CharUnits StartOffset, llvm::Type *StorageType) {
  CGBitFieldInfo &Info = BitFields[FD->getCanonicalDecl()];
  Info.IsSigned = FD->getType()->isSignedIntegerOrEnumerationType();
  Info.Offset = (unsigned)(getFieldBitOffset(FD) - Context.toBits(StartOffset));
  Info.Size = FD->getBitWidthValue(Context);
  Info.StorageSize = (unsigned)DataLayout.getTypeAllocSizeInBits(StorageType);
  Info.StorageOffset = StartOffset;
  if (Info.Size > Info.StorageSize)
    Info.Size = Info.StorageSize;
  // Reverse the bit offsets for big endian machines. Because we represent
  // a bitfield as a single large integer load, we can imagine the bits
  // counting from the most-significant-bit instead of the
  // least-significant-bit.
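  // For example, in a 32-bit storage unit a bit-field with little-endian
  // Offset 0 and Size 8 gets big-endian Offset 32 - (0 + 8) = 24.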
  if (DataLayout.isBigEndian())
    Info.Offset = Info.StorageSize - (Info.Offset + Info.Size);

  Info.VolatileStorageSize = 0;
  Info.VolatileOffset = 0;
  Info.VolatileStorageOffset = CharUnits::Zero();
}

void CGRecordLowering::lower(bool NVBaseType) {
  // The lowering process implemented in this function takes a variety of
  // carefully ordered phases.
  // 1) Store all members (fields and bases) in a list and sort them by offset.
  // 2) Add a 1-byte capstone member at the Size of the structure.
  // 3) Clip bitfield storage members if their tail padding is or might be
  //    used by another field or base. The clipping process uses the capstone
  //    by treating it as another object that occurs after the record.
  // 4) Determine if the llvm-struct requires packing. It's important that this
  //    phase occur after clipping, because clipping changes the llvm type.
  //    This phase reads the offset of the capstone when determining packedness
  //    and updates the alignment of the capstone to be equal to the alignment
  //    of the record after doing so.
  // 5) Insert padding everywhere it is needed. This phase requires 'Packed' to
  //    have been computed and needs to know the alignment of the record in
  //    order to understand if explicit tail padding is needed.
  // 6) Remove the capstone; we don't need it anymore.
  // 7) Determine if this record can be zero-initialized. This phase could have
  //    been placed anywhere after phase 1.
  // 8) Format the complete list of members in a way that can be consumed by
  //    CodeGenTypes::ComputeRecordLayout.
  CharUnits Size = NVBaseType ? Layout.getNonVirtualSize() : Layout.getSize();
  if (D->isUnion()) {
    lowerUnion();
    computeVolatileBitfields();
    return;
  }
  accumulateFields();
  // RD implies C++.
  if (RD) {
    accumulateVPtrs();
    accumulateBases();
    if (Members.empty()) {
      appendPaddingBytes(Size);
      computeVolatileBitfields();
      return;
    }
    if (!NVBaseType)
      accumulateVBases();
  }
  llvm::stable_sort(Members);
  Members.push_back(StorageInfo(Size, getIntNType(8)));
  clipTailPadding();
  determinePacked(NVBaseType);
  insertPadding();
  Members.pop_back();
  calculateZeroInit();
  fillOutputFields();
  computeVolatileBitfields();
}

void CGRecordLowering::lowerUnion() {
  CharUnits LayoutSize = Layout.getSize();
  llvm::Type *StorageType = nullptr;
  bool SeenNamedMember = false;
  // Iterate through the fields setting bitFieldInfo and the Fields array. Also
  // locate the "most appropriate" storage type. The heuristic for finding the
  // storage type isn't necessary; the first (non-0-length-bitfield) field's
  // type would work fine and be simpler, but it would differ from what we've
  // been doing and cause lit tests to change.
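  // For example (illustrative): in 'union { char c; double d; }' the double
  // wins on alignment, so the union lowers to an LLVM '{ double }' with no
  // extra padding needed.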
  for (const auto *Field : D->fields()) {
    if (Field->isBitField()) {
      if (Field->isZeroLengthBitField(Context))
        continue;
      llvm::Type *FieldType = getStorageType(Field);
      if (LayoutSize < getSize(FieldType))
        FieldType = getByteArrayType(LayoutSize);
      setBitFieldInfo(Field, CharUnits::Zero(), FieldType);
    }
    Fields[Field->getCanonicalDecl()] = 0;
    llvm::Type *FieldType = getStorageType(Field);
    // Compute zero-initializable status.
    // This union might not be zero initialized: it may contain a pointer to
    // data member which might have some exotic initialization sequence.
    // If this is the case, then we ought not try to come up with a "better"
    // type; it might not be very easy to come up with a Constant which
    // correctly initializes it.
    if (!SeenNamedMember) {
      SeenNamedMember = Field->getIdentifier();
      if (!SeenNamedMember)
        if (const auto *FieldRD = Field->getType()->getAsRecordDecl())
          SeenNamedMember = FieldRD->findFirstNamedDataMember();
      if (SeenNamedMember && !isZeroInitializable(Field)) {
        IsZeroInitializable = IsZeroInitializableAsBase = false;
        StorageType = FieldType;
      }
    }
    // Because our union isn't zero initializable, we won't be getting a better
    // storage type.
    if (!IsZeroInitializable)
      continue;
    // Conditionally update our storage type if we've got a new "better" one.
    if (!StorageType ||
        getAlignment(FieldType) > getAlignment(StorageType) ||
        (getAlignment(FieldType) == getAlignment(StorageType) &&
         getSize(FieldType) > getSize(StorageType)))
      StorageType = FieldType;
  }
  // If we have no storage type just pad to the appropriate size and return.
  if (!StorageType)
    return appendPaddingBytes(LayoutSize);
  // If our storage size was bigger than our required size (can happen in the
  // case of packed bitfields on Itanium) then just use an I8 array.
  if (LayoutSize < getSize(StorageType))
    StorageType = getByteArrayType(LayoutSize);
  FieldTypes.push_back(StorageType);
  appendPaddingBytes(LayoutSize - getSize(StorageType));
  // Set packed if we need it.
  if (LayoutSize % getAlignment(StorageType))
    Packed = true;
}

void CGRecordLowering::accumulateFields() {
  for (RecordDecl::field_iterator Field = D->field_begin(),
                                  FieldEnd = D->field_end();
       Field != FieldEnd;) {
    if (Field->isBitField()) {
      RecordDecl::field_iterator Start = Field;
      // Iterate to gather the list of bitfields.
      for (++Field; Field != FieldEnd && Field->isBitField(); ++Field);
      accumulateBitFields(Start, Field);
    } else if (!Field->isZeroSize(Context)) {
      Members.push_back(MemberInfo(
          bitsToCharUnits(getFieldBitOffset(*Field)), MemberInfo::Field,
          getStorageType(*Field), *Field));
      ++Field;
    } else {
      ++Field;
    }
  }
}

void
CGRecordLowering::accumulateBitFields(RecordDecl::field_iterator Field,
                                      RecordDecl::field_iterator FieldEnd) {
  // Run stores the first element of the current run of bitfields. FieldEnd is
  // used as a special value to note that we don't have a current run. A
  // bitfield run is a contiguous collection of bitfields that can be stored in
  // the same storage block. Zero-sized bitfields and bitfields that would
  // cross an alignment boundary break a run and start a new one.
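  // For example (illustrative), under Itanium the bitfields in
  // 'struct { int a : 3; int b : 6; char c; }' form one 9-bit run for 'a'
  // and 'b', allocated as a single i16 storage unit ('c' is not a bitfield
  // and is laid out separately).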
  RecordDecl::field_iterator Run = FieldEnd;
  // Tail is the offset of the first bit off the end of the current run. It's
  // used to determine if the ASTRecordLayout is treating these two bitfields
  // as contiguous. StartBitOffset is the offset of the beginning of the Run.
  uint64_t StartBitOffset, Tail = 0;
  if (isDiscreteBitFieldABI()) {
    for (; Field != FieldEnd; ++Field) {
      uint64_t BitOffset = getFieldBitOffset(*Field);
      // Zero-width bitfields end runs.
      if (Field->isZeroLengthBitField(Context)) {
        Run = FieldEnd;
        continue;
      }
      llvm::Type *Type =
          Types.ConvertTypeForMem(Field->getType(), /*ForBitFields=*/true);
      // If we don't have a run yet, or don't live within the previous run's
      // allocated storage then we allocate some storage and start a new run.
      if (Run == FieldEnd || BitOffset >= Tail) {
        Run = Field;
        StartBitOffset = BitOffset;
        Tail = StartBitOffset + DataLayout.getTypeAllocSizeInBits(Type);
        // Add the storage member to the record. This must be added to the
        // record before the bitfield members so that it gets laid out before
        // the bitfields it contains get laid out.
        Members.push_back(StorageInfo(bitsToCharUnits(StartBitOffset), Type));
      }
      // Bitfields get the offset of their storage but come afterward and
      // remain there after a stable sort.
      Members.push_back(MemberInfo(bitsToCharUnits(StartBitOffset),
                                   MemberInfo::Field, nullptr, *Field));
    }
    return;
  }

  // Check if OffsetInRecord (the size in bits of the current run) is better
  // as a single field run. When OffsetInRecord has legal integer width, and
  // its bitfield offset is naturally aligned, it is better to make the
  // bitfield a separate storage component so that it can be accessed directly
  // at lower cost.
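  // For example (illustrative), with -ffine-grained-bitfield-accesses and
  // 32-bit legal integers, 'struct { unsigned a : 16; unsigned b : 8; }' can
  // give 'a' its own naturally aligned i16 unit rather than sharing one run
  // with 'b'.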
  auto IsBetterAsSingleFieldRun = [&](uint64_t OffsetInRecord,
                                      uint64_t StartBitOffset) {
    if (!Types.getCodeGenOpts().FineGrainedBitfieldAccesses)
      return false;
    if (OffsetInRecord < 8 || !llvm::isPowerOf2_64(OffsetInRecord) ||
        !DataLayout.fitsInLegalInteger(OffsetInRecord))
      return false;
    // Make sure StartBitOffset is naturally aligned if it is treated as an
    // IType integer.
    if (StartBitOffset %
            Context.toBits(getAlignment(getIntNType(OffsetInRecord))) !=
        0)
      return false;
    return true;
  };

  // The start field is better as a single field run.
  bool StartFieldAsSingleRun = false;
  for (;;) {
    // Check to see if we need to start a new run.
    if (Run == FieldEnd) {
      // If we're out of fields, return.
      if (Field == FieldEnd)
        break;
      // Any non-zero-length bitfield can start a new run.
      if (!Field->isZeroLengthBitField(Context)) {
        Run = Field;
        StartBitOffset = getFieldBitOffset(*Field);
        Tail = StartBitOffset + Field->getBitWidthValue(Context);
        StartFieldAsSingleRun = IsBetterAsSingleFieldRun(Tail - StartBitOffset,
                                                         StartBitOffset);
      }
      ++Field;
      continue;
    }

    // If the start field of a new run is better as a single run, or
    // if the current field (or consecutive fields) is better as a single run,
    // or
    // if the current field is a zero-width bitfield and either
    // UseZeroLengthBitfieldAlignment or UseBitFieldTypeAlignment is set to
    // true, or
    // if the offset of the current field is inconsistent with the offset of
    // the previous field plus its width,
    // skip the block below and go ahead to emit the storage.
    // Otherwise, try to add bitfields to the run.
    if (!StartFieldAsSingleRun && Field != FieldEnd &&
        !IsBetterAsSingleFieldRun(Tail - StartBitOffset, StartBitOffset) &&
        (!Field->isZeroLengthBitField(Context) ||
         (!Context.getTargetInfo().useZeroLengthBitfieldAlignment() &&
          !Context.getTargetInfo().useBitFieldTypeAlignment())) &&
        Tail == getFieldBitOffset(*Field)) {
      Tail += Field->getBitWidthValue(Context);
      ++Field;
      continue;
    }

    // We've hit a break-point in the run and need to emit a storage field.
    llvm::Type *Type = getIntNType(Tail - StartBitOffset);
    // Add the storage member to the record and set the bitfield info for all
    // of the bitfields in the run. Bitfields get the offset of their storage
    // but come afterward and remain there after a stable sort.
    Members.push_back(StorageInfo(bitsToCharUnits(StartBitOffset), Type));
    for (; Run != Field; ++Run)
      Members.push_back(MemberInfo(bitsToCharUnits(StartBitOffset),
                                   MemberInfo::Field, nullptr, *Run));
    Run = FieldEnd;
    StartFieldAsSingleRun = false;
  }
}

void CGRecordLowering::accumulateBases() {
  // If we've got a primary virtual base, we need to add it with the bases.
  if (Layout.isPrimaryBaseVirtual()) {
    const CXXRecordDecl *BaseDecl = Layout.getPrimaryBase();
    Members.push_back(MemberInfo(CharUnits::Zero(), MemberInfo::Base,
                                 getStorageType(BaseDecl), BaseDecl));
  }
  // Accumulate the non-virtual bases.
  for (const auto &Base : RD->bases()) {
    if (Base.isVirtual())
      continue;

    // Bases can be zero-sized even if not technically empty if they
    // contain only a trailing array member.
    const CXXRecordDecl *BaseDecl = Base.getType()->getAsCXXRecordDecl();
    if (!BaseDecl->isEmpty() &&
        !Context.getASTRecordLayout(BaseDecl).getNonVirtualSize().isZero())
      Members.push_back(MemberInfo(Layout.getBaseClassOffset(BaseDecl),
          MemberInfo::Base, getStorageType(BaseDecl), BaseDecl));
  }
}

/// The AAPCS defines that, when possible, bit-fields should
/// be accessed using containers of the declared type width:
/// When a volatile bit-field is read, and its container does not overlap with
/// any non-bit-field member or any zero length bit-field member, its container
/// must be read exactly once using the access width appropriate to the type of
/// the container. When a volatile bit-field is written, and its container does
/// not overlap with any non-bit-field member or any zero-length bit-field
/// member, its container must be read exactly once and written exactly once
/// using the access width appropriate to the type of the container. The two
/// accesses are not atomic.
///
/// Enforcing the width restriction can be disabled using
/// -fno-aapcs-bitfield-width.
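/// For example (illustrative): in 'struct { volatile int a : 8; char b; }'
/// an i32 container load for 'a' would overlap the non-bit-field 'b', so no
/// AAPCS container is recorded and the normal access path is used.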
void CGRecordLowering::computeVolatileBitfields() {
  if (!isAAPCS() || !Types.getCodeGenOpts().AAPCSBitfieldWidth)
    return;

  for (auto &I : BitFields) {
    const FieldDecl *Field = I.first;
    CGBitFieldInfo &Info = I.second;
    llvm::Type *ResLTy = Types.ConvertTypeForMem(Field->getType());
    // If the record alignment is less than the type width, we can't enforce
    // an aligned load; bail out.
    if ((uint64_t)(Context.toBits(Layout.getAlignment())) <
        ResLTy->getPrimitiveSizeInBits())
      continue;
    // CGRecordLowering::setBitFieldInfo() pre-adjusts the bit-field offsets
    // for big-endian targets, but it assumes a container of width
    // Info.StorageSize. Since AAPCS uses a different container size (width
    // of the type), we first undo that calculation here and redo it once
    // the bit-field offset within the new container is calculated.
    const unsigned OldOffset =
        isBE() ? Info.StorageSize - (Info.Offset + Info.Size) : Info.Offset;
    // Offset to the bit-field from the beginning of the struct.
    const unsigned AbsoluteOffset =
        Context.toBits(Info.StorageOffset) + OldOffset;

    // Container size is the width of the bit-field type.
    const unsigned StorageSize = ResLTy->getPrimitiveSizeInBits();
    // Nothing to do if the access uses the desired
    // container width and is naturally aligned.
    if (Info.StorageSize == StorageSize && (OldOffset % StorageSize == 0))
      continue;

    // Offset within the container.
    unsigned Offset = AbsoluteOffset & (StorageSize - 1);
    // Bail out if an aligned load of the container cannot cover the entire
    // bit-field. This can happen, for example, if the bit-field is part of a
    // packed struct. AAPCS does not define access rules for such cases, so we
    // let clang follow its own rules.
    if (Offset + Info.Size > StorageSize)
      continue;

    // Re-adjust offsets for big-endian targets.
    if (isBE())
      Offset = StorageSize - (Offset + Info.Size);

    const CharUnits StorageOffset =
        Context.toCharUnitsFromBits(AbsoluteOffset & ~(StorageSize - 1));
    const CharUnits End = StorageOffset +
                          Context.toCharUnitsFromBits(StorageSize) -
                          CharUnits::One();

    const ASTRecordLayout &Layout =
        Context.getASTRecordLayout(Field->getParent());
    // If the access would touch memory outside the record, bail out.
    const CharUnits RecordSize = Layout.getSize();
    if (End >= RecordSize)
      continue;

    // Bail out if performing this load would access non-bit-field members.
    bool Conflict = false;
    for (const auto *F : D->fields()) {
      // Allow sized bit-fields to overlap.
      if (F->isBitField() && !F->isZeroLengthBitField(Context))
        continue;

      const CharUnits FOffset = Context.toCharUnitsFromBits(
          Layout.getFieldOffset(F->getFieldIndex()));

      // As C11 defines, a zero sized bit-field defines a barrier, so
      // fields after and before it should be race condition free.
      // The AAPCS acknowledges it and imposes no restrictions when the
      // natural container overlaps a zero-length bit-field.
      if (F->isZeroLengthBitField(Context)) {
        if (End > FOffset && StorageOffset < FOffset) {
          Conflict = true;
          break;
        }
      }

      const CharUnits FEnd =
          FOffset +
          Context.toCharUnitsFromBits(
              Types.ConvertTypeForMem(F->getType())->getPrimitiveSizeInBits()) -
          CharUnits::One();
      // If no overlap, continue.
      if (End < FOffset || FEnd < StorageOffset)
        continue;

      // The desired load overlaps a non-bit-field member, bail out.
      Conflict = true;
      break;
    }

    if (Conflict)
      continue;
    // Write the new bit-field access parameters.
    // As the storage offset now is defined as the number of elements from the
    // start of the structure, we should divide the Offset by the element size.
    Info.VolatileStorageOffset =
        StorageOffset / Context.toCharUnitsFromBits(StorageSize).getQuantity();
    Info.VolatileStorageSize = StorageSize;
    Info.VolatileOffset = Offset;
  }
}

void CGRecordLowering::accumulateVPtrs() {
  if (Layout.hasOwnVFPtr())
    Members.push_back(MemberInfo(CharUnits::Zero(), MemberInfo::VFPtr,
        llvm::FunctionType::get(getIntNType(32), /*isVarArg=*/true)->
            getPointerTo()->getPointerTo()));
  if (Layout.hasOwnVBPtr())
    Members.push_back(MemberInfo(Layout.getVBPtrOffset(), MemberInfo::VBPtr,
        llvm::Type::getInt32PtrTy(Types.getLLVMContext())));
}

void CGRecordLowering::accumulateVBases() {
  CharUnits ScissorOffset = Layout.getNonVirtualSize();
  // In the Itanium ABI, it's possible to place a vbase at a dsize that is
  // smaller than the nvsize. Here we check to see if such a base is placed
  // before the nvsize and set the scissor offset to that, instead of the
  // nvsize.
  if (isOverlappingVBaseABI())
    for (const auto &Base : RD->vbases()) {
      const CXXRecordDecl *BaseDecl = Base.getType()->getAsCXXRecordDecl();
      if (BaseDecl->isEmpty())
        continue;
      // If the vbase is a primary virtual base of some base, then it doesn't
      // get its own storage location but instead lives inside of that base.
      if (Context.isNearlyEmpty(BaseDecl) && !hasOwnStorage(RD, BaseDecl))
        continue;
      ScissorOffset = std::min(ScissorOffset,
                               Layout.getVBaseClassOffset(BaseDecl));
    }
  Members.push_back(MemberInfo(ScissorOffset, MemberInfo::Scissor, nullptr,
                               RD));
  for (const auto &Base : RD->vbases()) {
    const CXXRecordDecl *BaseDecl = Base.getType()->getAsCXXRecordDecl();
    if (BaseDecl->isEmpty())
      continue;
    CharUnits Offset = Layout.getVBaseClassOffset(BaseDecl);
    // If the vbase is a primary virtual base of some base, then it doesn't
    // get its own storage location but instead lives inside of that base.
    if (isOverlappingVBaseABI() &&
        Context.isNearlyEmpty(BaseDecl) &&
        !hasOwnStorage(RD, BaseDecl)) {
      Members.push_back(MemberInfo(Offset, MemberInfo::VBase, nullptr,
                                   BaseDecl));
      continue;
    }
    // If we've got a vtordisp, add it as a storage type.
    if (Layout.getVBaseOffsetsMap().find(BaseDecl)->second.hasVtorDisp())
      Members.push_back(StorageInfo(Offset - CharUnits::fromQuantity(4),
                                    getIntNType(32)));
    Members.push_back(MemberInfo(Offset, MemberInfo::VBase,
                                 getStorageType(BaseDecl), BaseDecl));
  }
}

bool CGRecordLowering::hasOwnStorage(const CXXRecordDecl *Decl,
                                     const CXXRecordDecl *Query) {
  const ASTRecordLayout &DeclLayout = Context.getASTRecordLayout(Decl);
  if (DeclLayout.isPrimaryBaseVirtual() && DeclLayout.getPrimaryBase() == Query)
    return false;
  for (const auto &Base : Decl->bases())
    if (!hasOwnStorage(Base.getType()->getAsCXXRecordDecl(), Query))
      return false;
  return true;
}

void CGRecordLowering::calculateZeroInit() {
  for (std::vector<MemberInfo>::const_iterator Member = Members.begin(),
                                               MemberEnd = Members.end();
       IsZeroInitializableAsBase && Member != MemberEnd; ++Member) {
    if (Member->Kind == MemberInfo::Field) {
      if (!Member->FD || isZeroInitializable(Member->FD))
        continue;
      IsZeroInitializable = IsZeroInitializableAsBase = false;
    } else if (Member->Kind == MemberInfo::Base ||
               Member->Kind == MemberInfo::VBase) {
      if (isZeroInitializable(Member->RD))
        continue;
      IsZeroInitializable = false;
      if (Member->Kind == MemberInfo::Base)
        IsZeroInitializableAsBase = false;
    }
  }
}

void CGRecordLowering::clipTailPadding() {
  std::vector<MemberInfo>::iterator Prior = Members.begin();
  CharUnits Tail = getSize(Prior->Data);
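  // For example (illustrative), an i24 bitfield storage unit has a 4-byte
  // alloc size; if the next member starts at offset 3, the unit is clipped
  // to [3 x i8] so the fourth byte stays available.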
  for (std::vector<MemberInfo>::iterator Member = Prior + 1,
                                         MemberEnd = Members.end();
       Member != MemberEnd; ++Member) {
    // Only members with data and the scissor can cut into tail padding.
    if (!Member->Data && Member->Kind != MemberInfo::Scissor)
      continue;
    if (Member->Offset < Tail) {
      assert(Prior->Kind == MemberInfo::Field &&
             "Only storage fields have tail padding!");
      if (!Prior->FD || Prior->FD->isBitField())
        Prior->Data = getByteArrayType(bitsToCharUnits(llvm::alignTo(
            cast<llvm::IntegerType>(Prior->Data)->getIntegerBitWidth(), 8)));
      else {
        assert(Prior->FD->hasAttr<NoUniqueAddressAttr>() &&
               "should not have reused this field's tail padding");
        Prior->Data = getByteArrayType(
            Context.getTypeInfoDataSizeInChars(Prior->FD->getType()).Width);
      }
    }
    if (Member->Data)
      Prior = Member;
    Tail = Prior->Offset + getSize(Prior->Data);
  }
}

void CGRecordLowering::determinePacked(bool NVBaseType) {
  if (Packed)
    return;
  CharUnits Alignment = CharUnits::One();
  CharUnits NVAlignment = CharUnits::One();
  CharUnits NVSize =
      !NVBaseType && RD ? Layout.getNonVirtualSize() : CharUnits::Zero();
  for (std::vector<MemberInfo>::const_iterator Member = Members.begin(),
                                               MemberEnd = Members.end();
       Member != MemberEnd; ++Member) {
    if (!Member->Data)
      continue;
    // If any member falls at an offset that is not a multiple of its
    // alignment, then the entire record must be packed.
    if (Member->Offset % getAlignment(Member->Data))
      Packed = true;
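    // For example (illustrative): in 'struct __attribute__((packed))
    // { char c; int i; }' the i32 member lands at offset 1, so the record
    // lowers to the packed struct '<{ i8, i32 }>'.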
    if (Member->Offset < NVSize)
      NVAlignment = std::max(NVAlignment, getAlignment(Member->Data));
    Alignment = std::max(Alignment, getAlignment(Member->Data));
  }
  // If the size of the record (the capstone's offset) is not a multiple of the
  // record's alignment, it must be packed.
  if (Members.back().Offset % Alignment)
    Packed = true;
  // If the non-virtual sub-object is not a multiple of the non-virtual
  // sub-object's alignment, it must be packed. We cannot have a packed
  // non-virtual sub-object and an unpacked complete object or vice versa.
  if (NVSize % NVAlignment)
    Packed = true;
  // Update the alignment of the sentinel.
  if (!Packed)
    Members.back().Data = getIntNType(Context.toBits(Alignment));
}

void CGRecordLowering::insertPadding() {
  std::vector<std::pair<CharUnits, CharUnits> > Padding;
  CharUnits Size = CharUnits::Zero();
  for (std::vector<MemberInfo>::const_iterator Member = Members.begin(),
                                               MemberEnd = Members.end();
       Member != MemberEnd; ++Member) {
    if (!Member->Data)
      continue;
    CharUnits Offset = Member->Offset;
    assert(Offset >= Size);
    // Insert padding if we need to.
    if (Offset !=
        Size.alignTo(Packed ? CharUnits::One() : getAlignment(Member->Data)))
      Padding.push_back(std::make_pair(Size, Offset - Size));
    Size = Offset + getSize(Member->Data);
  }
  if (Padding.empty())
    return;
  // Add the padding to the Members list and sort it.
  for (std::vector<std::pair<CharUnits, CharUnits> >::const_iterator
           Pad = Padding.begin(), PadEnd = Padding.end();
       Pad != PadEnd; ++Pad)
    Members.push_back(StorageInfo(Pad->first, getByteArrayType(Pad->second)));
  llvm::stable_sort(Members);
}

void CGRecordLowering::fillOutputFields() {
  for (std::vector<MemberInfo>::const_iterator Member = Members.begin(),
                                               MemberEnd = Members.end();
       Member != MemberEnd; ++Member) {
    if (Member->Data)
      FieldTypes.push_back(Member->Data);
    if (Member->Kind == MemberInfo::Field) {
      if (Member->FD)
        Fields[Member->FD->getCanonicalDecl()] = FieldTypes.size() - 1;
      // A field without storage must be a bitfield.
      if (!Member->Data)
        setBitFieldInfo(Member->FD, Member->Offset, FieldTypes.back());
    } else if (Member->Kind == MemberInfo::Base)
      NonVirtualBases[Member->RD] = FieldTypes.size() - 1;
    else if (Member->Kind == MemberInfo::VBase)
      VirtualBases[Member->RD] = FieldTypes.size() - 1;
  }
}

CGBitFieldInfo CGBitFieldInfo::MakeInfo(CodeGenTypes &Types,
                                        const FieldDecl *FD,
                                        uint64_t Offset, uint64_t Size,
                                        uint64_t StorageSize,
                                        CharUnits StorageOffset) {
  // This function is vestigial from CGRecordLayoutBuilder days but is still
  // used in GCObjCRuntime.cpp. That usage has a "fixme" attached to it that
  // when addressed will allow for the removal of this function.
  llvm::Type *Ty = Types.ConvertTypeForMem(FD->getType());
  CharUnits TypeSizeInBytes =
      CharUnits::fromQuantity(Types.getDataLayout().getTypeAllocSize(Ty));
  uint64_t TypeSizeInBits = Types.getContext().toBits(TypeSizeInBytes);

  bool IsSigned = FD->getType()->isSignedIntegerOrEnumerationType();

  if (Size > TypeSizeInBits) {
    // We have a wide bit-field. The extra bits are only used for padding, so
    // if we have a bitfield of type T, with size N:
    //
    //   T t : N;
    //
    // We can just assume that it's:
    //
    //   T t : sizeof(T);
    //
    Size = TypeSizeInBits;
  }

  // Reverse the bit offsets for big endian machines. Because we represent
  // a bitfield as a single large integer load, we can imagine the bits
  // counting from the most-significant-bit instead of the
  // least-significant-bit.
  if (Types.getDataLayout().isBigEndian()) {
    Offset = StorageSize - (Offset + Size);
  }

  return CGBitFieldInfo(Offset, Size, IsSigned, StorageSize, StorageOffset);
}

std::unique_ptr<CGRecordLayout>
CodeGenTypes::ComputeRecordLayout(const RecordDecl *D, llvm::StructType *Ty) {
  CGRecordLowering Builder(*this, D, /*Packed=*/false);

  Builder.lower(/*NonVirtualBaseType=*/false);

  // If we're in C++, compute the base subobject type.
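  // (The base subobject type covers only the non-virtual part of D; a class
  // deriving from D lays D out with this ".base" type and then places D's
  // virtual bases itself.)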
  llvm::StructType *BaseTy = nullptr;
  if (isa<CXXRecordDecl>(D) && !D->isUnion() && !D->hasAttr<FinalAttr>()) {
    BaseTy = Ty;
    if (Builder.Layout.getNonVirtualSize() != Builder.Layout.getSize()) {
      CGRecordLowering BaseBuilder(*this, D, /*Packed=*/Builder.Packed);
      BaseBuilder.lower(/*NonVirtualBaseType=*/true);
      BaseTy = llvm::StructType::create(
          getLLVMContext(), BaseBuilder.FieldTypes, "", BaseBuilder.Packed);
      addRecordTypeName(D, BaseTy, ".base");
      // BaseTy and Ty must agree on their packedness for getLLVMFieldNo to
      // work on both of them with the same index.
      assert(Builder.Packed == BaseBuilder.Packed &&
             "Non-virtual and complete types must agree on packedness");
    }
  }

  // Fill in the struct *after* computing the base type. Filling in the body
  // signifies that the type is no longer opaque and record layout is complete,
  // but we may need to recursively lay out D while laying D out as a base type.
  Ty->setBody(Builder.FieldTypes, Builder.Packed);

  auto RL = std::make_unique<CGRecordLayout>(
      Ty, BaseTy, (bool)Builder.IsZeroInitializable,
      (bool)Builder.IsZeroInitializableAsBase);

  RL->NonVirtualBases.swap(Builder.NonVirtualBases);
  RL->CompleteObjectVirtualBases.swap(Builder.VirtualBases);

  // Add all the field numbers.
  RL->FieldInfo.swap(Builder.Fields);

  // Add bitfield info.
  RL->BitFields.swap(Builder.BitFields);

  // Dump the layout, if requested.
  if (getContext().getLangOpts().DumpRecordLayouts) {
    llvm::outs() << "\n*** Dumping IRgen Record Layout\n";
    llvm::outs() << "Record: ";
    D->dump(llvm::outs());
    llvm::outs() << "\nLayout: ";
    RL->print(llvm::outs());
  }

#ifndef NDEBUG
  // Verify that the computed LLVM struct size matches the AST layout size.
  const ASTRecordLayout &Layout = getContext().getASTRecordLayout(D);

  uint64_t TypeSizeInBits = getContext().toBits(Layout.getSize());
  assert(TypeSizeInBits == getDataLayout().getTypeAllocSizeInBits(Ty) &&
         "Type size mismatch!");

  if (BaseTy) {
    CharUnits NonVirtualSize = Layout.getNonVirtualSize();

    uint64_t AlignedNonVirtualTypeSizeInBits =
        getContext().toBits(NonVirtualSize);

    assert(AlignedNonVirtualTypeSizeInBits ==
               getDataLayout().getTypeAllocSizeInBits(BaseTy) &&
           "Type size mismatch!");
  }

  // Verify that the LLVM and AST field offsets agree.
  llvm::StructType *ST = RL->getLLVMType();
  const llvm::StructLayout *SL = getDataLayout().getStructLayout(ST);

  const ASTRecordLayout &AST_RL = getContext().getASTRecordLayout(D);
  RecordDecl::field_iterator it = D->field_begin();
  for (unsigned i = 0, e = AST_RL.getFieldCount(); i != e; ++i, ++it) {
    const FieldDecl *FD = *it;

    // Ignore zero-sized fields.
    if (FD->isZeroSize(getContext()))
      continue;

    // For non-bit-fields, just check that the LLVM struct offset matches the
    // AST offset.
    if (!FD->isBitField()) {
      unsigned FieldNo = RL->getLLVMFieldNo(FD);
      assert(AST_RL.getFieldOffset(i) == SL->getElementOffsetInBits(FieldNo) &&
             "Invalid field offset!");
      continue;
    }

    // Ignore unnamed bit-fields.
    if (!FD->getDeclName())
      continue;

    const CGBitFieldInfo &Info = RL->getBitFieldInfo(FD);
    llvm::Type *ElementTy = ST->getTypeAtIndex(RL->getLLVMFieldNo(FD));

    // Unions have overlapping elements dictating their layout, but for
    // non-unions we can verify that this section of the layout is the exact
    // expected size.
    if (D->isUnion()) {
      // For unions we verify that the start is zero and the size
      // is in-bounds. However, on BE systems, the offset may be non-zero, but
      // the size + offset should match the storage size in that case as it
      // "starts" at the back.
      if (getDataLayout().isBigEndian())
        assert(static_cast<unsigned>(Info.Offset + Info.Size) ==
                   Info.StorageSize &&
               "Big endian union bitfield does not end at the back");
      else
        assert(Info.Offset == 0 &&
               "Little endian union bitfield with a non-zero offset");
      assert(Info.StorageSize <= SL->getSizeInBits() &&
             "Union not large enough for bitfield storage");
    } else {
      assert((Info.StorageSize ==
                  getDataLayout().getTypeAllocSizeInBits(ElementTy) ||
              Info.VolatileStorageSize ==
                  getDataLayout().getTypeAllocSizeInBits(ElementTy)) &&
             "Storage size does not match the element type size");
    }
    assert(Info.Size > 0 && "Empty bitfield!");
    assert(static_cast<unsigned>(Info.Offset) + Info.Size <=
               Info.StorageSize &&
           "Bitfield outside of its allocated storage");
  }
#endif

  return RL;
}

void CGRecordLayout::print(raw_ostream &OS) const {
  OS << "<CGRecordLayout\n";
  OS << "  LLVMType:" << *CompleteObjectType << "\n";
  if (BaseSubobjectType)
    OS << "  NonVirtualBaseLLVMType:" << *BaseSubobjectType << "\n";
  OS << "  IsZeroInitializable:" << IsZeroInitializable << "\n";
  OS << "  BitFields:[\n";

  // Print bit-field infos in declaration order.
  std::vector<std::pair<unsigned, const CGBitFieldInfo*> > BFIs;
  for (llvm::DenseMap<const FieldDecl*, CGBitFieldInfo>::const_iterator
         it = BitFields.begin(), ie = BitFields.end();
       it != ie; ++it) {
    const RecordDecl *RD = it->first->getParent();
    unsigned Index = 0;
    for (RecordDecl::field_iterator
           it2 = RD->field_begin(); *it2 != it->first; ++it2)
      ++Index;
    BFIs.push_back(std::make_pair(Index, &it->second));
  }
  llvm::array_pod_sort(BFIs.begin(), BFIs.end());
  for (unsigned i = 0, e = BFIs.size(); i != e; ++i) {
    OS.indent(4);
    BFIs[i].second->print(OS);
    OS << "\n";
  }

  OS << "]>\n";
}

LLVM_DUMP_METHOD void CGRecordLayout::dump() const {
  print(llvm::errs());
}

void CGBitFieldInfo::print(raw_ostream &OS) const {
  OS << "<CGBitFieldInfo"
     << " Offset:" << Offset << " Size:" << Size << " IsSigned:" << IsSigned
     << " StorageSize:" << StorageSize
     << " StorageOffset:" << StorageOffset.getQuantity()
     << " VolatileOffset:" << VolatileOffset
     << " VolatileStorageSize:" << VolatileStorageSize
     << " VolatileStorageOffset:" << VolatileStorageOffset.getQuantity() << ">";
}

LLVM_DUMP_METHOD void CGBitFieldInfo::dump() const {
  print(llvm::errs());
}