//===--- SwiftCallingConv.cpp - Lowering for the Swift calling convention -===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Implementation of the abstract lowering for the Swift calling convention.
//
//===----------------------------------------------------------------------===//

#include "clang/CodeGen/SwiftCallingConv.h"
#include "clang/Basic/TargetInfo.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"

using namespace clang;
using namespace CodeGen;
using namespace swiftcall;

static const SwiftABIInfo &getSwiftABIInfo(CodeGenModule &CGM) {
  return cast<SwiftABIInfo>(CGM.getTargetCodeGenInfo().getABIInfo());
}

static bool isPowerOf2(unsigned n) {
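  // (n & -n) isolates the lowest set bit of n, so the comparison below
  // holds exactly when at most one bit is set, e.g. 8 -> true, 12 -> false.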
  return n == (n & -n);
}

/// Given two types with the same size, try to find a common type.
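/// For example, merging i64 with a pointer yields i64, and two vectors of
/// the same size merge if their element types do.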
static llvm::Type *getCommonType(llvm::Type *first, llvm::Type *second) {
  assert(first != second);

  // Allow pointers to merge with integers, but prefer the integer type.
  if (first->isIntegerTy()) {
    if (second->isPointerTy()) return first;
  } else if (first->isPointerTy()) {
    if (second->isIntegerTy()) return second;
    if (second->isPointerTy()) return first;

  // Allow two vectors to be merged (given that they have the same size).
  // This assumes that we never have two different vector register sets.
  } else if (auto firstVecTy = dyn_cast<llvm::VectorType>(first)) {
    if (auto secondVecTy = dyn_cast<llvm::VectorType>(second)) {
      if (auto commonTy = getCommonType(firstVecTy->getElementType(),
                                        secondVecTy->getElementType())) {
        return (commonTy == firstVecTy->getElementType() ? first : second);
      }
    }
  }

  return nullptr;
}

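// Note: a type's store size excludes tail padding, while its alloc size is
// rounded up to its ABI alignment; the two differ for types like x86_fp80,
// whose size is not a multiple of its alignment on most targets.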
static CharUnits getTypeStoreSize(CodeGenModule &CGM, llvm::Type *type) {
  return CharUnits::fromQuantity(CGM.getDataLayout().getTypeStoreSize(type));
}

static CharUnits getTypeAllocSize(CodeGenModule &CGM, llvm::Type *type) {
  return CharUnits::fromQuantity(CGM.getDataLayout().getTypeAllocSize(type));
}

void SwiftAggLowering::addTypedData(QualType type, CharUnits begin) {
  // Deal with various aggregate types as special cases:

  // Record types.
  if (auto recType = type->getAs<RecordType>()) {
    addTypedData(recType->getDecl(), begin);

  // Array types.
  } else if (type->isArrayType()) {
    // Incomplete array types (flexible array members?) don't provide
    // data to lay out, and the other cases shouldn't be possible.
    auto arrayType = CGM.getContext().getAsConstantArrayType(type);
    if (!arrayType) return;

    QualType eltType = arrayType->getElementType();
    auto eltSize = CGM.getContext().getTypeSizeInChars(eltType);
    for (uint64_t i = 0, e = arrayType->getSize().getZExtValue(); i != e; ++i) {
      addTypedData(eltType, begin + i * eltSize);
    }

  // Complex types.
  } else if (auto complexType = type->getAs<ComplexType>()) {
    auto eltType = complexType->getElementType();
    auto eltSize = CGM.getContext().getTypeSizeInChars(eltType);
    auto eltLLVMType = CGM.getTypes().ConvertType(eltType);
    addTypedData(eltLLVMType, begin, begin + eltSize);
    addTypedData(eltLLVMType, begin + eltSize, begin + 2 * eltSize);

  // Member pointer types.
  } else if (type->getAs<MemberPointerType>()) {
    // Just add it all as opaque.
    addOpaqueData(begin, begin + CGM.getContext().getTypeSizeInChars(type));

  // Everything else is scalar and should not convert as an LLVM aggregate.
  } else {
    // We intentionally convert as !ForMem because we want to preserve
    // that a type was an i1.
    auto llvmType = CGM.getTypes().ConvertType(type);
    addTypedData(llvmType, begin);
  }
}

void SwiftAggLowering::addTypedData(const RecordDecl *record, CharUnits begin) {
  addTypedData(record, begin, CGM.getContext().getASTRecordLayout(record));
}

void SwiftAggLowering::addTypedData(const RecordDecl *record, CharUnits begin,
                                    const ASTRecordLayout &layout) {
  // Unions are a special case.
  if (record->isUnion()) {
    for (auto field : record->fields()) {
      if (field->isBitField()) {
        addBitFieldData(field, begin, 0);
      } else {
        addTypedData(field->getType(), begin);
      }
    }
    return;
  }

  // Note that correctness does not rely on us adding things in
  // their actual order of layout; it's just somewhat more efficient
  // for the builder.

  // With that in mind, add "early" C++ data.
  auto cxxRecord = dyn_cast<CXXRecordDecl>(record);
  if (cxxRecord) {
    //   - a v-table pointer, if the class adds its own
    if (layout.hasOwnVFPtr()) {
      addTypedData(CGM.Int8PtrTy, begin);
    }

    //   - non-virtual bases
    for (auto &baseSpecifier : cxxRecord->bases()) {
      if (baseSpecifier.isVirtual()) continue;

      auto baseRecord = baseSpecifier.getType()->getAsCXXRecordDecl();
      addTypedData(baseRecord, begin + layout.getBaseClassOffset(baseRecord));
    }

    //   - a vbptr if the class adds its own
    if (layout.hasOwnVBPtr()) {
      addTypedData(CGM.Int8PtrTy, begin + layout.getVBPtrOffset());
    }
  }

  // Add fields.
  for (auto field : record->fields()) {
    auto fieldOffsetInBits = layout.getFieldOffset(field->getFieldIndex());
    if (field->isBitField()) {
      addBitFieldData(field, begin, fieldOffsetInBits);
    } else {
      addTypedData(field->getType(),
              begin + CGM.getContext().toCharUnitsFromBits(fieldOffsetInBits));
    }
  }

  // Add "late" C++ data:
  if (cxxRecord) {
    //   - virtual bases
    for (auto &vbaseSpecifier : cxxRecord->vbases()) {
      auto baseRecord = vbaseSpecifier.getType()->getAsCXXRecordDecl();
      addTypedData(baseRecord, begin + layout.getVBaseClassOffset(baseRecord));
    }
  }
}

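// Worked example: a bit-field starting at bit 4 of its record with width 10
// occupies bits [4,14), i.e. bytes 0 and 1, so we add the opaque byte range
// [0,2) relative to the record.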
void SwiftAggLowering::addBitFieldData(const FieldDecl *bitfield,
                                       CharUnits recordBegin,
                                       uint64_t bitfieldBitBegin) {
  assert(bitfield->isBitField());
  auto &ctx = CGM.getContext();
  auto width = bitfield->getBitWidthValue(ctx);

  // We can ignore zero-width bit-fields.
  if (width == 0) return;

  // toCharUnitsFromBits rounds down.
  CharUnits bitfieldByteBegin = ctx.toCharUnitsFromBits(bitfieldBitBegin);

  // Find the offset of the last byte that is partially occupied by the
  // bit-field; since we otherwise expect exclusive ends, the end is the
  // next byte.
  uint64_t bitfieldBitLast = bitfieldBitBegin + width - 1;
  CharUnits bitfieldByteEnd =
    ctx.toCharUnitsFromBits(bitfieldBitLast) + CharUnits::One();
  addOpaqueData(recordBegin + bitfieldByteBegin,
                recordBegin + bitfieldByteEnd);
}

void SwiftAggLowering::addTypedData(llvm::Type *type, CharUnits begin) {
  assert(type && "didn't provide type for typed data");
  addTypedData(type, begin, begin + getTypeStoreSize(CGM, type));
}

void SwiftAggLowering::addTypedData(llvm::Type *type,
                                    CharUnits begin, CharUnits end) {
  assert(type && "didn't provide type for typed data");
  assert(getTypeStoreSize(CGM, type) == end - begin);

  // Legalize vector types.
  if (auto vecTy = dyn_cast<llvm::VectorType>(type)) {
    SmallVector<llvm::Type*, 4> componentTys;
    legalizeVectorType(CGM, end - begin, vecTy, componentTys);
    assert(componentTys.size() >= 1);

    // Walk the initial components.
    for (size_t i = 0, e = componentTys.size(); i != e - 1; ++i) {
      llvm::Type *componentTy = componentTys[i];
      auto componentSize = getTypeStoreSize(CGM, componentTy);
      assert(componentSize < end - begin);
      addLegalTypedData(componentTy, begin, begin + componentSize);
      begin += componentSize;
    }

    return addLegalTypedData(componentTys.back(), begin, end);
  }

  // Legalize integer types.
  if (auto intTy = dyn_cast<llvm::IntegerType>(type)) {
    if (!isLegalIntegerType(CGM, intTy))
      return addOpaqueData(begin, end);
  }

  // All other types should be legal.
  return addLegalTypedData(type, begin, end);
}

void SwiftAggLowering::addLegalTypedData(llvm::Type *type,
                                         CharUnits begin, CharUnits end) {
  // Require the type to be naturally aligned.
  if (!begin.isZero() && !begin.isMultipleOf(getNaturalAlignment(CGM, type))) {

    // Try splitting vector types.
    if (auto vecTy = dyn_cast<llvm::VectorType>(type)) {
      auto split = splitLegalVectorType(CGM, end - begin, vecTy);
      auto eltTy = split.first;
      auto numElts = split.second;

      auto eltSize = (end - begin) / numElts;
      assert(eltSize == getTypeStoreSize(CGM, eltTy));
      for (size_t i = 0, e = numElts; i != e; ++i) {
        addLegalTypedData(eltTy, begin, begin + eltSize);
        begin += eltSize;
      }
      assert(begin == end);
      return;
    }

    return addOpaqueData(begin, end);
  }

  addEntry(type, begin, end);
}

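// For example, adding an i32 entry at [0,4) and then another i32 at the
// overlapping range [2,6) cannot be reconciled type-wise, so the existing
// entry is made opaque and stretched to cover [0,6).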
void SwiftAggLowering::addEntry(llvm::Type *type,
                                CharUnits begin, CharUnits end) {
  assert((!type ||
          (!isa<llvm::StructType>(type) && !isa<llvm::ArrayType>(type))) &&
         "cannot add aggregate-typed data");
  assert(!type || begin.isMultipleOf(getNaturalAlignment(CGM, type)));

  // Fast path: we can just add entries to the end.
  if (Entries.empty() || Entries.back().End <= begin) {
    Entries.push_back({begin, end, type});
    return;
  }

  // Find the first existing entry that ends after the start of the new data.
  // TODO: do a binary search if Entries is big enough for it to matter.
  size_t index = Entries.size() - 1;
  while (index != 0) {
    if (Entries[index - 1].End <= begin) break;
    --index;
  }

  // The entry ends after the start of the new data.
  // If the entry starts after the end of the new data, there's no conflict.
  if (Entries[index].Begin >= end) {
    // This insertion is potentially O(n), but the way we generally build
    // these layouts makes that unlikely to matter: we'd need a union of
    // several very large types.
    Entries.insert(Entries.begin() + index, {begin, end, type});
    return;
  }

  // Otherwise, the ranges overlap.  The new range might also overlap
  // with later ranges.
restartAfterSplit:

  // Simplest case: an exact overlap.
  if (Entries[index].Begin == begin && Entries[index].End == end) {
    // If the types match exactly, great.
    if (Entries[index].Type == type) return;

    // If either type is opaque, make the entry opaque and return.
    if (Entries[index].Type == nullptr) {
      return;
    } else if (type == nullptr) {
      Entries[index].Type = nullptr;
      return;
    }

    // If they disagree in an ABI-agnostic way, just resolve the conflict
    // arbitrarily.
    if (auto entryType = getCommonType(Entries[index].Type, type)) {
      Entries[index].Type = entryType;
      return;
    }

    // Otherwise, make the entry opaque.
    Entries[index].Type = nullptr;
    return;
  }

  // Okay, we have an overlapping conflict of some sort.

  // If we have a vector type, split it.
  if (auto vecTy = dyn_cast_or_null<llvm::VectorType>(type)) {
    auto eltTy = vecTy->getElementType();
    CharUnits eltSize = (end - begin) / vecTy->getNumElements();
    assert(eltSize == getTypeStoreSize(CGM, eltTy));
    for (unsigned i = 0, e = vecTy->getNumElements(); i != e; ++i) {
      addEntry(eltTy, begin, begin + eltSize);
      begin += eltSize;
    }
    assert(begin == end);
    return;
  }

  // If the entry is a vector type, split it and try again.
  if (Entries[index].Type && Entries[index].Type->isVectorTy()) {
    splitVectorEntry(index);
    goto restartAfterSplit;
  }

  // Okay, we have no choice but to make the existing entry opaque.

  Entries[index].Type = nullptr;

  // Stretch the start of the entry to the beginning of the range.
  if (begin < Entries[index].Begin) {
    Entries[index].Begin = begin;
    assert(index == 0 || begin >= Entries[index - 1].End);
  }

  // Stretch the end of the entry to the end of the range; but if we run
  // into the start of the next entry, just leave the range there and repeat.
  while (end > Entries[index].End) {
    assert(Entries[index].Type == nullptr);

    // If the range doesn't overlap the next entry, we're done.
    if (index == Entries.size() - 1 || end <= Entries[index + 1].Begin) {
      Entries[index].End = end;
      break;
    }

    // Otherwise, stretch to the start of the next entry.
    Entries[index].End = Entries[index + 1].Begin;

    // Continue with the next entry.
    index++;

    // This entry needs to be made opaque if it is not already.
    if (Entries[index].Type == nullptr)
      continue;

    // Split vector entries unless we completely subsume them.
    if (Entries[index].Type->isVectorTy() &&
        end < Entries[index].End) {
      splitVectorEntry(index);
    }

    // Make the entry opaque.
    Entries[index].Type = nullptr;
  }
}

/// Replace the entry of vector type at offset 'index' with a sequence
/// of its component vectors.
void SwiftAggLowering::splitVectorEntry(unsigned index) {
  auto vecTy = cast<llvm::VectorType>(Entries[index].Type);
  auto split = splitLegalVectorType(CGM, Entries[index].getWidth(), vecTy);

  auto eltTy = split.first;
  CharUnits eltSize = getTypeStoreSize(CGM, eltTy);
  auto numElts = split.second;
  Entries.insert(Entries.begin() + index + 1, numElts - 1, StorageEntry());

  CharUnits begin = Entries[index].Begin;
  for (unsigned i = 0; i != numElts; ++i) {
    // Fill each of the component entries in turn, including the blank
    // entries inserted above; writing only Entries[index] would leave
    // them default-constructed.
    unsigned idx = index + i;
    Entries[idx].Type = eltTy;
    Entries[idx].Begin = begin;
    Entries[idx].End = begin + eltSize;
    begin += eltSize;
  }
}

/// Given a power-of-two unit size, return the offset of the aligned unit
/// of that size which contains the given offset.
///
/// In other words, round down to the nearest multiple of the unit size.
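/// For example, offset 13 with a unit size of 8 yields 8.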
static CharUnits getOffsetAtStartOfUnit(CharUnits offset, CharUnits unitSize) {
  assert(isPowerOf2(unitSize.getQuantity()));
  auto unitMask = ~(unitSize.getQuantity() - 1);
  return CharUnits::fromQuantity(offset.getQuantity() & unitMask);
}

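// e.g. with a chunk size of 8, bytes 6 and 7 share a unit, but bytes 7 and 8
// do not.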
static bool areBytesInSameUnit(CharUnits first, CharUnits second,
                               CharUnits chunkSize) {
  return getOffsetAtStartOfUnit(first, chunkSize)
      == getOffsetAtStartOfUnit(second, chunkSize);
}

static bool isMergeableEntryType(llvm::Type *type) {
  // Opaquely-typed memory is always mergeable.
  if (type == nullptr) return true;

  // Pointers and integers are always mergeable.  In theory we should not
  // merge pointers, but (1) it doesn't currently matter in practice because
  // the chunk size is never greater than the size of a pointer and (2)
  // Swift IRGen uses integer types for a lot of things that are "really"
  // just storing pointers (like Optional<SomePointer>).  If we ever have a
  // target that would otherwise combine pointers, we should put some effort
  // into fixing those cases in Swift IRGen and then call out pointer types
  // here.

  // Floating-point and vector types should never be merged.
  // Most such types are too large and highly-aligned to ever trigger merging
  // in practice, but it's important for the rule to cover at least 'half'
  // and 'float', as well as things like small vectors of 'i1' or 'i8'.
  return (!type->isFloatingPointTy() && !type->isVectorTy());
}

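// For example, with an 8-byte chunk size, adjacent i8 entries at offsets 0
// and 1 should merge, while an entry ending at offset 8 and one beginning at
// offset 8 straddle a chunk boundary and should not.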
bool SwiftAggLowering::shouldMergeEntries(const StorageEntry &first,
                                          const StorageEntry &second,
                                          CharUnits chunkSize) {
  // Only merge entries that overlap the same chunk.  We test this first
  // despite it being a bit more expensive because this is the condition that
  // tends to prevent merging.
  if (!areBytesInSameUnit(first.End - CharUnits::One(), second.Begin,
                          chunkSize))
    return false;

  return (isMergeableEntryType(first.Type) &&
          isMergeableEntryType(second.Type));
}

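// Example for the chunked rebuild below: an opaque range [3,9) with an
// 8-byte chunk size is emitted as an i64 covering the aligned unit [0,8)
// followed by an i8 at [8,9).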
void SwiftAggLowering::finish() {
  if (Entries.empty()) {
    Finished = true;
    return;
  }

  // We logically split the layout down into a series of chunks of this size,
  // which is generally the size of a pointer.
  const CharUnits chunkSize = getMaximumVoluntaryIntegerSize(CGM);

  // First pass: if two entries should be merged, make them both opaque
  // and stretch one to meet the next.
  // Also, remember if there are any opaque entries.
  bool hasOpaqueEntries = (Entries[0].Type == nullptr);
  for (size_t i = 1, e = Entries.size(); i != e; ++i) {
    if (shouldMergeEntries(Entries[i - 1], Entries[i], chunkSize)) {
      Entries[i - 1].Type = nullptr;
      Entries[i].Type = nullptr;
      Entries[i - 1].End = Entries[i].Begin;
      hasOpaqueEntries = true;

    } else if (Entries[i].Type == nullptr) {
      hasOpaqueEntries = true;
    }
  }

  // The rest of the algorithm leaves non-opaque entries alone, so if we
  // have no opaque entries, we're done.
  if (!hasOpaqueEntries) {
    Finished = true;
    return;
  }

  // Okay, move the entries to a temporary and rebuild Entries.
  auto orig = std::move(Entries);
  assert(Entries.empty());

  for (size_t i = 0, e = orig.size(); i != e; ++i) {
    // Just copy over non-opaque entries.
    if (orig[i].Type != nullptr) {
      Entries.push_back(orig[i]);
      continue;
    }

    // Scan forward to determine the full extent of the next opaque range.
    // We know from the first pass that only contiguous ranges will overlap
    // the same aligned chunk.
    auto begin = orig[i].Begin;
    auto end = orig[i].End;
    while (i + 1 != e &&
           orig[i + 1].Type == nullptr &&
           end == orig[i + 1].Begin) {
      end = orig[i + 1].End;
      i++;
    }

    // Add an entry per intersected chunk.
    do {
      // Find the smallest aligned storage unit in the maximal aligned
      // storage unit containing 'begin' that contains all the bytes in
      // the intersection between the range and this chunk.
      CharUnits localBegin = begin;
      CharUnits chunkBegin = getOffsetAtStartOfUnit(localBegin, chunkSize);
      CharUnits chunkEnd = chunkBegin + chunkSize;
      CharUnits localEnd = std::min(end, chunkEnd);

      // Just do a simple loop over ever-increasing unit sizes.
      CharUnits unitSize = CharUnits::One();
      CharUnits unitBegin, unitEnd;
      for (; ; unitSize *= 2) {
        assert(unitSize <= chunkSize);
        unitBegin = getOffsetAtStartOfUnit(localBegin, unitSize);
        unitEnd = unitBegin + unitSize;
        if (unitEnd >= localEnd) break;
      }

      // Add an entry for this unit.
      auto entryTy =
        llvm::IntegerType::get(CGM.getLLVMContext(),
                               CGM.getContext().toBits(unitSize));
      Entries.push_back({unitBegin, unitEnd, entryTy});

      // The next chunk starts where this chunk left off.
      begin = localEnd;
    } while (begin != end);
  }

  // Okay, finally finished.
  Finished = true;
}

void SwiftAggLowering::enumerateComponents(EnumerationCallback callback) const {
  assert(Finished && "haven't yet finished lowering");

  for (auto &entry : Entries) {
    callback(entry.Begin, entry.End, entry.Type);
  }
}

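// For example, entries { i32 at [0,4), i32 at [8,12) } yield a coercion type
// of { i32, [4 x i8], i32 } and an unpadded type of { i32, i32 }.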
std::pair<llvm::StructType*, llvm::Type*>
SwiftAggLowering::getCoerceAndExpandTypes() const {
  assert(Finished && "haven't yet finished lowering");

  auto &ctx = CGM.getLLVMContext();

  if (Entries.empty()) {
    auto type = llvm::StructType::get(ctx);
    return { type, type };
  }

  SmallVector<llvm::Type*, 8> elts;
  CharUnits lastEnd = CharUnits::Zero();
  bool hasPadding = false;
  bool packed = false;
  for (auto &entry : Entries) {
    if (entry.Begin != lastEnd) {
      auto paddingSize = entry.Begin - lastEnd;
      assert(!paddingSize.isNegative());

      auto padding = llvm::ArrayType::get(llvm::Type::getInt8Ty(ctx),
                                          paddingSize.getQuantity());
      elts.push_back(padding);
      hasPadding = true;
    }

    if (!packed && !entry.Begin.isMultipleOf(
          CharUnits::fromQuantity(
            CGM.getDataLayout().getABITypeAlignment(entry.Type))))
      packed = true;

    elts.push_back(entry.Type);

    lastEnd = entry.Begin + getTypeAllocSize(CGM, entry.Type);
    assert(entry.End <= lastEnd);
  }

  // We don't need to adjust 'packed' to deal with possible tail padding
  // because we never do that kind of access through the coercion type.
  auto coercionType = llvm::StructType::get(ctx, elts, packed);

  llvm::Type *unpaddedType = coercionType;
  if (hasPadding) {
    elts.clear();
    for (auto &entry : Entries) {
      elts.push_back(entry.Type);
    }
    if (elts.size() == 1) {
      unpaddedType = elts[0];
    } else {
      unpaddedType = llvm::StructType::get(ctx, elts, /*packed*/ false);
    }
  } else if (Entries.size() == 1) {
    unpaddedType = Entries[0].Type;
  }

  return { coercionType, unpaddedType };
}

bool SwiftAggLowering::shouldPassIndirectly(bool asReturnValue) const {
  assert(Finished && "haven't yet finished lowering");

  // Empty types don't need to be passed indirectly.
  if (Entries.empty()) return false;

  // Avoid copying the array of types when there's just a single element.
  if (Entries.size() == 1) {
    return getSwiftABIInfo(CGM).shouldPassIndirectlyForSwift(
        Entries.back().Type, asReturnValue);
  }

  SmallVector<llvm::Type*, 8> componentTys;
  componentTys.reserve(Entries.size());
  for (auto &entry : Entries) {
    componentTys.push_back(entry.Type);
  }
  return getSwiftABIInfo(CGM).shouldPassIndirectlyForSwift(componentTys,
                                                           asReturnValue);
}

bool swiftcall::shouldPassIndirectly(CodeGenModule &CGM,
                                     ArrayRef<llvm::Type*> componentTys,
                                     bool asReturnValue) {
  return getSwiftABIInfo(CGM).shouldPassIndirectlyForSwift(componentTys,
                                                           asReturnValue);
}

CharUnits swiftcall::getMaximumVoluntaryIntegerSize(CodeGenModule &CGM) {
  // Currently always the size of an ordinary pointer.
  return CGM.getContext().toCharUnitsFromBits(
           CGM.getContext().getTargetInfo().getPointerWidth(0));
}

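// e.g. a 12-byte vector such as <3 x float> gets a natural alignment of 16.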
CharUnits swiftcall::getNaturalAlignment(CodeGenModule &CGM, llvm::Type *type) {
  // For Swift's purposes, this is always just the store size of the type
  // rounded up to a power of 2.
  auto size = (unsigned long long) getTypeStoreSize(CGM, type).getQuantity();
  if (!isPowerOf2(size)) {
    size = 1ULL << (llvm::findLastSet(size, llvm::ZB_Undefined) + 1);
  }
  assert(size >= CGM.getDataLayout().getABITypeAlignment(type));
  return CharUnits::fromQuantity(size);
}

bool swiftcall::isLegalIntegerType(CodeGenModule &CGM,
                                   llvm::IntegerType *intTy) {
  auto size = intTy->getBitWidth();
  switch (size) {
  case 1:
  case 8:
  case 16:
  case 32:
  case 64:
    // Just assume that the above are always legal.
    return true;

  case 128:
    return CGM.getContext().getTargetInfo().hasInt128Type();

  default:
    return false;
  }
}

bool swiftcall::isLegalVectorType(CodeGenModule &CGM, CharUnits vectorSize,
                                  llvm::VectorType *vectorTy) {
  return isLegalVectorType(CGM, vectorSize, vectorTy->getElementType(),
                           vectorTy->getNumElements());
}

bool swiftcall::isLegalVectorType(CodeGenModule &CGM, CharUnits vectorSize,
                                  llvm::Type *eltTy, unsigned numElts) {
  assert(numElts > 1 && "illegal vector length");
  return getSwiftABIInfo(CGM)
           .isLegalVectorTypeForSwift(vectorSize, eltTy, numElts);
}

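// For example, on a target where <4 x float> is legal, an illegal
// <8 x float> splits into { <4 x float>, 2 }; if the half isn't legal
// either, the vector decays to its elements, { float, 8 }.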
std::pair<llvm::Type*, unsigned>
swiftcall::splitLegalVectorType(CodeGenModule &CGM, CharUnits vectorSize,
                                llvm::VectorType *vectorTy) {
  auto numElts = vectorTy->getNumElements();
  auto eltTy = vectorTy->getElementType();

  // Try to split the vector type in half.
  if (numElts >= 4 && isPowerOf2(numElts)) {
    if (isLegalVectorType(CGM, vectorSize / 2, eltTy, numElts / 2))
      return {llvm::FixedVectorType::get(eltTy, numElts / 2), 2};
  }

  return {eltTy, numElts};
}

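// For example, legalizing <7 x float> on a target whose largest legal vector
// is <4 x float> yields { <4 x float>, <2 x float>, float }, or
// { <4 x float>, <3 x float> } if <3 x float> happens to be legal too.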
void swiftcall::legalizeVectorType(CodeGenModule &CGM, CharUnits origVectorSize,
                                   llvm::VectorType *origVectorTy,
                             llvm::SmallVectorImpl<llvm::Type*> &components) {
  // If it's already a legal vector type, use it.
  if (isLegalVectorType(CGM, origVectorSize, origVectorTy)) {
    components.push_back(origVectorTy);
    return;
  }

  // Try to split the vector into legal subvectors.
  auto numElts = origVectorTy->getNumElements();
  auto eltTy = origVectorTy->getElementType();
  assert(numElts != 1);

  // The largest size that we're still considering making subvectors of.
  // Always a power of 2.
  unsigned logCandidateNumElts = llvm::findLastSet(numElts, llvm::ZB_Undefined);
  unsigned candidateNumElts = 1U << logCandidateNumElts;
  assert(candidateNumElts <= numElts && candidateNumElts * 2 > numElts);

  // Minor optimization: don't check the legality of this exact size twice.
  if (candidateNumElts == numElts) {
    logCandidateNumElts--;
    candidateNumElts >>= 1;
  }

  CharUnits eltSize = (origVectorSize / numElts);
  CharUnits candidateSize = eltSize * candidateNumElts;

  // The sensibility of this algorithm relies on the fact that we never
  // have a legal non-power-of-2 vector size without having the power of 2
  // also be legal.
  while (logCandidateNumElts > 0) {
    assert(candidateNumElts == 1U << logCandidateNumElts);
    assert(candidateNumElts <= numElts);
    assert(candidateSize == eltSize * candidateNumElts);

    // Skip illegal vector sizes.
    if (!isLegalVectorType(CGM, candidateSize, eltTy, candidateNumElts)) {
      logCandidateNumElts--;
      candidateNumElts /= 2;
      candidateSize /= 2;
      continue;
    }

    // Add the right number of vectors of this size.
    auto numVecs = numElts >> logCandidateNumElts;
    components.append(numVecs,
                      llvm::FixedVectorType::get(eltTy, candidateNumElts));
    numElts -= (numVecs << logCandidateNumElts);

    if (numElts == 0) return;

    // It's possible that the number of elements remaining will be legal.
    // This can happen with e.g. <7 x float> when <3 x float> is legal.
    // This only needs to be separately checked if it's not a power of 2.
    if (numElts > 2 && !isPowerOf2(numElts) &&
        isLegalVectorType(CGM, eltSize * numElts, eltTy, numElts)) {
      components.push_back(llvm::FixedVectorType::get(eltTy, numElts));
      return;
    }

    // Bring the candidate size down to something no larger than numElts.
    do {
      logCandidateNumElts--;
      candidateNumElts /= 2;
      candidateSize /= 2;
    } while (candidateNumElts > numElts);
  }

  // Otherwise, just append a bunch of individual elements.
  components.append(numElts, eltTy);
}

bool swiftcall::mustPassRecordIndirectly(CodeGenModule &CGM,
                                         const RecordDecl *record) {
  // FIXME: should we not rely on the standard computation in Sema, just in
  // case we want to diverge from the platform ABI (e.g. on targets where
  // that uses the MSVC rule)?
  return !record->canPassInRegisters();
}

static ABIArgInfo classifyExpandedType(SwiftAggLowering &lowering,
                                       bool forReturn,
                                       CharUnits alignmentForIndirect) {
  if (lowering.empty()) {
    return ABIArgInfo::getIgnore();
  } else if (lowering.shouldPassIndirectly(forReturn)) {
    return ABIArgInfo::getIndirect(alignmentForIndirect, /*byval*/ false);
  } else {
    auto types = lowering.getCoerceAndExpandTypes();
    return ABIArgInfo::getCoerceAndExpand(types.first, types.second);
  }
}

static ABIArgInfo classifyType(CodeGenModule &CGM, CanQualType type,
                               bool forReturn) {
  if (auto recordType = dyn_cast<RecordType>(type)) {
    auto record = recordType->getDecl();
    auto &layout = CGM.getContext().getASTRecordLayout(record);

    if (mustPassRecordIndirectly(CGM, record))
      return ABIArgInfo::getIndirect(layout.getAlignment(), /*byval*/ false);

    SwiftAggLowering lowering(CGM);
    lowering.addTypedData(recordType->getDecl(), CharUnits::Zero(), layout);
    lowering.finish();

    return classifyExpandedType(lowering, forReturn, layout.getAlignment());
  }

  // Just assume that all of our target ABIs can support returning at least
  // two integer or floating-point values.
  if (isa<ComplexType>(type)) {
    return (forReturn ? ABIArgInfo::getDirect() : ABIArgInfo::getExpand());
  }

  // Vector types may need to be legalized.
  if (isa<VectorType>(type)) {
    SwiftAggLowering lowering(CGM);
    lowering.addTypedData(type, CharUnits::Zero());
    lowering.finish();

    CharUnits alignment = CGM.getContext().getTypeAlignInChars(type);
    return classifyExpandedType(lowering, forReturn, alignment);
  }

  // Member pointer types need to be expanded, but it's a simple form of
  // expansion that 'Direct' can handle.  Note that CanBeFlattened should be
  // true for this to work.

  // 'void' needs to be ignored.
  if (type->isVoidType()) {
    return ABIArgInfo::getIgnore();
  }

  // Everything else can be passed directly.
  return ABIArgInfo::getDirect();
}

ABIArgInfo swiftcall::classifyReturnType(CodeGenModule &CGM, CanQualType type) {
  return classifyType(CGM, type, /*forReturn*/ true);
}

ABIArgInfo swiftcall::classifyArgumentType(CodeGenModule &CGM,
                                           CanQualType type) {
  return classifyType(CGM, type, /*forReturn*/ false);
}

void swiftcall::computeABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI) {
  auto &retInfo = FI.getReturnInfo();
  retInfo = classifyReturnType(CGM, FI.getReturnType());

  for (unsigned i = 0, e = FI.arg_size(); i != e; ++i) {
    auto &argInfo = FI.arg_begin()[i];
    argInfo.info = classifyArgumentType(CGM, argInfo.type);
  }
}

// Is swifterror lowered to a register by the target ABI?
bool swiftcall::isSwiftErrorLoweredInRegister(CodeGenModule &CGM) {
  return getSwiftABIInfo(CGM).isSwiftErrorInRegister();
}