//===- AArch64.cpp --------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "ABIInfoImpl.h"
#include "TargetInfo.h"

using namespace clang;
using namespace clang::CodeGen;

//===----------------------------------------------------------------------===//
// AArch64 ABI Implementation
//===----------------------------------------------------------------------===//

namespace {

class AArch64ABIInfo : public ABIInfo {
  AArch64ABIKind Kind;

public:
  AArch64ABIInfo(CodeGenTypes &CGT, AArch64ABIKind Kind)
      : ABIInfo(CGT), Kind(Kind) {}

private:
  AArch64ABIKind getABIKind() const { return Kind; }
  bool isDarwinPCS() const { return Kind == AArch64ABIKind::DarwinPCS; }

  ABIArgInfo classifyReturnType(QualType RetTy, bool IsVariadic) const;
  ABIArgInfo classifyArgumentType(QualType RetTy, bool IsVariadic,
                                  unsigned CallingConvention) const;
  ABIArgInfo coerceIllegalVector(QualType Ty) const;
  bool isHomogeneousAggregateBaseType(QualType Ty) const override;
  bool isHomogeneousAggregateSmallEnough(const Type *Ty,
                                         uint64_t Members) const override;
  bool isZeroLengthBitfieldPermittedInHomogeneousAggregate() const override;

  bool isIllegalVectorType(QualType Ty) const;

  void computeInfo(CGFunctionInfo &FI) const override {
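    // Let the C++ ABI classify the return type first; it may force an sret
    // return (e.g. for records with non-trivial copy/destroy semantics).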
    if (!::classifyReturnType(getCXXABI(), FI, *this))
      FI.getReturnInfo() =
          classifyReturnType(FI.getReturnType(), FI.isVariadic());

    for (auto &it : FI.arguments())
      it.info = classifyArgumentType(it.type, FI.isVariadic(),
                                     FI.getCallingConvention());
  }

  Address EmitDarwinVAArg(Address VAListAddr, QualType Ty,
                          CodeGenFunction &CGF) const;

  Address EmitAAPCSVAArg(Address VAListAddr, QualType Ty,
                         CodeGenFunction &CGF) const;

  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                    QualType Ty) const override {
    llvm::Type *BaseTy = CGF.ConvertType(Ty);
    if (isa<llvm::ScalableVectorType>(BaseTy))
      llvm::report_fatal_error("Passing SVE types to variadic functions is "
                               "currently not supported");

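    // Pick the va_arg lowering for the calling convention in use: Win64 uses
    // the MSVC-style char * va_list, Darwin uses a plain stack slot, and
    // everything else follows the AAPCS64 five-field va_list.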
    return Kind == AArch64ABIKind::Win64 ? EmitMSVAArg(CGF, VAListAddr, Ty)
           : isDarwinPCS()               ? EmitDarwinVAArg(VAListAddr, Ty, CGF)
                                         : EmitAAPCSVAArg(VAListAddr, Ty, CGF);
  }

  Address EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
                      QualType Ty) const override;

  bool allowBFloatArgsAndRet() const override {
    return getTarget().hasBFloat16Type();
  }
};

class AArch64SwiftABIInfo : public SwiftABIInfo {
public:
  explicit AArch64SwiftABIInfo(CodeGenTypes &CGT)
      : SwiftABIInfo(CGT, /*SwiftErrorInRegister=*/true) {}

  bool isLegalVectorType(CharUnits VectorSize, llvm::Type *EltTy,
                         unsigned NumElts) const override;
};

class AArch64TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  AArch64TargetCodeGenInfo(CodeGenTypes &CGT, AArch64ABIKind Kind)
      : TargetCodeGenInfo(std::make_unique<AArch64ABIInfo>(CGT, Kind)) {
    SwiftInfo = std::make_unique<AArch64SwiftABIInfo>(CGT);
  }

  StringRef getARCRetainAutoreleasedReturnValueMarker() const override {
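    // No-op marker emitted between a call and objc_retainAutoreleasedReturnValue
    // so the Objective-C runtime can recognize and optimize the sequence.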
96     return "mov\tfp, fp\t\t// marker for objc_retainAutoreleaseReturnValue";
97   }
98 
getDwarfEHStackPointer(CodeGen::CodeGenModule & M) const99   int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
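    // The AArch64 stack pointer (SP) has DWARF register number 31.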
    return 31;
  }

  bool doesReturnSlotInterfereWithArgs() const override { return false; }

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const override {
    const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
    if (!FD)
      return;

    const auto *TA = FD->getAttr<TargetAttr>();
    if (TA == nullptr)
      return;

    ParsedTargetAttr Attr =
        CGM.getTarget().parseTargetAttr(TA->getFeaturesStr());
    if (Attr.BranchProtection.empty())
      return;

    TargetInfo::BranchProtectionInfo BPI;
    StringRef Error;
    (void)CGM.getTarget().validateBranchProtection(Attr.BranchProtection,
                                                   Attr.CPU, BPI, Error);
    assert(Error.empty());

    auto *Fn = cast<llvm::Function>(GV);
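    // Indexed by LangOptions::SignReturnAddressScopeKind (None, NonLeaf, All).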
    static const char *SignReturnAddrStr[] = {"none", "non-leaf", "all"};
    Fn->addFnAttr("sign-return-address",
                  SignReturnAddrStr[static_cast<int>(BPI.SignReturnAddr)]);

    if (BPI.SignReturnAddr != LangOptions::SignReturnAddressScopeKind::None) {
      Fn->addFnAttr("sign-return-address-key",
                    BPI.SignKey == LangOptions::SignReturnAddressKeyKind::AKey
                        ? "a_key"
                        : "b_key");
    }

    Fn->addFnAttr("branch-target-enforcement",
                  BPI.BranchTargetEnforcement ? "true" : "false");
    Fn->addFnAttr("branch-protection-pauth-lr",
                  BPI.BranchProtectionPAuthLR ? "true" : "false");
    Fn->addFnAttr("guarded-control-stack",
                  BPI.GuardedControlStack ? "true" : "false");
  }

  bool isScalarizableAsmOperand(CodeGen::CodeGenFunction &CGF,
                                llvm::Type *Ty) const override {
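    // With the LS64 extension, a 512-bit value (ACLE's data512_t, a struct
    // wrapping a single uint64_t[8] member) may be used directly as an
    // inline-asm operand.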
    if (CGF.getTarget().hasFeature("ls64")) {
      auto *ST = dyn_cast<llvm::StructType>(Ty);
      if (ST && ST->getNumElements() == 1) {
        auto *AT = dyn_cast<llvm::ArrayType>(ST->getElementType(0));
        if (AT && AT->getNumElements() == 8 &&
            AT->getElementType()->isIntegerTy(64))
          return true;
      }
    }
    return TargetCodeGenInfo::isScalarizableAsmOperand(CGF, Ty);
  }
};

class WindowsAArch64TargetCodeGenInfo : public AArch64TargetCodeGenInfo {
public:
  WindowsAArch64TargetCodeGenInfo(CodeGenTypes &CGT, AArch64ABIKind K)
      : AArch64TargetCodeGenInfo(CGT, K) {}

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const override;

  void getDependentLibraryOption(llvm::StringRef Lib,
                                 llvm::SmallString<24> &Opt) const override {
    Opt = "/DEFAULTLIB:" + qualifyWindowsLibrary(Lib);
  }

  void getDetectMismatchOption(llvm::StringRef Name, llvm::StringRef Value,
                               llvm::SmallString<32> &Opt) const override {
    Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";
  }
};

void WindowsAArch64TargetCodeGenInfo::setTargetAttributes(
    const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const {
  AArch64TargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
  if (GV->isDeclaration())
    return;
  addStackProbeTargetAttributes(D, GV, CGM);
}
} // namespace

ABIArgInfo AArch64ABIInfo::coerceIllegalVector(QualType Ty) const {
  assert(Ty->isVectorType() && "expected vector type!");

  const auto *VT = Ty->castAs<VectorType>();
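  // Fixed-length SVE predicates are passed and returned as the scalable
  // <vscale x 16 x i1> type.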
  if (VT->getVectorKind() == VectorKind::SveFixedLengthPredicate) {
    assert(VT->getElementType()->isBuiltinType() && "expected builtin type!");
    assert(VT->getElementType()->castAs<BuiltinType>()->getKind() ==
               BuiltinType::UChar &&
           "unexpected builtin type for SVE predicate!");
    return ABIArgInfo::getDirect(llvm::ScalableVectorType::get(
        llvm::Type::getInt1Ty(getVMContext()), 16));
  }

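  // Fixed-length SVE data vectors are passed and returned as the corresponding
  // scalable vector type with the same element type.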
  if (VT->getVectorKind() == VectorKind::SveFixedLengthData) {
    assert(VT->getElementType()->isBuiltinType() && "expected builtin type!");

    const auto *BT = VT->getElementType()->castAs<BuiltinType>();
    llvm::ScalableVectorType *ResType = nullptr;
    switch (BT->getKind()) {
    default:
      llvm_unreachable("unexpected builtin type for SVE vector!");
    case BuiltinType::SChar:
    case BuiltinType::UChar:
      ResType = llvm::ScalableVectorType::get(
          llvm::Type::getInt8Ty(getVMContext()), 16);
      break;
    case BuiltinType::Short:
    case BuiltinType::UShort:
      ResType = llvm::ScalableVectorType::get(
          llvm::Type::getInt16Ty(getVMContext()), 8);
      break;
    case BuiltinType::Int:
    case BuiltinType::UInt:
      ResType = llvm::ScalableVectorType::get(
          llvm::Type::getInt32Ty(getVMContext()), 4);
      break;
    case BuiltinType::Long:
    case BuiltinType::ULong:
      ResType = llvm::ScalableVectorType::get(
          llvm::Type::getInt64Ty(getVMContext()), 2);
      break;
    case BuiltinType::Half:
      ResType = llvm::ScalableVectorType::get(
          llvm::Type::getHalfTy(getVMContext()), 8);
      break;
    case BuiltinType::Float:
      ResType = llvm::ScalableVectorType::get(
          llvm::Type::getFloatTy(getVMContext()), 4);
      break;
    case BuiltinType::Double:
      ResType = llvm::ScalableVectorType::get(
          llvm::Type::getDoubleTy(getVMContext()), 2);
      break;
    case BuiltinType::BFloat16:
      ResType = llvm::ScalableVectorType::get(
          llvm::Type::getBFloatTy(getVMContext()), 8);
      break;
    }
    return ABIArgInfo::getDirect(ResType);
  }

  uint64_t Size = getContext().getTypeSize(Ty);
  // Android promotes <2 x i8> to i16, not i32
  if ((isAndroid() || isOHOSFamily()) && (Size <= 16)) {
    llvm::Type *ResType = llvm::Type::getInt16Ty(getVMContext());
    return ABIArgInfo::getDirect(ResType);
  }
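  // Other vectors of 32 bits or fewer are passed as a single i32.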
  if (Size <= 32) {
    llvm::Type *ResType = llvm::Type::getInt32Ty(getVMContext());
    return ABIArgInfo::getDirect(ResType);
  }
  if (Size == 64) {
    auto *ResType =
        llvm::FixedVectorType::get(llvm::Type::getInt32Ty(getVMContext()), 2);
    return ABIArgInfo::getDirect(ResType);
  }
  if (Size == 128) {
    auto *ResType =
        llvm::FixedVectorType::get(llvm::Type::getInt32Ty(getVMContext()), 4);
    return ABIArgInfo::getDirect(ResType);
  }
  return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
}

ABIArgInfo
AArch64ABIInfo::classifyArgumentType(QualType Ty, bool IsVariadic,
                                     unsigned CallingConvention) const {
  Ty = useFirstFieldIfTransparentUnion(Ty);

  // Handle illegal vector types here.
  if (isIllegalVectorType(Ty))
    return coerceIllegalVector(Ty);

  if (!isAggregateTypeForABI(Ty)) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = Ty->getAs<EnumType>())
      Ty = EnumTy->getDecl()->getIntegerType();

    if (const auto *EIT = Ty->getAs<BitIntType>())
      if (EIT->getNumBits() > 128)
        return getNaturalAlignIndirect(Ty);

    return (isPromotableIntegerTypeForABI(Ty) && isDarwinPCS()
                ? ABIArgInfo::getExtend(Ty)
                : ABIArgInfo::getDirect());
  }

  // Structures with either a non-trivial destructor or a non-trivial
  // copy constructor are always indirect.
  if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) {
    return getNaturalAlignIndirect(Ty, /*ByVal=*/RAA ==
                                     CGCXXABI::RAA_DirectInMemory);
  }

  // Empty records are always ignored on Darwin, but actually passed in C++ mode
  // elsewhere for GNU compatibility.
  uint64_t Size = getContext().getTypeSize(Ty);
  bool IsEmpty = isEmptyRecord(getContext(), Ty, true);
  if (IsEmpty || Size == 0) {
    if (!getContext().getLangOpts().CPlusPlus || isDarwinPCS())
      return ABIArgInfo::getIgnore();

    // GNU C mode. The only argument that gets ignored is an empty one with size
    // 0.
    if (IsEmpty && Size == 0)
      return ABIArgInfo::getIgnore();
    return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
  }

  // Homogeneous Floating-point Aggregates (HFAs) need to be expanded.
  const Type *Base = nullptr;
  uint64_t Members = 0;
  bool IsWin64 = Kind == AArch64ABIKind::Win64 ||
                 CallingConvention == llvm::CallingConv::Win64;
  bool IsWinVariadic = IsWin64 && IsVariadic;
  // In variadic functions on Windows, all composite types are treated alike,
  // no special handling of HFAs/HVAs.
  if (!IsWinVariadic && isHomogeneousAggregate(Ty, Base, Members)) {
    if (Kind != AArch64ABIKind::AAPCS)
      return ABIArgInfo::getDirect(
          llvm::ArrayType::get(CGT.ConvertType(QualType(Base, 0)), Members));

    // For HFAs/HVAs, cap the argument alignment to 16, otherwise
    // set it to 8 according to the AAPCS64 document.
    unsigned Align =
        getContext().getTypeUnadjustedAlignInChars(Ty).getQuantity();
    Align = (Align >= 16) ? 16 : 8;
    return ABIArgInfo::getDirect(
        llvm::ArrayType::get(CGT.ConvertType(QualType(Base, 0)), Members), 0,
        nullptr, true, Align);
  }

  // Aggregates <= 16 bytes are passed directly in registers or on the stack.
  if (Size <= 128) {
    // On RenderScript, coerce Aggregates <= 16 bytes to an integer array of
    // same size and alignment.
    if (getTarget().isRenderScriptTarget()) {
      return coerceToIntArray(Ty, getContext(), getVMContext());
    }
    unsigned Alignment;
    if (Kind == AArch64ABIKind::AAPCS) {
      Alignment = getContext().getTypeUnadjustedAlign(Ty);
      Alignment = Alignment < 128 ? 64 : 128;
    } else {
      Alignment =
          std::max(getContext().getTypeAlign(Ty),
                   (unsigned)getTarget().getPointerWidth(LangAS::Default));
    }
    Size = llvm::alignTo(Size, Alignment);

    // We use a pair of i64 for 16-byte aggregate with 8-byte alignment.
    // For aggregates with 16-byte alignment, we use i128.
    llvm::Type *BaseTy = llvm::Type::getIntNTy(getVMContext(), Alignment);
    return ABIArgInfo::getDirect(
        Size == Alignment ? BaseTy
                          : llvm::ArrayType::get(BaseTy, Size / Alignment));
  }

  return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
}

ABIArgInfo AArch64ABIInfo::classifyReturnType(QualType RetTy,
                                              bool IsVariadic) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  if (const auto *VT = RetTy->getAs<VectorType>()) {
    if (VT->getVectorKind() == VectorKind::SveFixedLengthData ||
        VT->getVectorKind() == VectorKind::SveFixedLengthPredicate)
      return coerceIllegalVector(RetTy);
  }

  // Large vector types should be returned via memory.
  if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 128)
    return getNaturalAlignIndirect(RetTy);

  if (!isAggregateTypeForABI(RetTy)) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
      RetTy = EnumTy->getDecl()->getIntegerType();

    if (const auto *EIT = RetTy->getAs<BitIntType>())
      if (EIT->getNumBits() > 128)
        return getNaturalAlignIndirect(RetTy);

    return (isPromotableIntegerTypeForABI(RetTy) && isDarwinPCS()
                ? ABIArgInfo::getExtend(RetTy)
                : ABIArgInfo::getDirect());
  }

  uint64_t Size = getContext().getTypeSize(RetTy);
  if (isEmptyRecord(getContext(), RetTy, true) || Size == 0)
    return ABIArgInfo::getIgnore();

  const Type *Base = nullptr;
  uint64_t Members = 0;
  if (isHomogeneousAggregate(RetTy, Base, Members) &&
      !(getTarget().getTriple().getArch() == llvm::Triple::aarch64_32 &&
        IsVariadic))
    // Homogeneous Floating-point Aggregates (HFAs) are returned directly.
    return ABIArgInfo::getDirect();

  // Aggregates <= 16 bytes are returned directly in registers or on the stack.
  if (Size <= 128) {
    // On RenderScript, coerce Aggregates <= 16 bytes to an integer array of
    // same size and alignment.
    if (getTarget().isRenderScriptTarget()) {
      return coerceToIntArray(RetTy, getContext(), getVMContext());
    }

    if (Size <= 64 && getDataLayout().isLittleEndian()) {
      // Composite types are returned in lower bits of a 64-bit register for LE,
      // and in higher bits for BE. However, integer types are always returned
      // in lower bits for both LE and BE, and they are not rounded up to
      // 64-bits. We can skip rounding up of composite types for LE, but not for
      // BE, otherwise composite types will be indistinguishable from integer
      // types.
      return ABIArgInfo::getDirect(
          llvm::IntegerType::get(getVMContext(), Size));
    }

    unsigned Alignment = getContext().getTypeAlign(RetTy);
    Size = llvm::alignTo(Size, 64); // round up to multiple of 8 bytes

    // We use a pair of i64 for 16-byte aggregate with 8-byte alignment.
    // For aggregates with 16-byte alignment, we use i128.
    if (Alignment < 128 && Size == 128) {
      llvm::Type *BaseTy = llvm::Type::getInt64Ty(getVMContext());
      return ABIArgInfo::getDirect(llvm::ArrayType::get(BaseTy, Size / 64));
    }
    return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Size));
  }

  return getNaturalAlignIndirect(RetTy);
}

/// isIllegalVectorType - check whether the vector type is legal for AArch64.
bool AArch64ABIInfo::isIllegalVectorType(QualType Ty) const {
  if (const VectorType *VT = Ty->getAs<VectorType>()) {
    // Check whether VT is a fixed-length SVE vector. These types are
    // represented as scalable vectors in function args/return and must be
    // coerced from fixed vectors.
    if (VT->getVectorKind() == VectorKind::SveFixedLengthData ||
        VT->getVectorKind() == VectorKind::SveFixedLengthPredicate)
      return true;

    // Check whether VT is legal.
    unsigned NumElements = VT->getNumElements();
    uint64_t Size = getContext().getTypeSize(VT);
    // NumElements should be a power of 2.
    if (!llvm::isPowerOf2_32(NumElements))
      return true;

    // arm64_32 has to be compatible with the ARM logic here, which allows huge
    // vectors for some reason.
    llvm::Triple Triple = getTarget().getTriple();
    if (Triple.getArch() == llvm::Triple::aarch64_32 &&
        Triple.isOSBinFormatMachO())
      return Size <= 32;

    return Size != 64 && (Size != 128 || NumElements == 1);
  }
  return false;
}

bool AArch64SwiftABIInfo::isLegalVectorType(CharUnits VectorSize,
                                            llvm::Type *EltTy,
                                            unsigned NumElts) const {
  if (!llvm::isPowerOf2_32(NumElts))
    return false;
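  // Only 64-bit vectors, and 128-bit vectors with more than one element, map
  // cleanly onto NEON registers.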
  if (VectorSize.getQuantity() != 8 &&
      (VectorSize.getQuantity() != 16 || NumElts == 1))
    return false;
  return true;
}

bool AArch64ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
  // Homogeneous aggregates for AAPCS64 must have base types of a floating
  // point type or a short-vector type. This is the same as the 32-bit ABI,
  // but with the difference that any floating-point type is allowed,
  // including __fp16.
  if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
    if (BT->isFloatingPoint())
      return true;
  } else if (const VectorType *VT = Ty->getAs<VectorType>()) {
    unsigned VecSize = getContext().getTypeSize(VT);
    if (VecSize == 64 || VecSize == 128)
      return true;
  }
  return false;
}

bool AArch64ABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base,
                                                       uint64_t Members) const {
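  // AAPCS64 limits homogeneous aggregates (HFAs/HVAs) to at most four members.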
  return Members <= 4;
}

bool AArch64ABIInfo::isZeroLengthBitfieldPermittedInHomogeneousAggregate()
    const {
  // AAPCS64 says that the rule for whether something is a homogeneous
  // aggregate is applied to the output of the data layout decision. So
  // anything that doesn't affect the data layout also does not affect
  // homogeneity. In particular, zero-length bitfields don't stop a struct
  // being homogeneous.
  return true;
}

Address AArch64ABIInfo::EmitAAPCSVAArg(Address VAListAddr, QualType Ty,
                                       CodeGenFunction &CGF) const {
  ABIArgInfo AI = classifyArgumentType(Ty, /*IsVariadic=*/true,
                                       CGF.CurFnInfo->getCallingConvention());
  // Empty records are ignored for parameter passing purposes.
  if (AI.isIgnore()) {
    uint64_t PointerSize = getTarget().getPointerWidth(LangAS::Default) / 8;
    CharUnits SlotSize = CharUnits::fromQuantity(PointerSize);
    VAListAddr = VAListAddr.withElementType(CGF.Int8PtrTy);
    auto *Load = CGF.Builder.CreateLoad(VAListAddr);
    return Address(Load, CGF.ConvertTypeForMem(Ty), SlotSize);
  }

  bool IsIndirect = AI.isIndirect();

  llvm::Type *BaseTy = CGF.ConvertType(Ty);
  if (IsIndirect)
    BaseTy = llvm::PointerType::getUnqual(BaseTy);
  else if (AI.getCoerceToType())
    BaseTy = AI.getCoerceToType();

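  // If the argument was coerced to an array type (an HFA/HVA), each element of
  // the array occupies its own register.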
  unsigned NumRegs = 1;
  if (llvm::ArrayType *ArrTy = dyn_cast<llvm::ArrayType>(BaseTy)) {
    BaseTy = ArrTy->getElementType();
    NumRegs = ArrTy->getNumElements();
  }
  bool IsFPR = BaseTy->isFloatingPointTy() || BaseTy->isVectorTy();

  // The AArch64 va_list type and handling is specified in the Procedure Call
  // Standard, section B.4:
  //
  // struct {
  //   void *__stack;
  //   void *__gr_top;
  //   void *__vr_top;
  //   int __gr_offs;
  //   int __vr_offs;
  // };

  llvm::BasicBlock *MaybeRegBlock = CGF.createBasicBlock("vaarg.maybe_reg");
  llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
  llvm::BasicBlock *OnStackBlock = CGF.createBasicBlock("vaarg.on_stack");
  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");

  CharUnits TySize = getContext().getTypeSizeInChars(Ty);
  CharUnits TyAlign = getContext().getTypeUnadjustedAlignInChars(Ty);

  Address reg_offs_p = Address::invalid();
  llvm::Value *reg_offs = nullptr;
  int reg_top_index;
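  // An indirectly-passed argument consumes only a single pointer-sized slot in
  // the register save area.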
  int RegSize = IsIndirect ? 8 : TySize.getQuantity();
  if (!IsFPR) {
    // 3 is the field number of __gr_offs
    reg_offs_p = CGF.Builder.CreateStructGEP(VAListAddr, 3, "gr_offs_p");
    reg_offs = CGF.Builder.CreateLoad(reg_offs_p, "gr_offs");
    reg_top_index = 1; // field number for __gr_top
    RegSize = llvm::alignTo(RegSize, 8);
  } else {
    // 4 is the field number of __vr_offs.
    reg_offs_p = CGF.Builder.CreateStructGEP(VAListAddr, 4, "vr_offs_p");
    reg_offs = CGF.Builder.CreateLoad(reg_offs_p, "vr_offs");
    reg_top_index = 2; // field number for __vr_top
    RegSize = 16 * NumRegs;
  }

  //=======================================
  // Find out where argument was passed
  //=======================================

  // If reg_offs >= 0 we're already using the stack for this type of
  // argument. We don't want to keep updating reg_offs (in case it overflows,
  // though anyone passing 2GB of arguments, each at most 16 bytes, deserves
  // whatever they get).
  llvm::Value *UsingStack = nullptr;
  UsingStack = CGF.Builder.CreateICmpSGE(
      reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, 0));

  CGF.Builder.CreateCondBr(UsingStack, OnStackBlock, MaybeRegBlock);

  // Otherwise, at least some kind of argument could go in these registers, the
  // question is whether this particular type is too big.
  CGF.EmitBlock(MaybeRegBlock);

  // Integer arguments may need to correct register alignment (for example a
  // "struct { __int128 a; };" gets passed in x_2N, x_{2N+1}). In this case we
  // align __gr_offs to calculate the potential address.
  if (!IsFPR && !IsIndirect && TyAlign.getQuantity() > 8) {
    int Align = TyAlign.getQuantity();

    reg_offs = CGF.Builder.CreateAdd(
        reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, Align - 1),
        "align_regoffs");
    reg_offs = CGF.Builder.CreateAnd(
        reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, -Align),
        "aligned_regoffs");
  }

  // Update the gr_offs/vr_offs pointer for the next call to va_arg on this
  // va_list. This is done unconditionally because allocating an argument to
  // the stack also uses up all the remaining registers of the appropriate
  // kind.
  llvm::Value *NewOffset = nullptr;
  NewOffset = CGF.Builder.CreateAdd(
      reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, RegSize), "new_reg_offs");
  CGF.Builder.CreateStore(NewOffset, reg_offs_p);

  // Now we're in a position to decide whether this argument really was in
  // registers or not.
  llvm::Value *InRegs = nullptr;
  InRegs = CGF.Builder.CreateICmpSLE(
      NewOffset, llvm::ConstantInt::get(CGF.Int32Ty, 0), "inreg");

  CGF.Builder.CreateCondBr(InRegs, InRegBlock, OnStackBlock);

  //=======================================
  // Argument was in registers
  //=======================================

  // Now we emit the code for if the argument was originally passed in
  // registers. First start the appropriate block:
  CGF.EmitBlock(InRegBlock);

  llvm::Value *reg_top = nullptr;
  Address reg_top_p =
      CGF.Builder.CreateStructGEP(VAListAddr, reg_top_index, "reg_top_p");
  reg_top = CGF.Builder.CreateLoad(reg_top_p, "reg_top");
  Address BaseAddr(CGF.Builder.CreateInBoundsGEP(CGF.Int8Ty, reg_top, reg_offs),
                   CGF.Int8Ty, CharUnits::fromQuantity(IsFPR ? 16 : 8));
  Address RegAddr = Address::invalid();
  llvm::Type *MemTy = CGF.ConvertTypeForMem(Ty), *ElementTy = MemTy;

  if (IsIndirect) {
    // If it's been passed indirectly (actually a struct), whatever we find from
    // stored registers or on the stack will actually be a struct **.
    MemTy = llvm::PointerType::getUnqual(MemTy);
  }

  const Type *Base = nullptr;
  uint64_t NumMembers = 0;
  bool IsHFA = isHomogeneousAggregate(Ty, Base, NumMembers);
  if (IsHFA && NumMembers > 1) {
    // Homogeneous aggregates passed in registers will have their elements split
    // and stored 16-bytes apart regardless of size (they're notionally in qN,
    // qN+1, ...). We reload and store into a temporary local variable
    // contiguously.
    assert(!IsIndirect && "Homogeneous aggregates should be passed directly");
    auto BaseTyInfo = getContext().getTypeInfoInChars(QualType(Base, 0));
    llvm::Type *BaseTy = CGF.ConvertType(QualType(Base, 0));
    llvm::Type *HFATy = llvm::ArrayType::get(BaseTy, NumMembers);
    Address Tmp = CGF.CreateTempAlloca(HFATy,
                                       std::max(TyAlign, BaseTyInfo.Align));

    // On big-endian platforms, the value will be right-aligned in its slot.
    int Offset = 0;
    if (CGF.CGM.getDataLayout().isBigEndian() &&
        BaseTyInfo.Width.getQuantity() < 16)
      Offset = 16 - BaseTyInfo.Width.getQuantity();

    for (unsigned i = 0; i < NumMembers; ++i) {
      CharUnits BaseOffset = CharUnits::fromQuantity(16 * i + Offset);
      Address LoadAddr =
        CGF.Builder.CreateConstInBoundsByteGEP(BaseAddr, BaseOffset);
      LoadAddr = LoadAddr.withElementType(BaseTy);

      Address StoreAddr = CGF.Builder.CreateConstArrayGEP(Tmp, i);

      llvm::Value *Elem = CGF.Builder.CreateLoad(LoadAddr);
      CGF.Builder.CreateStore(Elem, StoreAddr);
    }

    RegAddr = Tmp.withElementType(MemTy);
  } else {
    // Otherwise the object is contiguous in memory.

    // It might be right-aligned in its slot.
    CharUnits SlotSize = BaseAddr.getAlignment();
    if (CGF.CGM.getDataLayout().isBigEndian() && !IsIndirect &&
        (IsHFA || !isAggregateTypeForABI(Ty)) &&
        TySize < SlotSize) {
      CharUnits Offset = SlotSize - TySize;
      BaseAddr = CGF.Builder.CreateConstInBoundsByteGEP(BaseAddr, Offset);
    }

    RegAddr = BaseAddr.withElementType(MemTy);
  }

  CGF.EmitBranch(ContBlock);

  //=======================================
  // Argument was on the stack
  //=======================================
  CGF.EmitBlock(OnStackBlock);

  Address stack_p = CGF.Builder.CreateStructGEP(VAListAddr, 0, "stack_p");
  llvm::Value *OnStackPtr = CGF.Builder.CreateLoad(stack_p, "stack");

  // Again, stack arguments may need realignment. In this case both integer and
  // floating-point ones might be affected.
  if (!IsIndirect && TyAlign.getQuantity() > 8) {
    int Align = TyAlign.getQuantity();

    OnStackPtr = CGF.Builder.CreatePtrToInt(OnStackPtr, CGF.Int64Ty);

    OnStackPtr = CGF.Builder.CreateAdd(
        OnStackPtr, llvm::ConstantInt::get(CGF.Int64Ty, Align - 1),
        "align_stack");
    OnStackPtr = CGF.Builder.CreateAnd(
        OnStackPtr, llvm::ConstantInt::get(CGF.Int64Ty, -Align),
        "align_stack");

    OnStackPtr = CGF.Builder.CreateIntToPtr(OnStackPtr, CGF.Int8PtrTy);
  }
  Address OnStackAddr = Address(OnStackPtr, CGF.Int8Ty,
                                std::max(CharUnits::fromQuantity(8), TyAlign));

  // All stack slots are multiples of 8 bytes.
  CharUnits StackSlotSize = CharUnits::fromQuantity(8);
  CharUnits StackSize;
  if (IsIndirect)
    StackSize = StackSlotSize;
  else
    StackSize = TySize.alignTo(StackSlotSize);

  llvm::Value *StackSizeC = CGF.Builder.getSize(StackSize);
  llvm::Value *NewStack = CGF.Builder.CreateInBoundsGEP(
      CGF.Int8Ty, OnStackPtr, StackSizeC, "new_stack");

  // Write the new value of __stack for the next call to va_arg.
  CGF.Builder.CreateStore(NewStack, stack_p);

  if (CGF.CGM.getDataLayout().isBigEndian() && !isAggregateTypeForABI(Ty) &&
      TySize < StackSlotSize) {
    CharUnits Offset = StackSlotSize - TySize;
    OnStackAddr = CGF.Builder.CreateConstInBoundsByteGEP(OnStackAddr, Offset);
  }

  OnStackAddr = OnStackAddr.withElementType(MemTy);

  CGF.EmitBranch(ContBlock);

  //=======================================
  // Tidy up
  //=======================================
  CGF.EmitBlock(ContBlock);

  Address ResAddr = emitMergePHI(CGF, RegAddr, InRegBlock, OnStackAddr,
                                 OnStackBlock, "vaargs.addr");

  if (IsIndirect)
    return Address(CGF.Builder.CreateLoad(ResAddr, "vaarg.addr"), ElementTy,
                   TyAlign);

  return ResAddr;
}

Address AArch64ABIInfo::EmitDarwinVAArg(Address VAListAddr, QualType Ty,
                                        CodeGenFunction &CGF) const {
  // The backend's lowering doesn't support va_arg for aggregates or
  // illegal vector types.  Lower VAArg here for these cases and use
  // the LLVM va_arg instruction for everything else.
  if (!isAggregateTypeForABI(Ty) && !isIllegalVectorType(Ty))
    return EmitVAArgInstr(CGF, VAListAddr, Ty, ABIArgInfo::getDirect());

  uint64_t PointerSize = getTarget().getPointerWidth(LangAS::Default) / 8;
  CharUnits SlotSize = CharUnits::fromQuantity(PointerSize);

  // Empty records are ignored for parameter passing purposes.
  if (isEmptyRecord(getContext(), Ty, true))
    return Address(CGF.Builder.CreateLoad(VAListAddr, "ap.cur"),
                   CGF.ConvertTypeForMem(Ty), SlotSize);

  // The size of the actual thing passed, which might end up just
  // being a pointer for indirect types.
  auto TyInfo = getContext().getTypeInfoInChars(Ty);

  // Arguments bigger than 16 bytes which aren't homogeneous
  // aggregates should be passed indirectly.
  bool IsIndirect = false;
  if (TyInfo.Width.getQuantity() > 16) {
    const Type *Base = nullptr;
    uint64_t Members = 0;
    IsIndirect = !isHomogeneousAggregate(Ty, Base, Members);
  }

  return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect,
                          TyInfo, SlotSize, /*AllowHigherAlign*/ true);
}

Address AArch64ABIInfo::EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                    QualType Ty) const {
  bool IsIndirect = false;

  // Composites larger than 16 bytes are passed by reference.
  if (isAggregateTypeForABI(Ty) && getContext().getTypeSize(Ty) > 128)
    IsIndirect = true;

  return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect,
                          CGF.getContext().getTypeInfoInChars(Ty),
                          CharUnits::fromQuantity(8),
                          /*allowHigherAlign*/ false);
}

std::unique_ptr<TargetCodeGenInfo>
CodeGen::createAArch64TargetCodeGenInfo(CodeGenModule &CGM,
                                        AArch64ABIKind Kind) {
  return std::make_unique<AArch64TargetCodeGenInfo>(CGM.getTypes(), Kind);
}

std::unique_ptr<TargetCodeGenInfo>
CodeGen::createWindowsAArch64TargetCodeGenInfo(CodeGenModule &CGM,
                                               AArch64ABIKind K) {
  return std::make_unique<WindowsAArch64TargetCodeGenInfo>(CGM.getTypes(), K);
}