1 //===--- AArch64.cpp - Implement AArch64 target feature support -----------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements AArch64 TargetInfo objects.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "AArch64.h"
14 #include "clang/Basic/LangOptions.h"
15 #include "clang/Basic/TargetBuiltins.h"
16 #include "clang/Basic/TargetInfo.h"
17 #include "llvm/ADT/ArrayRef.h"
18 #include "llvm/ADT/StringExtras.h"
19 #include "llvm/ADT/StringSwitch.h"
20 #include "llvm/Support/AArch64TargetParser.h"
21 
22 using namespace clang;
23 using namespace clang::targets;
24 
// Table of all AArch64 target-specific builtins.  It is assembled by
// expanding the BUILTIN-family macros from the NEON, SVE and core AArch64
// .def files, in that order; getTargetBuiltins() exposes it as a slice
// indexed relative to Builtin::FirstTSBuiltin, so the expansion order here
// must match the builtin ID enumeration built from the same .def files.
const Builtin::Info AArch64TargetInfo::BuiltinInfo[] = {
#define BUILTIN(ID, TYPE, ATTRS)                                               \
   {#ID, TYPE, ATTRS, nullptr, ALL_LANGUAGES, nullptr},
#include "clang/Basic/BuiltinsNEON.def"

#define BUILTIN(ID, TYPE, ATTRS)                                               \
   {#ID, TYPE, ATTRS, nullptr, ALL_LANGUAGES, nullptr},
#include "clang/Basic/BuiltinsSVE.def"

#define BUILTIN(ID, TYPE, ATTRS)                                               \
   {#ID, TYPE, ATTRS, nullptr, ALL_LANGUAGES, nullptr},
#define LANGBUILTIN(ID, TYPE, ATTRS, LANG)                                     \
  {#ID, TYPE, ATTRS, nullptr, LANG, nullptr},
#define TARGET_HEADER_BUILTIN(ID, TYPE, ATTRS, HEADER, LANGS, FEATURE)         \
  {#ID, TYPE, ATTRS, HEADER, LANGS, FEATURE},
#include "clang/Basic/BuiltinsAArch64.def"
};
42 
// Common construction for every AArch64 flavour: selects the base "aapcs"
// ABI, OS-dependent integer/wchar_t types, the shared type layout, and
// enables ARM-specific language facilities (half types, SVE types,
// __builtin_ms_va_list, NEON asm-operand syntax).
AArch64TargetInfo::AArch64TargetInfo(const llvm::Triple &Triple,
                                     const TargetOptions &Opts)
    : TargetInfo(Triple), ABI("aapcs") {
  // OpenBSD spells 64-bit integers as "long long"; other OSes use "long".
  if (getTriple().isOSOpenBSD()) {
    Int64Type = SignedLongLong;
    IntMaxType = SignedLongLong;
  } else {
    // wchar_t is unsigned everywhere except Darwin and NetBSD.
    if (!getTriple().isOSDarwin() && !getTriple().isOSNetBSD())
      WCharType = UnsignedInt;

    Int64Type = SignedLong;
    IntMaxType = SignedLong;
  }

  // All AArch64 implementations support ARMv8 FP, which makes half a legal type.
  HasLegalHalfType = true;
  HasFloat16 = true;

  // arm64_32-style targets keep 32-bit pointers and longs.
  if (Triple.isArch64Bit())
    LongWidth = LongAlign = PointerWidth = PointerAlign = 64;
  else
    LongWidth = LongAlign = PointerWidth = PointerAlign = 32;

  MaxVectorAlign = 128;
  MaxAtomicInlineWidth = 128;
  MaxAtomicPromoteWidth = 128;

  // long double is IEEE binary128 (quad precision).
  LongDoubleWidth = LongDoubleAlign = SuitableAlign = 128;
  LongDoubleFormat = &llvm::APFloat::IEEEquad();

  BFloat16Width = BFloat16Align = 16;
  BFloat16Format = &llvm::APFloat::BFloat();

  // Make __builtin_ms_va_list available.
  HasBuiltinMSVaList = true;

  // Make the SVE types available.  Note that this deliberately doesn't
  // depend on SveMode, since in principle it should be possible to turn
  // SVE on and off within a translation unit.  It should also be possible
  // to compile the global declaration:
  //
  // __SVInt8_t *ptr;
  //
  // even without SVE.
  HasAArch64SVETypes = true;

  // {} in inline assembly are neon specifiers, not assembly variant
  // specifiers.
  NoAsmVariants = true;

  // AAPCS gives rules for bitfields. 7.1.7 says: "The container type
  // contributes to the alignment of the containing aggregate in the same way
  // a plain (non bit-field) member of that type would, without exception for
  // zero-sized or anonymous bit-fields."
  assert(UseBitFieldTypeAlignment && "bitfields affect type alignment");
  UseZeroLengthBitfieldAlignment = true;

  // AArch64 targets default to using the ARM C++ ABI.
  TheCXXABI.set(TargetCXXABI::GenericAArch64);

  // Profiling counter symbol name differs per OS/EABI.
  if (Triple.getOS() == llvm::Triple::Linux)
    this->MCountName = "\01_mcount";
  else if (Triple.getOS() == llvm::Triple::UnknownOS)
    this->MCountName =
        Opts.EABIVersion == llvm::EABI::GNU ? "\01_mcount" : "mcount";
}
109 
110 StringRef AArch64TargetInfo::getABI() const { return ABI; }
111 
112 bool AArch64TargetInfo::setABI(const std::string &Name) {
113   if (Name != "aapcs" && Name != "darwinpcs")
114     return false;
115 
116   ABI = Name;
117   return true;
118 }
119 
120 bool AArch64TargetInfo::validateBranchProtection(StringRef Spec,
121                                                  BranchProtectionInfo &BPI,
122                                                  StringRef &Err) const {
123   llvm::AArch64::ParsedBranchProtection PBP;
124   if (!llvm::AArch64::parseBranchProtection(Spec, PBP, Err))
125     return false;
126 
127   BPI.SignReturnAddr =
128       llvm::StringSwitch<LangOptions::SignReturnAddressScopeKind>(PBP.Scope)
129           .Case("non-leaf", LangOptions::SignReturnAddressScopeKind::NonLeaf)
130           .Case("all", LangOptions::SignReturnAddressScopeKind::All)
131           .Default(LangOptions::SignReturnAddressScopeKind::None);
132 
133   if (PBP.Key == "a_key")
134     BPI.SignKey = LangOptions::SignReturnAddressKeyKind::AKey;
135   else
136     BPI.SignKey = LangOptions::SignReturnAddressKeyKind::BKey;
137 
138   BPI.BranchTargetEnforcement = PBP.BranchTargetEnforcement;
139   return true;
140 }
141 
142 bool AArch64TargetInfo::isValidCPUName(StringRef Name) const {
143   return Name == "generic" ||
144          llvm::AArch64::parseCPUArch(Name) != llvm::AArch64::ArchKind::INVALID;
145 }
146 
147 bool AArch64TargetInfo::setCPU(const std::string &Name) {
148   return isValidCPUName(Name);
149 }
150 
151 void AArch64TargetInfo::fillValidCPUList(
152     SmallVectorImpl<StringRef> &Values) const {
153   llvm::AArch64::fillValidCPUArchList(Values);
154 }
155 
156 void AArch64TargetInfo::getTargetDefinesARMV81A(const LangOptions &Opts,
157                                                 MacroBuilder &Builder) const {
158   Builder.defineMacro("__ARM_FEATURE_QRDMX", "1");
159   Builder.defineMacro("__ARM_FEATURE_ATOMICS", "1");
160   Builder.defineMacro("__ARM_FEATURE_CRC32", "1");
161 }
162 
163 void AArch64TargetInfo::getTargetDefinesARMV82A(const LangOptions &Opts,
164                                                 MacroBuilder &Builder) const {
165   // Also include the ARMv8.1 defines
166   getTargetDefinesARMV81A(Opts, Builder);
167 }
168 
169 void AArch64TargetInfo::getTargetDefinesARMV83A(const LangOptions &Opts,
170                                                 MacroBuilder &Builder) const {
171   Builder.defineMacro("__ARM_FEATURE_COMPLEX", "1");
172   Builder.defineMacro("__ARM_FEATURE_JCVT", "1");
173   // Also include the Armv8.2 defines
174   getTargetDefinesARMV82A(Opts, Builder);
175 }
176 
177 void AArch64TargetInfo::getTargetDefinesARMV84A(const LangOptions &Opts,
178                                                 MacroBuilder &Builder) const {
179   // Also include the Armv8.3 defines
180   getTargetDefinesARMV83A(Opts, Builder);
181 }
182 
183 void AArch64TargetInfo::getTargetDefinesARMV85A(const LangOptions &Opts,
184                                                 MacroBuilder &Builder) const {
185   Builder.defineMacro("__ARM_FEATURE_FRINT", "1");
186   // Also include the Armv8.4 defines
187   getTargetDefinesARMV84A(Opts, Builder);
188 }
189 
190 void AArch64TargetInfo::getTargetDefinesARMV86A(const LangOptions &Opts,
191                                                 MacroBuilder &Builder) const {
192   // Also include the Armv8.5 defines
193   // FIXME: Armv8.6 makes the following extensions mandatory:
194   // - __ARM_FEATURE_BF16
195   // - __ARM_FEATURE_MATMUL_INT8
196   // Handle them here.
197   getTargetDefinesARMV85A(Opts, Builder);
198 }
199 
200 void AArch64TargetInfo::getTargetDefinesARMV87A(const LangOptions &Opts,
201                                                 MacroBuilder &Builder) const {
202   // Also include the Armv8.6 defines
203   getTargetDefinesARMV86A(Opts, Builder);
204 }
205 
206 void AArch64TargetInfo::getTargetDefines(const LangOptions &Opts,
207                                          MacroBuilder &Builder) const {
208   // Target identification.
209   Builder.defineMacro("__aarch64__");
210   // For bare-metal.
211   if (getTriple().getOS() == llvm::Triple::UnknownOS &&
212       getTriple().isOSBinFormatELF())
213     Builder.defineMacro("__ELF__");
214 
215   // Target properties.
216   if (!getTriple().isOSWindows() && getTriple().isArch64Bit()) {
217     Builder.defineMacro("_LP64");
218     Builder.defineMacro("__LP64__");
219   }
220 
221   std::string CodeModel = getTargetOpts().CodeModel;
222   if (CodeModel == "default")
223     CodeModel = "small";
224   for (char &c : CodeModel)
225     c = toupper(c);
226   Builder.defineMacro("__AARCH64_CMODEL_" + CodeModel + "__");
227 
228   // ACLE predefines. Many can only have one possible value on v8 AArch64.
229   Builder.defineMacro("__ARM_ACLE", "200");
230   Builder.defineMacro("__ARM_ARCH", "8");
231   Builder.defineMacro("__ARM_ARCH_PROFILE", "'A'");
232 
233   Builder.defineMacro("__ARM_64BIT_STATE", "1");
234   Builder.defineMacro("__ARM_PCS_AAPCS64", "1");
235   Builder.defineMacro("__ARM_ARCH_ISA_A64", "1");
236 
237   Builder.defineMacro("__ARM_FEATURE_CLZ", "1");
238   Builder.defineMacro("__ARM_FEATURE_FMA", "1");
239   Builder.defineMacro("__ARM_FEATURE_LDREX", "0xF");
240   Builder.defineMacro("__ARM_FEATURE_IDIV", "1"); // As specified in ACLE
241   Builder.defineMacro("__ARM_FEATURE_DIV");       // For backwards compatibility
242   Builder.defineMacro("__ARM_FEATURE_NUMERIC_MAXMIN", "1");
243   Builder.defineMacro("__ARM_FEATURE_DIRECTED_ROUNDING", "1");
244 
245   Builder.defineMacro("__ARM_ALIGN_MAX_STACK_PWR", "4");
246 
247   // 0xe implies support for half, single and double precision operations.
248   Builder.defineMacro("__ARM_FP", "0xE");
249 
250   // PCS specifies this for SysV variants, which is all we support. Other ABIs
251   // may choose __ARM_FP16_FORMAT_ALTERNATIVE.
252   Builder.defineMacro("__ARM_FP16_FORMAT_IEEE", "1");
253   Builder.defineMacro("__ARM_FP16_ARGS", "1");
254 
255   if (Opts.UnsafeFPMath)
256     Builder.defineMacro("__ARM_FP_FAST", "1");
257 
258   Builder.defineMacro("__ARM_SIZEOF_WCHAR_T",
259                       Twine(Opts.WCharSize ? Opts.WCharSize : 4));
260 
261   Builder.defineMacro("__ARM_SIZEOF_MINIMAL_ENUM", Opts.ShortEnums ? "1" : "4");
262 
263   if (FPU & NeonMode) {
264     Builder.defineMacro("__ARM_NEON", "1");
265     // 64-bit NEON supports half, single and double precision operations.
266     Builder.defineMacro("__ARM_NEON_FP", "0xE");
267   }
268 
269   if (FPU & SveMode)
270     Builder.defineMacro("__ARM_FEATURE_SVE", "1");
271 
272   if (HasSVE2)
273     Builder.defineMacro("__ARM_FEATURE_SVE2", "1");
274 
275   if (HasSVE2 && HasSVE2AES)
276     Builder.defineMacro("__ARM_FEATURE_SVE2_AES", "1");
277 
278   if (HasSVE2 && HasSVE2BitPerm)
279     Builder.defineMacro("__ARM_FEATURE_SVE2_BITPERM", "1");
280 
281   if (HasSVE2 && HasSVE2SHA3)
282     Builder.defineMacro("__ARM_FEATURE_SVE2_SHA3", "1");
283 
284   if (HasSVE2 && HasSVE2SM4)
285     Builder.defineMacro("__ARM_FEATURE_SVE2_SM4", "1");
286 
287   if (HasCRC)
288     Builder.defineMacro("__ARM_FEATURE_CRC32", "1");
289 
290   // The __ARM_FEATURE_CRYPTO is deprecated in favor of finer grained feature
291   // macros for AES, SHA2, SHA3 and SM4
292   if (HasAES && HasSHA2)
293     Builder.defineMacro("__ARM_FEATURE_CRYPTO", "1");
294 
295   if (HasAES)
296     Builder.defineMacro("__ARM_FEATURE_AES", "1");
297 
298   if (HasSHA2)
299     Builder.defineMacro("__ARM_FEATURE_SHA2", "1");
300 
301   if (HasSHA3) {
302     Builder.defineMacro("__ARM_FEATURE_SHA3", "1");
303     Builder.defineMacro("__ARM_FEATURE_SHA512", "1");
304   }
305 
306   if (HasSM4) {
307     Builder.defineMacro("__ARM_FEATURE_SM3", "1");
308     Builder.defineMacro("__ARM_FEATURE_SM4", "1");
309   }
310 
311   if (HasUnaligned)
312     Builder.defineMacro("__ARM_FEATURE_UNALIGNED", "1");
313 
314   if ((FPU & NeonMode) && HasFullFP16)
315     Builder.defineMacro("__ARM_FEATURE_FP16_VECTOR_ARITHMETIC", "1");
316   if (HasFullFP16)
317    Builder.defineMacro("__ARM_FEATURE_FP16_SCALAR_ARITHMETIC", "1");
318 
319   if (HasDotProd)
320     Builder.defineMacro("__ARM_FEATURE_DOTPROD", "1");
321 
322   if (HasMTE)
323     Builder.defineMacro("__ARM_FEATURE_MEMORY_TAGGING", "1");
324 
325   if (HasTME)
326     Builder.defineMacro("__ARM_FEATURE_TME", "1");
327 
328   if (HasMatMul)
329     Builder.defineMacro("__ARM_FEATURE_MATMUL_INT8", "1");
330 
331   if (HasLSE)
332     Builder.defineMacro("__ARM_FEATURE_ATOMICS", "1");
333 
334   if (HasBFloat16) {
335     Builder.defineMacro("__ARM_FEATURE_BF16", "1");
336     Builder.defineMacro("__ARM_FEATURE_BF16_VECTOR_ARITHMETIC", "1");
337     Builder.defineMacro("__ARM_BF16_FORMAT_ALTERNATIVE", "1");
338     Builder.defineMacro("__ARM_FEATURE_BF16_SCALAR_ARITHMETIC", "1");
339   }
340 
341   if ((FPU & SveMode) && HasBFloat16) {
342     Builder.defineMacro("__ARM_FEATURE_SVE_BF16", "1");
343   }
344 
345   if ((FPU & SveMode) && HasMatmulFP64)
346     Builder.defineMacro("__ARM_FEATURE_SVE_MATMUL_FP64", "1");
347 
348   if ((FPU & SveMode) && HasMatmulFP32)
349     Builder.defineMacro("__ARM_FEATURE_SVE_MATMUL_FP32", "1");
350 
351   if ((FPU & SveMode) && HasMatMul)
352     Builder.defineMacro("__ARM_FEATURE_SVE_MATMUL_INT8", "1");
353 
354   if ((FPU & NeonMode) && HasFP16FML)
355     Builder.defineMacro("__ARM_FEATURE_FP16_FML", "1");
356 
357   if (Opts.hasSignReturnAddress()) {
358     // Bitmask:
359     // 0: Protection using the A key
360     // 1: Protection using the B key
361     // 2: Protection including leaf functions
362     unsigned Value = 0;
363 
364     if (Opts.isSignReturnAddressWithAKey())
365       Value |= (1 << 0);
366     else
367       Value |= (1 << 1);
368 
369     if (Opts.isSignReturnAddressScopeAll())
370       Value |= (1 << 2);
371 
372     Builder.defineMacro("__ARM_FEATURE_PAC_DEFAULT", std::to_string(Value));
373   }
374 
375   if (Opts.BranchTargetEnforcement)
376     Builder.defineMacro("__ARM_FEATURE_BTI_DEFAULT", "1");
377 
378   if (HasLS64)
379     Builder.defineMacro("__ARM_FEATURE_LS64", "1");
380 
381   if (HasRandGen)
382     Builder.defineMacro("__ARM_FEATURE_RNG", "1");
383 
384   switch (ArchKind) {
385   default:
386     break;
387   case llvm::AArch64::ArchKind::ARMV8_1A:
388     getTargetDefinesARMV81A(Opts, Builder);
389     break;
390   case llvm::AArch64::ArchKind::ARMV8_2A:
391     getTargetDefinesARMV82A(Opts, Builder);
392     break;
393   case llvm::AArch64::ArchKind::ARMV8_3A:
394     getTargetDefinesARMV83A(Opts, Builder);
395     break;
396   case llvm::AArch64::ArchKind::ARMV8_4A:
397     getTargetDefinesARMV84A(Opts, Builder);
398     break;
399   case llvm::AArch64::ArchKind::ARMV8_5A:
400     getTargetDefinesARMV85A(Opts, Builder);
401     break;
402   case llvm::AArch64::ArchKind::ARMV8_6A:
403     getTargetDefinesARMV86A(Opts, Builder);
404     break;
405   case llvm::AArch64::ArchKind::ARMV8_7A:
406     getTargetDefinesARMV87A(Opts, Builder);
407     break;
408   }
409 
410   // All of the __sync_(bool|val)_compare_and_swap_(1|2|4|8) builtins work.
411   Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_1");
412   Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_2");
413   Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4");
414   Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8");
415 
416   if (Opts.ArmSveVectorBits) {
417     Builder.defineMacro("__ARM_FEATURE_SVE_BITS", Twine(Opts.ArmSveVectorBits));
418     Builder.defineMacro("__ARM_FEATURE_SVE_VECTOR_OPERATORS");
419   }
420 }
421 
422 ArrayRef<Builtin::Info> AArch64TargetInfo::getTargetBuiltins() const {
423   return llvm::makeArrayRef(BuiltinInfo, clang::AArch64::LastTSBuiltin -
424                                              Builtin::FirstTSBuiltin);
425 }
426 
427 bool AArch64TargetInfo::hasFeature(StringRef Feature) const {
428   return Feature == "aarch64" || Feature == "arm64" || Feature == "arm" ||
429          (Feature == "neon" && (FPU & NeonMode)) ||
430          ((Feature == "sve" || Feature == "sve2" || Feature == "sve2-bitperm" ||
431            Feature == "sve2-aes" || Feature == "sve2-sha3" ||
432            Feature == "sve2-sm4" || Feature == "f64mm" || Feature == "f32mm" ||
433            Feature == "i8mm" || Feature == "bf16") &&
434           (FPU & SveMode)) ||
435          (Feature == "ls64" && HasLS64);
436 }
437 
438 bool AArch64TargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
439                                              DiagnosticsEngine &Diags) {
440   FPU = FPUMode;
441   HasCRC = false;
442   HasCrypto = false;
443   HasAES = false;
444   HasSHA2 = false;
445   HasSHA3 = false;
446   HasSM4 = false;
447   HasUnaligned = true;
448   HasFullFP16 = false;
449   HasDotProd = false;
450   HasFP16FML = false;
451   HasMTE = false;
452   HasTME = false;
453   HasLS64 = false;
454   HasRandGen = false;
455   HasMatMul = false;
456   HasBFloat16 = false;
457   HasSVE2 = false;
458   HasSVE2AES = false;
459   HasSVE2SHA3 = false;
460   HasSVE2SM4 = false;
461   HasSVE2BitPerm = false;
462   HasMatmulFP64 = false;
463   HasMatmulFP32 = false;
464   HasLSE = false;
465 
466   ArchKind = llvm::AArch64::ArchKind::ARMV8A;
467 
468   for (const auto &Feature : Features) {
469     if (Feature == "+neon")
470       FPU |= NeonMode;
471     if (Feature == "+sve") {
472       FPU |= SveMode;
473       HasFullFP16 = 1;
474     }
475     if (Feature == "+sve2") {
476       FPU |= SveMode;
477       HasFullFP16 = 1;
478       HasSVE2 = 1;
479     }
480     if (Feature == "+sve2-aes") {
481       FPU |= SveMode;
482       HasFullFP16 = 1;
483       HasSVE2 = 1;
484       HasSVE2AES = 1;
485     }
486     if (Feature == "+sve2-sha3") {
487       FPU |= SveMode;
488       HasFullFP16 = 1;
489       HasSVE2 = 1;
490       HasSVE2SHA3 = 1;
491     }
492     if (Feature == "+sve2-sm4") {
493       FPU |= SveMode;
494       HasFullFP16 = 1;
495       HasSVE2 = 1;
496       HasSVE2SM4 = 1;
497     }
498     if (Feature == "+sve2-bitperm") {
499       FPU |= SveMode;
500       HasFullFP16 = 1;
501       HasSVE2 = 1;
502       HasSVE2BitPerm = 1;
503     }
504     if (Feature == "+f32mm") {
505       FPU |= SveMode;
506       HasMatmulFP32 = true;
507     }
508     if (Feature == "+f64mm") {
509       FPU |= SveMode;
510       HasMatmulFP64 = true;
511     }
512     if (Feature == "+crc")
513       HasCRC = true;
514     if (Feature == "+crypto")
515       HasCrypto = true;
516     if (Feature == "+aes")
517       HasAES = true;
518     if (Feature == "+sha2")
519       HasSHA2 = true;
520     if (Feature == "+sha3") {
521       HasSHA2 = true;
522       HasSHA3 = true;
523     }
524     if (Feature == "+sm4")
525       HasSM4 = true;
526     if (Feature == "+strict-align")
527       HasUnaligned = false;
528     if (Feature == "+v8.1a")
529       ArchKind = llvm::AArch64::ArchKind::ARMV8_1A;
530     if (Feature == "+v8.2a")
531       ArchKind = llvm::AArch64::ArchKind::ARMV8_2A;
532     if (Feature == "+v8.3a")
533       ArchKind = llvm::AArch64::ArchKind::ARMV8_3A;
534     if (Feature == "+v8.4a")
535       ArchKind = llvm::AArch64::ArchKind::ARMV8_4A;
536     if (Feature == "+v8.5a")
537       ArchKind = llvm::AArch64::ArchKind::ARMV8_5A;
538     if (Feature == "+v8.6a")
539       ArchKind = llvm::AArch64::ArchKind::ARMV8_6A;
540     if (Feature == "+v8.7a")
541       ArchKind = llvm::AArch64::ArchKind::ARMV8_7A;
542     if (Feature == "+v8r")
543       ArchKind = llvm::AArch64::ArchKind::ARMV8R;
544     if (Feature == "+fullfp16")
545       HasFullFP16 = true;
546     if (Feature == "+dotprod")
547       HasDotProd = true;
548     if (Feature == "+fp16fml")
549       HasFP16FML = true;
550     if (Feature == "+mte")
551       HasMTE = true;
552     if (Feature == "+tme")
553       HasTME = true;
554     if (Feature == "+pauth")
555       HasPAuth = true;
556     if (Feature == "+i8mm")
557       HasMatMul = true;
558     if (Feature == "+bf16")
559       HasBFloat16 = true;
560     if (Feature == "+lse")
561       HasLSE = true;
562     if (Feature == "+ls64")
563       HasLS64 = true;
564     if (Feature == "+rand")
565       HasRandGen = true;
566     if (Feature == "+flagm")
567       HasFlagM = true;
568   }
569 
570   setDataLayout();
571 
572   return true;
573 }
574 
575 TargetInfo::CallingConvCheckResult
576 AArch64TargetInfo::checkCallingConvention(CallingConv CC) const {
577   switch (CC) {
578   case CC_C:
579   case CC_Swift:
580   case CC_SwiftAsync:
581   case CC_PreserveMost:
582   case CC_PreserveAll:
583   case CC_OpenCLKernel:
584   case CC_AArch64VectorCall:
585   case CC_Win64:
586     return CCCR_OK;
587   default:
588     return CCCR_Warning;
589   }
590 }
591 
592 bool AArch64TargetInfo::isCLZForZeroUndef() const { return false; }
593 
594 TargetInfo::BuiltinVaListKind AArch64TargetInfo::getBuiltinVaListKind() const {
595   return TargetInfo::AArch64ABIBuiltinVaList;
596 }
597 
// Register names accepted in GCC-style inline assembly (asm register
// variables, clobber lists).  Grouped by register class.
const char *const AArch64TargetInfo::GCCRegNames[] = {
    // 32-bit Integer registers
    "w0", "w1", "w2", "w3", "w4", "w5", "w6", "w7", "w8", "w9", "w10", "w11",
    "w12", "w13", "w14", "w15", "w16", "w17", "w18", "w19", "w20", "w21", "w22",
    "w23", "w24", "w25", "w26", "w27", "w28", "w29", "w30", "wsp",

    // 64-bit Integer registers
    "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11",
    "x12", "x13", "x14", "x15", "x16", "x17", "x18", "x19", "x20", "x21", "x22",
    "x23", "x24", "x25", "x26", "x27", "x28", "fp", "lr", "sp",

    // 32-bit floating point registers
    "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7", "s8", "s9", "s10", "s11",
    "s12", "s13", "s14", "s15", "s16", "s17", "s18", "s19", "s20", "s21", "s22",
    "s23", "s24", "s25", "s26", "s27", "s28", "s29", "s30", "s31",

    // 64-bit floating point registers
    "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7", "d8", "d9", "d10", "d11",
    "d12", "d13", "d14", "d15", "d16", "d17", "d18", "d19", "d20", "d21", "d22",
    "d23", "d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31",

    // Neon vector registers
    "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11",
    "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22",
    "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31",

    // SVE vector registers
    "z0",  "z1",  "z2",  "z3",  "z4",  "z5",  "z6",  "z7",  "z8",  "z9",  "z10",
    "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21",
    "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31",

    // SVE predicate registers
    "p0",  "p1",  "p2",  "p3",  "p4",  "p5",  "p6",  "p7",  "p8",  "p9",  "p10",
    "p11", "p12", "p13", "p14", "p15"
};
633 
634 ArrayRef<const char *> AArch64TargetInfo::getGCCRegNames() const {
635   return llvm::makeArrayRef(GCCRegNames);
636 }
637 
// Alternate spellings for registers in GCC-style inline assembly; each
// entry maps a list of alias names onto the canonical name from
// GCCRegNames.
const TargetInfo::GCCRegAlias AArch64TargetInfo::GCCRegAliases[] = {
    {{"w31"}, "wsp"},
    {{"x31"}, "sp"},
    // GCC rN registers are aliases of xN registers.
    {{"r0"}, "x0"},
    {{"r1"}, "x1"},
    {{"r2"}, "x2"},
    {{"r3"}, "x3"},
    {{"r4"}, "x4"},
    {{"r5"}, "x5"},
    {{"r6"}, "x6"},
    {{"r7"}, "x7"},
    {{"r8"}, "x8"},
    {{"r9"}, "x9"},
    {{"r10"}, "x10"},
    {{"r11"}, "x11"},
    {{"r12"}, "x12"},
    {{"r13"}, "x13"},
    {{"r14"}, "x14"},
    {{"r15"}, "x15"},
    {{"r16"}, "x16"},
    {{"r17"}, "x17"},
    {{"r18"}, "x18"},
    {{"r19"}, "x19"},
    {{"r20"}, "x20"},
    {{"r21"}, "x21"},
    {{"r22"}, "x22"},
    {{"r23"}, "x23"},
    {{"r24"}, "x24"},
    {{"r25"}, "x25"},
    {{"r26"}, "x26"},
    {{"r27"}, "x27"},
    {{"r28"}, "x28"},
    {{"r29", "x29"}, "fp"},
    {{"r30", "x30"}, "lr"},
    // The S/D/Q and W/X registers overlap, but aren't really aliases; we
    // don't want to substitute one of these for a different-sized one.
};
676 
677 ArrayRef<TargetInfo::GCCRegAlias> AArch64TargetInfo::getGCCRegAliases() const {
678   return llvm::makeArrayRef(GCCRegAliases);
679 }
680 
681 bool AArch64TargetInfo::validateAsmConstraint(
682     const char *&Name, TargetInfo::ConstraintInfo &Info) const {
683   switch (*Name) {
684   default:
685     return false;
686   case 'w': // Floating point and SIMD registers (V0-V31)
687     Info.setAllowsRegister();
688     return true;
689   case 'I': // Constant that can be used with an ADD instruction
690   case 'J': // Constant that can be used with a SUB instruction
691   case 'K': // Constant that can be used with a 32-bit logical instruction
692   case 'L': // Constant that can be used with a 64-bit logical instruction
693   case 'M': // Constant that can be used as a 32-bit MOV immediate
694   case 'N': // Constant that can be used as a 64-bit MOV immediate
695   case 'Y': // Floating point constant zero
696   case 'Z': // Integer constant zero
697     return true;
698   case 'Q': // A memory reference with base register and no offset
699     Info.setAllowsMemory();
700     return true;
701   case 'S': // A symbolic address
702     Info.setAllowsRegister();
703     return true;
704   case 'U':
705     if (Name[1] == 'p' && (Name[2] == 'l' || Name[2] == 'a')) {
706       // SVE predicate registers ("Upa"=P0-15, "Upl"=P0-P7)
707       Info.setAllowsRegister();
708       Name += 2;
709       return true;
710     }
711     // Ump: A memory address suitable for ldp/stp in SI, DI, SF and DF modes.
712     // Utf: A memory address suitable for ldp/stp in TF mode.
713     // Usa: An absolute symbolic address.
714     // Ush: The high part (bits 32:12) of a pc-relative symbolic address.
715 
716     // Better to return an error saying that it's an unrecognised constraint
717     // even if this is a valid constraint in gcc.
718     return false;
719   case 'z': // Zero register, wzr or xzr
720     Info.setAllowsRegister();
721     return true;
722   case 'x': // Floating point and SIMD registers (V0-V15)
723     Info.setAllowsRegister();
724     return true;
725   case 'y': // SVE registers (V0-V7)
726     Info.setAllowsRegister();
727     return true;
728   }
729   return false;
730 }
731 
732 bool AArch64TargetInfo::validateConstraintModifier(
733     StringRef Constraint, char Modifier, unsigned Size,
734     std::string &SuggestedModifier) const {
735   // Strip off constraint modifiers.
736   while (Constraint[0] == '=' || Constraint[0] == '+' || Constraint[0] == '&')
737     Constraint = Constraint.substr(1);
738 
739   switch (Constraint[0]) {
740   default:
741     return true;
742   case 'z':
743   case 'r': {
744     switch (Modifier) {
745     case 'x':
746     case 'w':
747       // For now assume that the person knows what they're
748       // doing with the modifier.
749       return true;
750     default:
751       // By default an 'r' constraint will be in the 'x'
752       // registers.
753       if (Size == 64)
754         return true;
755 
756       if (Size == 512)
757         return HasLS64;
758 
759       SuggestedModifier = "w";
760       return false;
761     }
762   }
763   }
764 }
765 
766 const char *AArch64TargetInfo::getClobbers() const { return ""; }
767 
768 int AArch64TargetInfo::getEHDataRegisterNumber(unsigned RegNo) const {
769   if (RegNo == 0)
770     return 0;
771   if (RegNo == 1)
772     return 1;
773   return -1;
774 }
775 
776 bool AArch64TargetInfo::hasInt128Type() const { return true; }
777 
778 AArch64leTargetInfo::AArch64leTargetInfo(const llvm::Triple &Triple,
779                                          const TargetOptions &Opts)
780     : AArch64TargetInfo(Triple, Opts) {}
781 
782 void AArch64leTargetInfo::setDataLayout() {
783   if (getTriple().isOSBinFormatMachO()) {
784     if(getTriple().isArch32Bit())
785       resetDataLayout("e-m:o-p:32:32-i64:64-i128:128-n32:64-S128", "_");
786     else
787       resetDataLayout("e-m:o-i64:64-i128:128-n32:64-S128", "_");
788   } else
789     resetDataLayout("e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128");
790 }
791 
792 void AArch64leTargetInfo::getTargetDefines(const LangOptions &Opts,
793                                            MacroBuilder &Builder) const {
794   Builder.defineMacro("__AARCH64EL__");
795   AArch64TargetInfo::getTargetDefines(Opts, Builder);
796 }
797 
798 AArch64beTargetInfo::AArch64beTargetInfo(const llvm::Triple &Triple,
799                                          const TargetOptions &Opts)
800     : AArch64TargetInfo(Triple, Opts) {}
801 
802 void AArch64beTargetInfo::getTargetDefines(const LangOptions &Opts,
803                                            MacroBuilder &Builder) const {
804   Builder.defineMacro("__AARCH64EB__");
805   Builder.defineMacro("__AARCH_BIG_ENDIAN");
806   Builder.defineMacro("__ARM_BIG_ENDIAN");
807   AArch64TargetInfo::getTargetDefines(Opts, Builder);
808 }
809 
810 void AArch64beTargetInfo::setDataLayout() {
811   assert(!getTriple().isOSBinFormatMachO());
812   resetDataLayout("E-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128");
813 }
814 
// Common setup for Windows-on-ARM64 targets (both MSVC and MinGW
// environments): overrides the Linux-style LP64 defaults from the base
// class with Windows' LLP64 type model.
WindowsARM64TargetInfo::WindowsARM64TargetInfo(const llvm::Triple &Triple,
                                               const TargetOptions &Opts)
    : WindowsTargetInfo<AArch64leTargetInfo>(Triple, Opts), Triple(Triple) {

  // This is an LLP64 platform.
  // int:4, long:4, long long:8, long double:8.
  IntWidth = IntAlign = 32;
  LongWidth = LongAlign = 32;
  DoubleAlign = LongLongAlign = 64;
  // long double is plain IEEE double on Windows, not binary128.
  LongDoubleWidth = LongDoubleAlign = 64;
  LongDoubleFormat = &llvm::APFloat::IEEEdouble();
  // With 32-bit long, the 64-bit-wide standard types must be "long long".
  IntMaxType = SignedLongLong;
  Int64Type = SignedLongLong;
  SizeType = UnsignedLongLong;
  PtrDiffType = SignedLongLong;
  IntPtrType = SignedLongLong;
}
832 
833 void WindowsARM64TargetInfo::setDataLayout() {
834   resetDataLayout(Triple.isOSBinFormatMachO()
835                       ? "e-m:o-i64:64-i128:128-n32:64-S128"
836                       : "e-m:w-p:64:64-i32:32-i64:64-i128:128-n32:64-S128",
837                   Triple.isOSBinFormatMachO() ? "_" : "");
838 }
839 
840 TargetInfo::BuiltinVaListKind
841 WindowsARM64TargetInfo::getBuiltinVaListKind() const {
842   return TargetInfo::CharPtrBuiltinVaList;
843 }
844 
845 TargetInfo::CallingConvCheckResult
846 WindowsARM64TargetInfo::checkCallingConvention(CallingConv CC) const {
847   switch (CC) {
848   case CC_X86StdCall:
849   case CC_X86ThisCall:
850   case CC_X86FastCall:
851   case CC_X86VectorCall:
852     return CCCR_Ignore;
853   case CC_C:
854   case CC_OpenCLKernel:
855   case CC_PreserveMost:
856   case CC_PreserveAll:
857   case CC_Swift:
858   case CC_SwiftAsync:
859   case CC_Win64:
860     return CCCR_OK;
861   default:
862     return CCCR_Warning;
863   }
864 }
865 
866 MicrosoftARM64TargetInfo::MicrosoftARM64TargetInfo(const llvm::Triple &Triple,
867                                                    const TargetOptions &Opts)
868     : WindowsARM64TargetInfo(Triple, Opts) {
869   TheCXXABI.set(TargetCXXABI::Microsoft);
870 }
871 
872 void MicrosoftARM64TargetInfo::getTargetDefines(const LangOptions &Opts,
873                                                 MacroBuilder &Builder) const {
874   WindowsARM64TargetInfo::getTargetDefines(Opts, Builder);
875   Builder.defineMacro("_M_ARM64", "1");
876 }
877 
878 TargetInfo::CallingConvKind
879 MicrosoftARM64TargetInfo::getCallingConvKind(bool ClangABICompat4) const {
880   return CCK_MicrosoftWin64;
881 }
882 
// Return the minimum alignment (in bits) for a global of the given size
// (in bits), raising the base-class answer to match MSVC's size-based
// alignment rules for arm64.
unsigned MicrosoftARM64TargetInfo::getMinGlobalAlign(uint64_t TypeSize) const {
  unsigned Align = WindowsARM64TargetInfo::getMinGlobalAlign(TypeSize);

  // MSVC does size based alignment for arm64 based on alignment section in
  // below document, replicate that to keep alignment consistent with object
  // files compiled by MSVC.
  // https://docs.microsoft.com/en-us/cpp/build/arm64-windows-abi-conventions
  if (TypeSize >= 512) {              // TypeSize >= 64 bytes
    Align = std::max(Align, 128u);    // align type at least 16 bytes
  } else if (TypeSize >= 64) {        // TypeSize >= 8 bytes
    Align = std::max(Align, 64u);     // align type at least 8 bytes
  } else if (TypeSize >= 16) {        // TypeSize >= 2 bytes
    Align = std::max(Align, 32u);     // align type at least 4 bytes
  }
  return Align;
}
899 
900 MinGWARM64TargetInfo::MinGWARM64TargetInfo(const llvm::Triple &Triple,
901                                            const TargetOptions &Opts)
902     : WindowsARM64TargetInfo(Triple, Opts) {
903   TheCXXABI.set(TargetCXXABI::GenericAArch64);
904 }
905 
// Darwin (macOS/iOS/watchOS) specialization: overrides the generic AAPCS64
// type choices with Darwin's "darwinpcs"-style conventions.
DarwinAArch64TargetInfo::DarwinAArch64TargetInfo(const llvm::Triple &Triple,
                                                 const TargetOptions &Opts)
    : DarwinTargetInfo<AArch64leTargetInfo>(Triple, Opts) {
  // Darwin spells 64-bit integers as "long long".
  Int64Type = SignedLongLong;
  if (getTriple().isArch32Bit())
    IntMaxType = SignedLongLong;

  // Unlike generic AAPCS64, wchar_t is signed on Darwin.
  WCharType = SignedInt;
  UseSignedCharForObjCBool = false;

  // long double is plain IEEE double, not binary128.
  LongDoubleWidth = LongDoubleAlign = SuitableAlign = 64;
  LongDoubleFormat = &llvm::APFloat::IEEEdouble();

  UseZeroLengthBitfieldAlignment = false;

  // arm64_32 (watchOS) has its own bitfield rules and C++ ABI.
  if (getTriple().isArch32Bit()) {
    UseBitFieldTypeAlignment = false;
    ZeroLengthBitfieldBoundary = 32;
    UseZeroLengthBitfieldAlignment = true;
    TheCXXABI.set(TargetCXXABI::WatchOS);
  } else
    TheCXXABI.set(TargetCXXABI::AppleARM64);
}
929 
930 void DarwinAArch64TargetInfo::getOSDefines(const LangOptions &Opts,
931                                            const llvm::Triple &Triple,
932                                            MacroBuilder &Builder) const {
933   Builder.defineMacro("__AARCH64_SIMD__");
934   if (Triple.isArch32Bit())
935     Builder.defineMacro("__ARM64_ARCH_8_32__");
936   else
937     Builder.defineMacro("__ARM64_ARCH_8__");
938   Builder.defineMacro("__ARM_NEON__");
939   Builder.defineMacro("__LITTLE_ENDIAN__");
940   Builder.defineMacro("__REGISTER_PREFIX__", "");
941   Builder.defineMacro("__arm64", "1");
942   Builder.defineMacro("__arm64__", "1");
943 
944   if (Triple.isArm64e())
945     Builder.defineMacro("__arm64e__", "1");
946 
947   getDarwinDefines(Builder, Opts, Triple, PlatformName, PlatformMinVersion);
948 }
949 
950 TargetInfo::BuiltinVaListKind
951 DarwinAArch64TargetInfo::getBuiltinVaListKind() const {
952   return TargetInfo::CharPtrBuiltinVaList;
953 }
954 
955 // 64-bit RenderScript is aarch64
956 RenderScript64TargetInfo::RenderScript64TargetInfo(const llvm::Triple &Triple,
957                                                    const TargetOptions &Opts)
958     : AArch64leTargetInfo(llvm::Triple("aarch64", Triple.getVendorName(),
959                                        Triple.getOSName(),
960                                        Triple.getEnvironmentName()),
961                           Opts) {
962   IsRenderScriptTarget = true;
963 }
964 
965 void RenderScript64TargetInfo::getTargetDefines(const LangOptions &Opts,
966                                                 MacroBuilder &Builder) const {
967   Builder.defineMacro("__RENDERSCRIPT__");
968   AArch64leTargetInfo::getTargetDefines(Opts, Builder);
969 }
970