1 //===--- AArch64.cpp - Implement AArch64 target feature support -----------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements AArch64 TargetInfo objects.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "AArch64.h"
14 #include "clang/Basic/LangOptions.h"
15 #include "clang/Basic/TargetBuiltins.h"
16 #include "clang/Basic/TargetInfo.h"
17 #include "llvm/ADT/ArrayRef.h"
18 #include "llvm/ADT/StringExtras.h"
19 #include "llvm/ADT/StringSwitch.h"
20 #include "llvm/Support/AArch64TargetParser.h"
21 
22 using namespace clang;
23 using namespace clang::targets;
24 
// Table of all target-specific builtins for AArch64.  It is populated from
// three .def files in order: NEON, SVE, then the general AArch64 builtins.
// The order must match the builtin ID enumeration (clang::AArch64) so that
// getTargetBuiltins() can index this array by ID - FirstTSBuiltin.
const Builtin::Info AArch64TargetInfo::BuiltinInfo[] = {
// NEON builtins: available in all language modes, no feature guard here.
#define BUILTIN(ID, TYPE, ATTRS)                                               \
   {#ID, TYPE, ATTRS, nullptr, ALL_LANGUAGES, nullptr},
#include "clang/Basic/BuiltinsNEON.def"

// SVE builtins: likewise unguarded at this level (Sema checks features).
#define BUILTIN(ID, TYPE, ATTRS)                                               \
   {#ID, TYPE, ATTRS, nullptr, ALL_LANGUAGES, nullptr},
#include "clang/Basic/BuiltinsSVE.def"

// General AArch64 builtins, including language-restricted (LANGBUILTIN),
// feature-guarded (TARGET_BUILTIN) and header-requiring
// (TARGET_HEADER_BUILTIN) entries.
#define BUILTIN(ID, TYPE, ATTRS)                                               \
   {#ID, TYPE, ATTRS, nullptr, ALL_LANGUAGES, nullptr},
#define LANGBUILTIN(ID, TYPE, ATTRS, LANG)                                     \
  {#ID, TYPE, ATTRS, nullptr, LANG, nullptr},
#define TARGET_BUILTIN(ID, TYPE, ATTRS, FEATURE)                               \
  {#ID, TYPE, ATTRS, nullptr, ALL_LANGUAGES, FEATURE},
#define TARGET_HEADER_BUILTIN(ID, TYPE, ATTRS, HEADER, LANGS, FEATURE)         \
  {#ID, TYPE, ATTRS, HEADER, LANGS, FEATURE},
#include "clang/Basic/BuiltinsAArch64.def"
};
44 
45 static StringRef getArchVersionString(llvm::AArch64::ArchKind Kind) {
46   switch (Kind) {
47   case llvm::AArch64::ArchKind::ARMV9A:
48   case llvm::AArch64::ArchKind::ARMV9_1A:
49   case llvm::AArch64::ArchKind::ARMV9_2A:
50   case llvm::AArch64::ArchKind::ARMV9_3A:
51     return "9";
52   default:
53     return "8";
54   }
55 }
56 
57 StringRef AArch64TargetInfo::getArchProfile() const {
58   switch (ArchKind) {
59   case llvm::AArch64::ArchKind::ARMV8R:
60     return "R";
61   default:
62     return "A";
63   }
64 }
65 
// Construct the generic AArch64 target description.  Type widths, the
// long-double format, builtin availability and the C++ ABI are chosen from
// the OS in `Triple`; `Opts` is consulted only for the EABI version when
// picking the mcount symbol on unknown OSes.
AArch64TargetInfo::AArch64TargetInfo(const llvm::Triple &Triple,
                                     const TargetOptions &Opts)
    : TargetInfo(Triple), ABI("aapcs") {
  // OpenBSD uses LP64 with 64-bit types spelled "long long"; other OSes use
  // plain "long" for int64_t/intmax_t.
  if (getTriple().isOSOpenBSD()) {
    Int64Type = SignedLongLong;
    IntMaxType = SignedLongLong;
  } else {
    // AAPCS makes wchar_t unsigned; Darwin and NetBSD deliberately keep the
    // default signed int instead.
    if (!getTriple().isOSDarwin() && !getTriple().isOSNetBSD())
      WCharType = UnsignedInt;

    Int64Type = SignedLong;
    IntMaxType = SignedLong;
  }

  // All AArch64 implementations support ARMv8 FP, which makes half a legal type.
  HasLegalHalfType = true;
  HasFloat16 = true;

  // ILP32 (arm64_32-style) triples keep 32-bit long/pointer.
  if (Triple.isArch64Bit())
    LongWidth = LongAlign = PointerWidth = PointerAlign = 64;
  else
    LongWidth = LongAlign = PointerWidth = PointerAlign = 32;

  // 128-bit Q registers and LSE/CASP allow 128-bit atomics and vectors.
  MaxVectorAlign = 128;
  MaxAtomicInlineWidth = 128;
  MaxAtomicPromoteWidth = 128;

  // AAPCS64 long double is IEEE binary128.
  LongDoubleWidth = LongDoubleAlign = SuitableAlign = 128;
  LongDoubleFormat = &llvm::APFloat::IEEEquad();

  BFloat16Width = BFloat16Align = 16;
  BFloat16Format = &llvm::APFloat::BFloat();

  // Make __builtin_ms_va_list available.
  HasBuiltinMSVaList = true;

  // Make the SVE types available.  Note that this deliberately doesn't
  // depend on SveMode, since in principle it should be possible to turn
  // SVE on and off within a translation unit.  It should also be possible
  // to compile the global declaration:
  //
  // __SVInt8_t *ptr;
  //
  // even without SVE.
  HasAArch64SVETypes = true;

  // {} in inline assembly are neon specifiers, not assembly variant
  // specifiers.
  NoAsmVariants = true;

  // AAPCS gives rules for bitfields. 7.1.7 says: "The container type
  // contributes to the alignment of the containing aggregate in the same way
  // a plain (non bit-field) member of that type would, without exception for
  // zero-sized or anonymous bit-fields."
  assert(UseBitFieldTypeAlignment && "bitfields affect type alignment");
  UseZeroLengthBitfieldAlignment = true;

  // AArch64 targets default to using the ARM C++ ABI.
  TheCXXABI.set(TargetCXXABI::GenericAArch64);

  // Profiling entry symbol: Linux uses "\01_mcount"; bare-metal follows the
  // EABI flavor (GNU -> "\01_mcount", otherwise "mcount").
  if (Triple.getOS() == llvm::Triple::Linux)
    this->MCountName = "\01_mcount";
  else if (Triple.getOS() == llvm::Triple::UnknownOS)
    this->MCountName =
        Opts.EABIVersion == llvm::EABI::GNU ? "\01_mcount" : "mcount";
}
132 
133 StringRef AArch64TargetInfo::getABI() const { return ABI; }
134 
135 bool AArch64TargetInfo::setABI(const std::string &Name) {
136   if (Name != "aapcs" && Name != "darwinpcs")
137     return false;
138 
139   ABI = Name;
140   return true;
141 }
142 
143 bool AArch64TargetInfo::validateBranchProtection(StringRef Spec, StringRef,
144                                                  BranchProtectionInfo &BPI,
145                                                  StringRef &Err) const {
146   llvm::ARM::ParsedBranchProtection PBP;
147   if (!llvm::ARM::parseBranchProtection(Spec, PBP, Err))
148     return false;
149 
150   BPI.SignReturnAddr =
151       llvm::StringSwitch<LangOptions::SignReturnAddressScopeKind>(PBP.Scope)
152           .Case("non-leaf", LangOptions::SignReturnAddressScopeKind::NonLeaf)
153           .Case("all", LangOptions::SignReturnAddressScopeKind::All)
154           .Default(LangOptions::SignReturnAddressScopeKind::None);
155 
156   if (PBP.Key == "a_key")
157     BPI.SignKey = LangOptions::SignReturnAddressKeyKind::AKey;
158   else
159     BPI.SignKey = LangOptions::SignReturnAddressKeyKind::BKey;
160 
161   BPI.BranchTargetEnforcement = PBP.BranchTargetEnforcement;
162   return true;
163 }
164 
165 bool AArch64TargetInfo::isValidCPUName(StringRef Name) const {
166   return Name == "generic" ||
167          llvm::AArch64::parseCPUArch(Name) != llvm::AArch64::ArchKind::INVALID;
168 }
169 
// Validate a -mcpu value.  No target state is recorded here; feature
// selection happens later in handleTargetFeatures().
bool AArch64TargetInfo::setCPU(const std::string &Name) {
  return isValidCPUName(Name);
}
173 
// Append every valid -mcpu name (as known to the target parser) to `Values`,
// e.g. for driver diagnostics and tab completion.
void AArch64TargetInfo::fillValidCPUList(
    SmallVectorImpl<StringRef> &Values) const {
  llvm::AArch64::fillValidCPUArchList(Values);
}
178 
179 void AArch64TargetInfo::getTargetDefinesARMV81A(const LangOptions &Opts,
180                                                 MacroBuilder &Builder) const {
181   Builder.defineMacro("__ARM_FEATURE_QRDMX", "1");
182   Builder.defineMacro("__ARM_FEATURE_ATOMICS", "1");
183   Builder.defineMacro("__ARM_FEATURE_CRC32", "1");
184 }
185 
// Armv8.2-A adds no unconditional ACLE macros of its own; it only inherits
// the v8.1 set (optional v8.2 extensions are driven by feature flags).
void AArch64TargetInfo::getTargetDefinesARMV82A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Also include the ARMv8.1 defines
  getTargetDefinesARMV81A(Opts, Builder);
}
191 
192 void AArch64TargetInfo::getTargetDefinesARMV83A(const LangOptions &Opts,
193                                                 MacroBuilder &Builder) const {
194   Builder.defineMacro("__ARM_FEATURE_COMPLEX", "1");
195   Builder.defineMacro("__ARM_FEATURE_JCVT", "1");
196   // Also include the Armv8.2 defines
197   getTargetDefinesARMV82A(Opts, Builder);
198 }
199 
// Armv8.4-A adds no unconditional ACLE macros here; inherit the v8.3 set.
void AArch64TargetInfo::getTargetDefinesARMV84A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Also include the Armv8.3 defines
  getTargetDefinesARMV83A(Opts, Builder);
}
205 
// Armv8.5-A makes the FRINT32*/FRINT64* rounding instructions mandatory.
void AArch64TargetInfo::getTargetDefinesARMV85A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  Builder.defineMacro("__ARM_FEATURE_FRINT", "1");
  // Also include the Armv8.4 defines
  getTargetDefinesARMV84A(Opts, Builder);
}
212 
// Armv8.6-A: currently only inherits the v8.5 set; the mandatory BF16 and
// Int8 matrix-multiply macros are still emitted via the feature flags in
// getTargetDefines() rather than unconditionally here (see FIXME).
void AArch64TargetInfo::getTargetDefinesARMV86A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Also include the Armv8.5 defines
  // FIXME: Armv8.6 makes the following extensions mandatory:
  // - __ARM_FEATURE_BF16
  // - __ARM_FEATURE_MATMUL_INT8
  // Handle them here.
  getTargetDefinesARMV85A(Opts, Builder);
}
222 
// Armv8.7-A adds no unconditional ACLE macros here; inherit the v8.6 set.
void AArch64TargetInfo::getTargetDefinesARMV87A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Also include the Armv8.6 defines
  getTargetDefinesARMV86A(Opts, Builder);
}
228 
// Armv8.8-A adds no unconditional ACLE macros here; inherit the v8.7 set.
void AArch64TargetInfo::getTargetDefinesARMV88A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Also include the Armv8.7 defines
  getTargetDefinesARMV87A(Opts, Builder);
}
234 
// Armv9-A is architecturally based on Armv8.5-A, so it inherits that set.
void AArch64TargetInfo::getTargetDefinesARMV9A(const LangOptions &Opts,
                                               MacroBuilder &Builder) const {
  // Armv9-A maps to Armv8.5-A
  getTargetDefinesARMV85A(Opts, Builder);
}
240 
// Armv9.1-A is architecturally based on Armv8.6-A.
void AArch64TargetInfo::getTargetDefinesARMV91A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Armv9.1-A maps to Armv8.6-A
  getTargetDefinesARMV86A(Opts, Builder);
}
246 
// Armv9.2-A is architecturally based on Armv8.7-A.
void AArch64TargetInfo::getTargetDefinesARMV92A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Armv9.2-A maps to Armv8.7-A
  getTargetDefinesARMV87A(Opts, Builder);
}
252 
// Armv9.3-A is architecturally based on Armv8.8-A.
void AArch64TargetInfo::getTargetDefinesARMV93A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Armv9.3-A maps to Armv8.8-A
  getTargetDefinesARMV88A(Opts, Builder);
}
258 
259 void AArch64TargetInfo::getTargetDefines(const LangOptions &Opts,
260                                          MacroBuilder &Builder) const {
261   // Target identification.
262   Builder.defineMacro("__aarch64__");
263   // For bare-metal.
264   if (getTriple().getOS() == llvm::Triple::UnknownOS &&
265       getTriple().isOSBinFormatELF())
266     Builder.defineMacro("__ELF__");
267 
268   // Target properties.
269   if (!getTriple().isOSWindows() && getTriple().isArch64Bit()) {
270     Builder.defineMacro("_LP64");
271     Builder.defineMacro("__LP64__");
272   }
273 
274   std::string CodeModel = getTargetOpts().CodeModel;
275   if (CodeModel == "default")
276     CodeModel = "small";
277   for (char &c : CodeModel)
278     c = toupper(c);
279   Builder.defineMacro("__AARCH64_CMODEL_" + CodeModel + "__");
280 
281   // ACLE predefines. Many can only have one possible value on v8 AArch64.
282   Builder.defineMacro("__ARM_ACLE", "200");
283   Builder.defineMacro("__ARM_ARCH", getArchVersionString(ArchKind));
284   Builder.defineMacro("__ARM_ARCH_PROFILE", "'" + getArchProfile() + "'");
285 
286   Builder.defineMacro("__ARM_64BIT_STATE", "1");
287   Builder.defineMacro("__ARM_PCS_AAPCS64", "1");
288   Builder.defineMacro("__ARM_ARCH_ISA_A64", "1");
289 
290   Builder.defineMacro("__ARM_FEATURE_CLZ", "1");
291   Builder.defineMacro("__ARM_FEATURE_FMA", "1");
292   Builder.defineMacro("__ARM_FEATURE_LDREX", "0xF");
293   Builder.defineMacro("__ARM_FEATURE_IDIV", "1"); // As specified in ACLE
294   Builder.defineMacro("__ARM_FEATURE_DIV");       // For backwards compatibility
295   Builder.defineMacro("__ARM_FEATURE_NUMERIC_MAXMIN", "1");
296   Builder.defineMacro("__ARM_FEATURE_DIRECTED_ROUNDING", "1");
297 
298   Builder.defineMacro("__ARM_ALIGN_MAX_STACK_PWR", "4");
299 
300   // 0xe implies support for half, single and double precision operations.
301   Builder.defineMacro("__ARM_FP", "0xE");
302 
303   // PCS specifies this for SysV variants, which is all we support. Other ABIs
304   // may choose __ARM_FP16_FORMAT_ALTERNATIVE.
305   Builder.defineMacro("__ARM_FP16_FORMAT_IEEE", "1");
306   Builder.defineMacro("__ARM_FP16_ARGS", "1");
307 
308   if (Opts.UnsafeFPMath)
309     Builder.defineMacro("__ARM_FP_FAST", "1");
310 
311   Builder.defineMacro("__ARM_SIZEOF_WCHAR_T",
312                       Twine(Opts.WCharSize ? Opts.WCharSize : 4));
313 
314   Builder.defineMacro("__ARM_SIZEOF_MINIMAL_ENUM", Opts.ShortEnums ? "1" : "4");
315 
316   if (FPU & NeonMode) {
317     Builder.defineMacro("__ARM_NEON", "1");
318     // 64-bit NEON supports half, single and double precision operations.
319     Builder.defineMacro("__ARM_NEON_FP", "0xE");
320   }
321 
322   if (FPU & SveMode)
323     Builder.defineMacro("__ARM_FEATURE_SVE", "1");
324 
325   if ((FPU & NeonMode) && (FPU & SveMode))
326     Builder.defineMacro("__ARM_NEON_SVE_BRIDGE", "1");
327 
328   if (HasSVE2)
329     Builder.defineMacro("__ARM_FEATURE_SVE2", "1");
330 
331   if (HasSVE2 && HasSVE2AES)
332     Builder.defineMacro("__ARM_FEATURE_SVE2_AES", "1");
333 
334   if (HasSVE2 && HasSVE2BitPerm)
335     Builder.defineMacro("__ARM_FEATURE_SVE2_BITPERM", "1");
336 
337   if (HasSVE2 && HasSVE2SHA3)
338     Builder.defineMacro("__ARM_FEATURE_SVE2_SHA3", "1");
339 
340   if (HasSVE2 && HasSVE2SM4)
341     Builder.defineMacro("__ARM_FEATURE_SVE2_SM4", "1");
342 
343   if (HasCRC)
344     Builder.defineMacro("__ARM_FEATURE_CRC32", "1");
345 
346   // The __ARM_FEATURE_CRYPTO is deprecated in favor of finer grained feature
347   // macros for AES, SHA2, SHA3 and SM4
348   if (HasAES && HasSHA2)
349     Builder.defineMacro("__ARM_FEATURE_CRYPTO", "1");
350 
351   if (HasAES)
352     Builder.defineMacro("__ARM_FEATURE_AES", "1");
353 
354   if (HasSHA2)
355     Builder.defineMacro("__ARM_FEATURE_SHA2", "1");
356 
357   if (HasSHA3) {
358     Builder.defineMacro("__ARM_FEATURE_SHA3", "1");
359     Builder.defineMacro("__ARM_FEATURE_SHA512", "1");
360   }
361 
362   if (HasSM4) {
363     Builder.defineMacro("__ARM_FEATURE_SM3", "1");
364     Builder.defineMacro("__ARM_FEATURE_SM4", "1");
365   }
366 
367   if (HasUnaligned)
368     Builder.defineMacro("__ARM_FEATURE_UNALIGNED", "1");
369 
370   if ((FPU & NeonMode) && HasFullFP16)
371     Builder.defineMacro("__ARM_FEATURE_FP16_VECTOR_ARITHMETIC", "1");
372   if (HasFullFP16)
373    Builder.defineMacro("__ARM_FEATURE_FP16_SCALAR_ARITHMETIC", "1");
374 
375   if (HasDotProd)
376     Builder.defineMacro("__ARM_FEATURE_DOTPROD", "1");
377 
378   if (HasMTE)
379     Builder.defineMacro("__ARM_FEATURE_MEMORY_TAGGING", "1");
380 
381   if (HasTME)
382     Builder.defineMacro("__ARM_FEATURE_TME", "1");
383 
384   if (HasMatMul)
385     Builder.defineMacro("__ARM_FEATURE_MATMUL_INT8", "1");
386 
387   if (HasLSE)
388     Builder.defineMacro("__ARM_FEATURE_ATOMICS", "1");
389 
390   if (HasBFloat16) {
391     Builder.defineMacro("__ARM_FEATURE_BF16", "1");
392     Builder.defineMacro("__ARM_FEATURE_BF16_VECTOR_ARITHMETIC", "1");
393     Builder.defineMacro("__ARM_BF16_FORMAT_ALTERNATIVE", "1");
394     Builder.defineMacro("__ARM_FEATURE_BF16_SCALAR_ARITHMETIC", "1");
395   }
396 
397   if ((FPU & SveMode) && HasBFloat16) {
398     Builder.defineMacro("__ARM_FEATURE_SVE_BF16", "1");
399   }
400 
401   if ((FPU & SveMode) && HasMatmulFP64)
402     Builder.defineMacro("__ARM_FEATURE_SVE_MATMUL_FP64", "1");
403 
404   if ((FPU & SveMode) && HasMatmulFP32)
405     Builder.defineMacro("__ARM_FEATURE_SVE_MATMUL_FP32", "1");
406 
407   if ((FPU & SveMode) && HasMatMul)
408     Builder.defineMacro("__ARM_FEATURE_SVE_MATMUL_INT8", "1");
409 
410   if ((FPU & NeonMode) && HasFP16FML)
411     Builder.defineMacro("__ARM_FEATURE_FP16_FML", "1");
412 
413   if (Opts.hasSignReturnAddress()) {
414     // Bitmask:
415     // 0: Protection using the A key
416     // 1: Protection using the B key
417     // 2: Protection including leaf functions
418     unsigned Value = 0;
419 
420     if (Opts.isSignReturnAddressWithAKey())
421       Value |= (1 << 0);
422     else
423       Value |= (1 << 1);
424 
425     if (Opts.isSignReturnAddressScopeAll())
426       Value |= (1 << 2);
427 
428     Builder.defineMacro("__ARM_FEATURE_PAC_DEFAULT", std::to_string(Value));
429   }
430 
431   if (Opts.BranchTargetEnforcement)
432     Builder.defineMacro("__ARM_FEATURE_BTI_DEFAULT", "1");
433 
434   if (HasLS64)
435     Builder.defineMacro("__ARM_FEATURE_LS64", "1");
436 
437   if (HasRandGen)
438     Builder.defineMacro("__ARM_FEATURE_RNG", "1");
439 
440   if (HasMOPS)
441     Builder.defineMacro("__ARM_FEATURE_MOPS", "1");
442 
443   switch (ArchKind) {
444   default:
445     break;
446   case llvm::AArch64::ArchKind::ARMV8_1A:
447     getTargetDefinesARMV81A(Opts, Builder);
448     break;
449   case llvm::AArch64::ArchKind::ARMV8_2A:
450     getTargetDefinesARMV82A(Opts, Builder);
451     break;
452   case llvm::AArch64::ArchKind::ARMV8_3A:
453     getTargetDefinesARMV83A(Opts, Builder);
454     break;
455   case llvm::AArch64::ArchKind::ARMV8_4A:
456     getTargetDefinesARMV84A(Opts, Builder);
457     break;
458   case llvm::AArch64::ArchKind::ARMV8_5A:
459     getTargetDefinesARMV85A(Opts, Builder);
460     break;
461   case llvm::AArch64::ArchKind::ARMV8_6A:
462     getTargetDefinesARMV86A(Opts, Builder);
463     break;
464   case llvm::AArch64::ArchKind::ARMV8_7A:
465     getTargetDefinesARMV87A(Opts, Builder);
466     break;
467   case llvm::AArch64::ArchKind::ARMV8_8A:
468     getTargetDefinesARMV88A(Opts, Builder);
469     break;
470   case llvm::AArch64::ArchKind::ARMV9A:
471     getTargetDefinesARMV9A(Opts, Builder);
472     break;
473   case llvm::AArch64::ArchKind::ARMV9_1A:
474     getTargetDefinesARMV91A(Opts, Builder);
475     break;
476   case llvm::AArch64::ArchKind::ARMV9_2A:
477     getTargetDefinesARMV92A(Opts, Builder);
478     break;
479   case llvm::AArch64::ArchKind::ARMV9_3A:
480     getTargetDefinesARMV93A(Opts, Builder);
481     break;
482   }
483 
484   // All of the __sync_(bool|val)_compare_and_swap_(1|2|4|8) builtins work.
485   Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_1");
486   Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_2");
487   Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4");
488   Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8");
489 
490   // Allow detection of fast FMA support.
491   Builder.defineMacro("__FP_FAST_FMA", "1");
492   Builder.defineMacro("__FP_FAST_FMAF", "1");
493 
494   // C/C++ operators work on both VLS and VLA SVE types
495   if (FPU & SveMode)
496     Builder.defineMacro("__ARM_FEATURE_SVE_VECTOR_OPERATORS", "2");
497 
498   if (Opts.VScaleMin && Opts.VScaleMin == Opts.VScaleMax) {
499     Builder.defineMacro("__ARM_FEATURE_SVE_BITS", Twine(Opts.VScaleMin * 128));
500   }
501 }
502 
// Return the target-builtin table; its length is the number of AArch64
// builtin IDs (NEON + SVE + general), matching the BuiltinInfo array above.
ArrayRef<Builtin::Info> AArch64TargetInfo::getTargetBuiltins() const {
  return llvm::makeArrayRef(BuiltinInfo, clang::AArch64::LastTSBuiltin -
                                             Builtin::FirstTSBuiltin);
}
507 
508 Optional<std::pair<unsigned, unsigned>>
509 AArch64TargetInfo::getVScaleRange(const LangOptions &LangOpts) const {
510   if (LangOpts.VScaleMin || LangOpts.VScaleMax)
511     return std::pair<unsigned, unsigned>(
512         LangOpts.VScaleMin ? LangOpts.VScaleMin : 1, LangOpts.VScaleMax);
513 
514   if (hasFeature("sve"))
515     return std::pair<unsigned, unsigned>(1, 16);
516 
517   return None;
518 }
519 
// Answer __has_feature-style queries about the active target features.
// The StringSwitch takes the first matching case.
// NOTE(review): every SVE-adjacent name ("sve2", "sve2-aes", "i8mm", "bf16",
// ...) is answered purely from SveMode, not from the individual
// HasSVE2*/HasMatMul/HasBFloat16 flags — confirm this coarse mapping is
// intentional.
bool AArch64TargetInfo::hasFeature(StringRef Feature) const {
  return llvm::StringSwitch<bool>(Feature)
    .Cases("aarch64", "arm64", "arm", true)
    .Case("neon", FPU & NeonMode)
    .Cases("sve", "sve2", "sve2-bitperm", "sve2-aes", "sve2-sha3", "sve2-sm4", "f64mm", "f32mm", "i8mm", "bf16", FPU & SveMode)
    .Case("ls64", HasLS64)
    .Default(false);
}
528 
// Translate the backend feature list ("+neon", "+sve2-aes", "+v8.4a", ...)
// into the boolean flags and FPU/ArchKind state used by getTargetDefines()
// and hasFeature().  All state is reset first, so only features present in
// `Features` survive.  Each entry is tested with independent string
// equalities (the tests are mutually exclusive, so no else-if is needed).
// Always returns true; `Diags` is currently unused.
bool AArch64TargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
                                             DiagnosticsEngine &Diags) {
  // Baseline: scalar FPU only, unaligned access allowed, nothing else.
  FPU = FPUMode;
  HasCRC = false;
  HasAES = false;
  HasSHA2 = false;
  HasSHA3 = false;
  HasSM4 = false;
  HasUnaligned = true;
  HasFullFP16 = false;
  HasDotProd = false;
  HasFP16FML = false;
  HasMTE = false;
  HasTME = false;
  HasLS64 = false;
  HasRandGen = false;
  HasMatMul = false;
  HasBFloat16 = false;
  HasSVE2 = false;
  HasSVE2AES = false;
  HasSVE2SHA3 = false;
  HasSVE2SM4 = false;
  HasSVE2BitPerm = false;
  HasMatmulFP64 = false;
  HasMatmulFP32 = false;
  HasLSE = false;
  HasMOPS = false;

  ArchKind = llvm::AArch64::ArchKind::INVALID;

  for (const auto &Feature : Features) {
    // SIMD / SVE modes.  Every SVE(2) feature implies SVE mode and full
    // FP16 support.
    if (Feature == "+neon")
      FPU |= NeonMode;
    if (Feature == "+sve") {
      FPU |= SveMode;
      HasFullFP16 = true;
    }
    if (Feature == "+sve2") {
      FPU |= SveMode;
      HasFullFP16 = true;
      HasSVE2 = true;
    }
    if (Feature == "+sve2-aes") {
      FPU |= SveMode;
      HasFullFP16 = true;
      HasSVE2 = true;
      HasSVE2AES = true;
    }
    if (Feature == "+sve2-sha3") {
      FPU |= SveMode;
      HasFullFP16 = true;
      HasSVE2 = true;
      HasSVE2SHA3 = true;
    }
    if (Feature == "+sve2-sm4") {
      FPU |= SveMode;
      HasFullFP16 = true;
      HasSVE2 = true;
      HasSVE2SM4 = true;
    }
    if (Feature == "+sve2-bitperm") {
      FPU |= SveMode;
      HasFullFP16 = true;
      HasSVE2 = true;
      HasSVE2BitPerm = true;
    }
    if (Feature == "+f32mm") {
      FPU |= SveMode;
      HasMatmulFP32 = true;
    }
    if (Feature == "+f64mm") {
      FPU |= SveMode;
      HasMatmulFP64 = true;
    }
    // Crypto and miscellaneous scalar features.
    if (Feature == "+crc")
      HasCRC = true;
    if (Feature == "+aes")
      HasAES = true;
    if (Feature == "+sha2")
      HasSHA2 = true;
    if (Feature == "+sha3") {
      HasSHA2 = true;
      HasSHA3 = true;
    }
    if (Feature == "+sm4")
      HasSM4 = true;
    if (Feature == "+strict-align")
      HasUnaligned = false;
    // Architecture version selection (last one in the list wins).
    if (Feature == "+v8a")
      ArchKind = llvm::AArch64::ArchKind::ARMV8A;
    if (Feature == "+v8.1a")
      ArchKind = llvm::AArch64::ArchKind::ARMV8_1A;
    if (Feature == "+v8.2a")
      ArchKind = llvm::AArch64::ArchKind::ARMV8_2A;
    if (Feature == "+v8.3a")
      ArchKind = llvm::AArch64::ArchKind::ARMV8_3A;
    if (Feature == "+v8.4a")
      ArchKind = llvm::AArch64::ArchKind::ARMV8_4A;
    if (Feature == "+v8.5a")
      ArchKind = llvm::AArch64::ArchKind::ARMV8_5A;
    if (Feature == "+v8.6a")
      ArchKind = llvm::AArch64::ArchKind::ARMV8_6A;
    if (Feature == "+v8.7a")
      ArchKind = llvm::AArch64::ArchKind::ARMV8_7A;
    if (Feature == "+v8.8a")
      ArchKind = llvm::AArch64::ArchKind::ARMV8_8A;
    if (Feature == "+v9a")
      ArchKind = llvm::AArch64::ArchKind::ARMV9A;
    if (Feature == "+v9.1a")
      ArchKind = llvm::AArch64::ArchKind::ARMV9_1A;
    if (Feature == "+v9.2a")
      ArchKind = llvm::AArch64::ArchKind::ARMV9_2A;
    if (Feature == "+v9.3a")
      ArchKind = llvm::AArch64::ArchKind::ARMV9_3A;
    if (Feature == "+v8r")
      ArchKind = llvm::AArch64::ArchKind::ARMV8R;
    // Remaining optional extensions.
    if (Feature == "+fullfp16")
      HasFullFP16 = true;
    if (Feature == "+dotprod")
      HasDotProd = true;
    if (Feature == "+fp16fml")
      HasFP16FML = true;
    if (Feature == "+mte")
      HasMTE = true;
    if (Feature == "+tme")
      HasTME = true;
    if (Feature == "+pauth")
      HasPAuth = true;
    if (Feature == "+i8mm")
      HasMatMul = true;
    if (Feature == "+bf16")
      HasBFloat16 = true;
    if (Feature == "+lse")
      HasLSE = true;
    if (Feature == "+ls64")
      HasLS64 = true;
    if (Feature == "+rand")
      HasRandGen = true;
    if (Feature == "+flagm")
      HasFlagM = true;
    if (Feature == "+mops")
      HasMOPS = true;
  }

  // Recompute the data layout now that the feature state is final.
  setDataLayout();

  return true;
}
677 
// Report which calling conventions AArch64 supports.  Anything not listed
// gets CCCR_Warning (the convention is ignored with a diagnostic rather
// than rejected outright).
TargetInfo::CallingConvCheckResult
AArch64TargetInfo::checkCallingConvention(CallingConv CC) const {
  switch (CC) {
  case CC_C:
  case CC_Swift:
  case CC_SwiftAsync:
  case CC_PreserveMost:
  case CC_PreserveAll:
  case CC_OpenCLKernel:
  case CC_AArch64VectorCall:
  case CC_AArch64SVEPCS:
  case CC_Win64:
    return CCCR_OK;
  default:
    return CCCR_Warning;
  }
}
695 
696 bool AArch64TargetInfo::isCLZForZeroUndef() const { return false; }
697 
// AArch64 uses the AAPCS64 va_list structure rather than a char pointer.
TargetInfo::BuiltinVaListKind AArch64TargetInfo::getBuiltinVaListKind() const {
  return TargetInfo::AArch64ABIBuiltinVaList;
}
701 
// Register names accepted in GCC-style inline assembly and register
// variables, grouped by register class.
const char *const AArch64TargetInfo::GCCRegNames[] = {
    // 32-bit Integer registers
    "w0", "w1", "w2", "w3", "w4", "w5", "w6", "w7", "w8", "w9", "w10", "w11",
    "w12", "w13", "w14", "w15", "w16", "w17", "w18", "w19", "w20", "w21", "w22",
    "w23", "w24", "w25", "w26", "w27", "w28", "w29", "w30", "wsp",

    // 64-bit Integer registers
    "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11",
    "x12", "x13", "x14", "x15", "x16", "x17", "x18", "x19", "x20", "x21", "x22",
    "x23", "x24", "x25", "x26", "x27", "x28", "fp", "lr", "sp",

    // 32-bit floating point registers
    "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7", "s8", "s9", "s10", "s11",
    "s12", "s13", "s14", "s15", "s16", "s17", "s18", "s19", "s20", "s21", "s22",
    "s23", "s24", "s25", "s26", "s27", "s28", "s29", "s30", "s31",

    // 64-bit floating point registers
    "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7", "d8", "d9", "d10", "d11",
    "d12", "d13", "d14", "d15", "d16", "d17", "d18", "d19", "d20", "d21", "d22",
    "d23", "d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31",

    // Neon vector registers
    "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11",
    "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22",
    "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31",

    // SVE vector registers
    "z0",  "z1",  "z2",  "z3",  "z4",  "z5",  "z6",  "z7",  "z8",  "z9",  "z10",
    "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21",
    "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31",

    // SVE predicate registers
    "p0",  "p1",  "p2",  "p3",  "p4",  "p5",  "p6",  "p7",  "p8",  "p9",  "p10",
    "p11", "p12", "p13", "p14", "p15"
};
737 
// Expose the GCCRegNames table above to the generic TargetInfo machinery.
ArrayRef<const char *> AArch64TargetInfo::getGCCRegNames() const {
  return llvm::makeArrayRef(GCCRegNames);
}
741 
// Alternate spellings accepted for registers in inline assembly; each entry
// maps one or more alias names to the canonical name in GCCRegNames.
const TargetInfo::GCCRegAlias AArch64TargetInfo::GCCRegAliases[] = {
    {{"w31"}, "wsp"},
    {{"x31"}, "sp"},
    // GCC rN registers are aliases of xN registers.
    {{"r0"}, "x0"},
    {{"r1"}, "x1"},
    {{"r2"}, "x2"},
    {{"r3"}, "x3"},
    {{"r4"}, "x4"},
    {{"r5"}, "x5"},
    {{"r6"}, "x6"},
    {{"r7"}, "x7"},
    {{"r8"}, "x8"},
    {{"r9"}, "x9"},
    {{"r10"}, "x10"},
    {{"r11"}, "x11"},
    {{"r12"}, "x12"},
    {{"r13"}, "x13"},
    {{"r14"}, "x14"},
    {{"r15"}, "x15"},
    {{"r16"}, "x16"},
    {{"r17"}, "x17"},
    {{"r18"}, "x18"},
    {{"r19"}, "x19"},
    {{"r20"}, "x20"},
    {{"r21"}, "x21"},
    {{"r22"}, "x22"},
    {{"r23"}, "x23"},
    {{"r24"}, "x24"},
    {{"r25"}, "x25"},
    {{"r26"}, "x26"},
    {{"r27"}, "x27"},
    {{"r28"}, "x28"},
    {{"r29", "x29"}, "fp"},
    {{"r30", "x30"}, "lr"},
    // The S/D/Q and W/X registers overlap, but aren't really aliases; we
    // don't want to substitute one of these for a different-sized one.
};
780 
// Expose the GCCRegAliases table above to the generic TargetInfo machinery.
ArrayRef<TargetInfo::GCCRegAlias> AArch64TargetInfo::getGCCRegAliases() const {
  return llvm::makeArrayRef(GCCRegAliases);
}
784 
785 bool AArch64TargetInfo::validateAsmConstraint(
786     const char *&Name, TargetInfo::ConstraintInfo &Info) const {
787   switch (*Name) {
788   default:
789     return false;
790   case 'w': // Floating point and SIMD registers (V0-V31)
791     Info.setAllowsRegister();
792     return true;
793   case 'I': // Constant that can be used with an ADD instruction
794   case 'J': // Constant that can be used with a SUB instruction
795   case 'K': // Constant that can be used with a 32-bit logical instruction
796   case 'L': // Constant that can be used with a 64-bit logical instruction
797   case 'M': // Constant that can be used as a 32-bit MOV immediate
798   case 'N': // Constant that can be used as a 64-bit MOV immediate
799   case 'Y': // Floating point constant zero
800   case 'Z': // Integer constant zero
801     return true;
802   case 'Q': // A memory reference with base register and no offset
803     Info.setAllowsMemory();
804     return true;
805   case 'S': // A symbolic address
806     Info.setAllowsRegister();
807     return true;
808   case 'U':
809     if (Name[1] == 'p' && (Name[2] == 'l' || Name[2] == 'a')) {
810       // SVE predicate registers ("Upa"=P0-15, "Upl"=P0-P7)
811       Info.setAllowsRegister();
812       Name += 2;
813       return true;
814     }
815     // Ump: A memory address suitable for ldp/stp in SI, DI, SF and DF modes.
816     // Utf: A memory address suitable for ldp/stp in TF mode.
817     // Usa: An absolute symbolic address.
818     // Ush: The high part (bits 32:12) of a pc-relative symbolic address.
819 
820     // Better to return an error saying that it's an unrecognised constraint
821     // even if this is a valid constraint in gcc.
822     return false;
823   case 'z': // Zero register, wzr or xzr
824     Info.setAllowsRegister();
825     return true;
826   case 'x': // Floating point and SIMD registers (V0-V15)
827     Info.setAllowsRegister();
828     return true;
829   case 'y': // SVE registers (V0-V7)
830     Info.setAllowsRegister();
831     return true;
832   }
833   return false;
834 }
835 
836 bool AArch64TargetInfo::validateConstraintModifier(
837     StringRef Constraint, char Modifier, unsigned Size,
838     std::string &SuggestedModifier) const {
839   // Strip off constraint modifiers.
840   while (Constraint[0] == '=' || Constraint[0] == '+' || Constraint[0] == '&')
841     Constraint = Constraint.substr(1);
842 
843   switch (Constraint[0]) {
844   default:
845     return true;
846   case 'z':
847   case 'r': {
848     switch (Modifier) {
849     case 'x':
850     case 'w':
851       // For now assume that the person knows what they're
852       // doing with the modifier.
853       return true;
854     default:
855       // By default an 'r' constraint will be in the 'x'
856       // registers.
857       if (Size == 64)
858         return true;
859 
860       if (Size == 512)
861         return HasLS64;
862 
863       SuggestedModifier = "w";
864       return false;
865     }
866   }
867   }
868 }
869 
870 const char *AArch64TargetInfo::getClobbers() const { return ""; }
871 
872 int AArch64TargetInfo::getEHDataRegisterNumber(unsigned RegNo) const {
873   if (RegNo == 0)
874     return 0;
875   if (RegNo == 1)
876     return 1;
877   return -1;
878 }
879 
880 bool AArch64TargetInfo::hasInt128Type() const { return true; }
881 
// Little-endian AArch64: all configuration is inherited from the generic
// base; only the data layout (setDataLayout below) differs.
AArch64leTargetInfo::AArch64leTargetInfo(const llvm::Triple &Triple,
                                         const TargetOptions &Opts)
    : AArch64TargetInfo(Triple, Opts) {}
885 
886 void AArch64leTargetInfo::setDataLayout() {
887   if (getTriple().isOSBinFormatMachO()) {
888     if(getTriple().isArch32Bit())
889       resetDataLayout("e-m:o-p:32:32-i64:64-i128:128-n32:64-S128", "_");
890     else
891       resetDataLayout("e-m:o-i64:64-i128:128-n32:64-S128", "_");
892   } else
893     resetDataLayout("e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128");
894 }
895 
void AArch64leTargetInfo::getTargetDefines(const LangOptions &Opts,
                                           MacroBuilder &Builder) const {
  // Mark the target as little-endian AArch64, then emit the common
  // AArch64 macros from the base class.
  Builder.defineMacro("__AARCH64EL__");
  AArch64TargetInfo::getTargetDefines(Opts, Builder);
}
901 
// Big-endian AArch64: configuration is inherited from the common base;
// the endian-specific pieces are the overrides below.
AArch64beTargetInfo::AArch64beTargetInfo(const llvm::Triple &Triple,
                                         const TargetOptions &Opts)
    : AArch64TargetInfo(Triple, Opts) {}
905 
void AArch64beTargetInfo::getTargetDefines(const LangOptions &Opts,
                                           MacroBuilder &Builder) const {
  // Big-endian markers, then the common AArch64 macros from the base class.
  Builder.defineMacro("__AARCH64EB__");
  Builder.defineMacro("__AARCH_BIG_ENDIAN");
  Builder.defineMacro("__ARM_BIG_ENDIAN");
  AArch64TargetInfo::getTargetDefines(Opts, Builder);
}
913 
void AArch64beTargetInfo::setDataLayout() {
  // Big-endian Mach-O is not a supported combination, so only the
  // ELF-style layout (big-endian "E" leading component) is needed here.
  assert(!getTriple().isOSBinFormatMachO());
  resetDataLayout("E-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128");
}
918 
// Common base for Windows-on-ARM64 flavors (MSVC and MinGW). Captures the
// triple for use in setDataLayout and applies the Windows LLP64 type model
// on top of the little-endian AArch64 defaults.
WindowsARM64TargetInfo::WindowsARM64TargetInfo(const llvm::Triple &Triple,
                                               const TargetOptions &Opts)
    : WindowsTargetInfo<AArch64leTargetInfo>(Triple, Opts), Triple(Triple) {

  // This is an LLP64 platform.
  // int:4, long:4, long long:8, long double:8.
  IntWidth = IntAlign = 32;
  LongWidth = LongAlign = 32;
  DoubleAlign = LongLongAlign = 64;
  // long double is plain IEEE double on Windows, not an extended format.
  LongDoubleWidth = LongDoubleAlign = 64;
  LongDoubleFormat = &llvm::APFloat::IEEEdouble();
  // With 32-bit long, the 64-bit integer and pointer-sized types must all
  // be based on long long.
  IntMaxType = SignedLongLong;
  Int64Type = SignedLongLong;
  SizeType = UnsignedLongLong;
  PtrDiffType = SignedLongLong;
  IntPtrType = SignedLongLong;
}
936 
937 void WindowsARM64TargetInfo::setDataLayout() {
938   resetDataLayout(Triple.isOSBinFormatMachO()
939                       ? "e-m:o-i64:64-i128:128-n32:64-S128"
940                       : "e-m:w-p:64:64-i32:32-i64:64-i128:128-n32:64-S128",
941                   Triple.isOSBinFormatMachO() ? "_" : "");
942 }
943 
// Windows ARM64 uses a simple char* va_list rather than the multi-field
// AAPCS va_list structure.
TargetInfo::BuiltinVaListKind
WindowsARM64TargetInfo::getBuiltinVaListKind() const {
  return TargetInfo::CharPtrBuiltinVaList;
}
948 
TargetInfo::CallingConvCheckResult
WindowsARM64TargetInfo::checkCallingConvention(CallingConv CC) const {
  switch (CC) {
  // x86-specific conventions are silently ignored on ARM64 (matching
  // MSVC-style source portability) rather than warned about.
  case CC_X86StdCall:
  case CC_X86ThisCall:
  case CC_X86FastCall:
  case CC_X86VectorCall:
    return CCCR_Ignore;
  // Conventions this target actually supports.
  case CC_C:
  case CC_OpenCLKernel:
  case CC_PreserveMost:
  case CC_PreserveAll:
  case CC_Swift:
  case CC_SwiftAsync:
  case CC_Win64:
    return CCCR_OK;
  // Anything else: accept but warn.
  default:
    return CCCR_Warning;
  }
}
969 
// MSVC-environment Windows ARM64: the Windows base configuration plus the
// Microsoft C++ ABI.
MicrosoftARM64TargetInfo::MicrosoftARM64TargetInfo(const llvm::Triple &Triple,
                                                   const TargetOptions &Opts)
    : WindowsARM64TargetInfo(Triple, Opts) {
  TheCXXABI.set(TargetCXXABI::Microsoft);
}
975 
void MicrosoftARM64TargetInfo::getTargetDefines(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Windows/AArch64 macros from the base, plus the MSVC architecture macro.
  WindowsARM64TargetInfo::getTargetDefines(Opts, Builder);
  Builder.defineMacro("_M_ARM64", "1");
}
981 
TargetInfo::CallingConvKind
MicrosoftARM64TargetInfo::getCallingConvKind(bool ClangABICompat4) const {
  // The Microsoft Win64 convention applies unconditionally on MSVC ARM64;
  // the ABI-compat-4 flag is deliberately ignored.
  return CCK_MicrosoftWin64;
}
986 
// Minimum alignment (in bits) for a global of TypeSize bits, raised to
// match MSVC's size-based alignment rules for arm64.
unsigned MicrosoftARM64TargetInfo::getMinGlobalAlign(uint64_t TypeSize) const {
  unsigned Align = WindowsARM64TargetInfo::getMinGlobalAlign(TypeSize);

  // MSVC does size based alignment for arm64 based on alignment section in
  // below document, replicate that to keep alignment consistent with object
  // files compiled by MSVC.
  // https://docs.microsoft.com/en-us/cpp/build/arm64-windows-abi-conventions
  if (TypeSize >= 512) {              // TypeSize >= 64 bytes
    Align = std::max(Align, 128u);    // align type at least 16 bytes
  } else if (TypeSize >= 64) {        // TypeSize >= 8 bytes
    Align = std::max(Align, 64u);     // align type at least 8 bytes
  } else if (TypeSize >= 16) {        // TypeSize >= 2 bytes
    Align = std::max(Align, 32u);     // align type at least 4 bytes
  }
  return Align;
}
1003 
// MinGW (GNU-environment) Windows ARM64: same Windows base configuration,
// but with the Itanium-family generic AArch64 C++ ABI instead of MSVC's.
MinGWARM64TargetInfo::MinGWARM64TargetInfo(const llvm::Triple &Triple,
                                           const TargetOptions &Opts)
    : WindowsARM64TargetInfo(Triple, Opts) {
  TheCXXABI.set(TargetCXXABI::GenericAArch64);
}
1009 
// Darwin (macOS/iOS/watchOS) AArch64 target configuration.
DarwinAArch64TargetInfo::DarwinAArch64TargetInfo(const llvm::Triple &Triple,
                                                 const TargetOptions &Opts)
    : DarwinTargetInfo<AArch64leTargetInfo>(Triple, Opts) {
  Int64Type = SignedLongLong;
  // On a 32-bit triple (arm64_32), long is 32-bit, so intmax_t must be
  // based on long long.
  if (getTriple().isArch32Bit())
    IntMaxType = SignedLongLong;

  WCharType = SignedInt;
  UseSignedCharForObjCBool = false;

  // long double is plain IEEE double on Darwin AArch64.
  LongDoubleWidth = LongDoubleAlign = SuitableAlign = 64;
  LongDoubleFormat = &llvm::APFloat::IEEEdouble();

  UseZeroLengthBitfieldAlignment = false;

  if (getTriple().isArch32Bit()) {
    // arm64_32 keeps the legacy ARM bit-field layout rules and uses the
    // watchOS C++ ABI.
    UseBitFieldTypeAlignment = false;
    ZeroLengthBitfieldBoundary = 32;
    UseZeroLengthBitfieldAlignment = true;
    TheCXXABI.set(TargetCXXABI::WatchOS);
  } else
    TheCXXABI.set(TargetCXXABI::AppleARM64);
}
1033 
void DarwinAArch64TargetInfo::getOSDefines(const LangOptions &Opts,
                                           const llvm::Triple &Triple,
                                           MacroBuilder &Builder) const {
  // Darwin-specific architecture macros; NEON/SIMD is always available.
  Builder.defineMacro("__AARCH64_SIMD__");
  // Distinguish arm64_32 (32-bit pointers) from regular arm64.
  if (Triple.isArch32Bit())
    Builder.defineMacro("__ARM64_ARCH_8_32__");
  else
    Builder.defineMacro("__ARM64_ARCH_8__");
  Builder.defineMacro("__ARM_NEON__");
  Builder.defineMacro("__LITTLE_ENDIAN__");
  Builder.defineMacro("__REGISTER_PREFIX__", "");
  Builder.defineMacro("__arm64", "1");
  Builder.defineMacro("__arm64__", "1");

  // Pointer-authentication variant of the arm64 ABI.
  if (Triple.isArm64e())
    Builder.defineMacro("__arm64e__", "1");

  // Common Darwin OS/platform macros (version, availability, etc.).
  getDarwinDefines(Builder, Opts, Triple, PlatformName, PlatformMinVersion);
}
1053 
// Darwin ARM64 uses a simple char* va_list rather than the multi-field
// AAPCS va_list structure.
TargetInfo::BuiltinVaListKind
DarwinAArch64TargetInfo::getBuiltinVaListKind() const {
  return TargetInfo::CharPtrBuiltinVaList;
}
1058 
1059 // 64-bit RenderScript is aarch64
1060 RenderScript64TargetInfo::RenderScript64TargetInfo(const llvm::Triple &Triple,
1061                                                    const TargetOptions &Opts)
1062     : AArch64leTargetInfo(llvm::Triple("aarch64", Triple.getVendorName(),
1063                                        Triple.getOSName(),
1064                                        Triple.getEnvironmentName()),
1065                           Opts) {
1066   IsRenderScriptTarget = true;
1067 }
1068 
void RenderScript64TargetInfo::getTargetDefines(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // RenderScript marker macro, then the regular AArch64 LE macros.
  Builder.defineMacro("__RENDERSCRIPT__");
  AArch64leTargetInfo::getTargetDefines(Opts, Builder);
}
1074