1 //===--- AArch64.cpp - Implement AArch64 target feature support -----------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements AArch64 TargetInfo objects.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "AArch64.h"
14 #include "clang/Basic/LangOptions.h"
15 #include "clang/Basic/TargetBuiltins.h"
16 #include "clang/Basic/TargetInfo.h"
17 #include "llvm/ADT/ArrayRef.h"
18 #include "llvm/ADT/StringExtras.h"
19 #include "llvm/ADT/StringSwitch.h"
20 #include "llvm/Support/AArch64TargetParser.h"
21 
22 using namespace clang;
23 using namespace clang::targets;
24 
// Table of all AArch64 target-specific builtins, populated in the same
// order as the .def includes below: NEON, then SVE, then the general
// AArch64 builtins (including language- and header-gated entries).
const Builtin::Info AArch64TargetInfo::BuiltinInfo[] = {
#define BUILTIN(ID, TYPE, ATTRS)                                               \
   {#ID, TYPE, ATTRS, nullptr, ALL_LANGUAGES, nullptr},
#include "clang/Basic/BuiltinsNEON.def"

#define BUILTIN(ID, TYPE, ATTRS)                                               \
   {#ID, TYPE, ATTRS, nullptr, ALL_LANGUAGES, nullptr},
#include "clang/Basic/BuiltinsSVE.def"

#define BUILTIN(ID, TYPE, ATTRS)                                               \
   {#ID, TYPE, ATTRS, nullptr, ALL_LANGUAGES, nullptr},
#define LANGBUILTIN(ID, TYPE, ATTRS, LANG)                                     \
  {#ID, TYPE, ATTRS, nullptr, LANG, nullptr},
#define TARGET_HEADER_BUILTIN(ID, TYPE, ATTRS, HEADER, LANGS, FEATURE)         \
  {#ID, TYPE, ATTRS, HEADER, LANGS, FEATURE},
#include "clang/Basic/BuiltinsAArch64.def"
};
42 
43 static StringRef getArchVersionString(llvm::AArch64::ArchKind Kind) {
44   switch (Kind) {
45   case llvm::AArch64::ArchKind::ARMV9A:
46   case llvm::AArch64::ArchKind::ARMV9_1A:
47   case llvm::AArch64::ArchKind::ARMV9_2A:
48   case llvm::AArch64::ArchKind::ARMV9_3A:
49     return "9";
50   default:
51     return "8";
52   }
53 }
54 
55 StringRef AArch64TargetInfo::getArchProfile() const {
56   switch (ArchKind) {
57   case llvm::AArch64::ArchKind::ARMV8R:
58     return "R";
59   default:
60     return "A";
61   }
62 }
63 
// Construct the common AArch64 target description shared by the LE/BE and
// Windows subclasses: fundamental type widths/formats, ABI defaults, and
// per-OS type tweaks.
AArch64TargetInfo::AArch64TargetInfo(const llvm::Triple &Triple,
                                     const TargetOptions &Opts)
    : TargetInfo(Triple), ABI("aapcs") {
  if (getTriple().isOSOpenBSD()) {
    Int64Type = SignedLongLong;
    IntMaxType = SignedLongLong;
  } else {
    // Darwin and NetBSD keep the TargetInfo default wchar_t; everyone else
    // uses unsigned int.
    if (!getTriple().isOSDarwin() && !getTriple().isOSNetBSD())
      WCharType = UnsignedInt;

    Int64Type = SignedLong;
    IntMaxType = SignedLong;
  }

  // All AArch64 implementations support ARMv8 FP, which makes half a legal type.
  HasLegalHalfType = true;
  HasFloat16 = true;

  // ILP32 variants (32-bit triples) narrow long and pointers to 32 bits.
  if (Triple.isArch64Bit())
    LongWidth = LongAlign = PointerWidth = PointerAlign = 64;
  else
    LongWidth = LongAlign = PointerWidth = PointerAlign = 32;

  // Atomics up to 128 bits wide are promoted and inlined.
  MaxVectorAlign = 128;
  MaxAtomicInlineWidth = 128;
  MaxAtomicPromoteWidth = 128;

  // long double is IEEE 128-bit quad precision under AAPCS.
  LongDoubleWidth = LongDoubleAlign = SuitableAlign = 128;
  LongDoubleFormat = &llvm::APFloat::IEEEquad();

  BFloat16Width = BFloat16Align = 16;
  BFloat16Format = &llvm::APFloat::BFloat();

  // Make __builtin_ms_va_list available.
  HasBuiltinMSVaList = true;

  // Make the SVE types available.  Note that this deliberately doesn't
  // depend on SveMode, since in principle it should be possible to turn
  // SVE on and off within a translation unit.  It should also be possible
  // to compile the global declaration:
  //
  // __SVInt8_t *ptr;
  //
  // even without SVE.
  HasAArch64SVETypes = true;

  // {} in inline assembly are neon specifiers, not assembly variant
  // specifiers.
  NoAsmVariants = true;

  // AAPCS gives rules for bitfields. 7.1.7 says: "The container type
  // contributes to the alignment of the containing aggregate in the same way
  // a plain (non bit-field) member of that type would, without exception for
  // zero-sized or anonymous bit-fields."
  assert(UseBitFieldTypeAlignment && "bitfields affect type alignment");
  UseZeroLengthBitfieldAlignment = true;

  // AArch64 targets default to using the ARM C++ ABI.
  TheCXXABI.set(TargetCXXABI::GenericAArch64);

  // Profiling counter name differs by OS/EABI convention.
  if (Triple.getOS() == llvm::Triple::Linux)
    this->MCountName = "\01_mcount";
  else if (Triple.getOS() == llvm::Triple::UnknownOS)
    this->MCountName =
        Opts.EABIVersion == llvm::EABI::GNU ? "\01_mcount" : "mcount";
}
130 
131 StringRef AArch64TargetInfo::getABI() const { return ABI; }
132 
133 bool AArch64TargetInfo::setABI(const std::string &Name) {
134   if (Name != "aapcs" && Name != "darwinpcs")
135     return false;
136 
137   ABI = Name;
138   return true;
139 }
140 
141 bool AArch64TargetInfo::validateBranchProtection(StringRef Spec, StringRef,
142                                                  BranchProtectionInfo &BPI,
143                                                  StringRef &Err) const {
144   llvm::ARM::ParsedBranchProtection PBP;
145   if (!llvm::ARM::parseBranchProtection(Spec, PBP, Err))
146     return false;
147 
148   BPI.SignReturnAddr =
149       llvm::StringSwitch<LangOptions::SignReturnAddressScopeKind>(PBP.Scope)
150           .Case("non-leaf", LangOptions::SignReturnAddressScopeKind::NonLeaf)
151           .Case("all", LangOptions::SignReturnAddressScopeKind::All)
152           .Default(LangOptions::SignReturnAddressScopeKind::None);
153 
154   if (PBP.Key == "a_key")
155     BPI.SignKey = LangOptions::SignReturnAddressKeyKind::AKey;
156   else
157     BPI.SignKey = LangOptions::SignReturnAddressKeyKind::BKey;
158 
159   BPI.BranchTargetEnforcement = PBP.BranchTargetEnforcement;
160   return true;
161 }
162 
163 bool AArch64TargetInfo::isValidCPUName(StringRef Name) const {
164   return Name == "generic" ||
165          llvm::AArch64::parseCPUArch(Name) != llvm::AArch64::ArchKind::INVALID;
166 }
167 
// Accept any CPU name that isValidCPUName() recognises; no per-CPU state
// is recorded here.
bool AArch64TargetInfo::setCPU(const std::string &Name) {
  return isValidCPUName(Name);
}
171 
// Populate Values with every CPU name known to the LLVM AArch64 target
// parser.
void AArch64TargetInfo::fillValidCPUList(
    SmallVectorImpl<StringRef> &Values) const {
  llvm::AArch64::fillValidCPUArchList(Values);
}
176 
177 void AArch64TargetInfo::getTargetDefinesARMV81A(const LangOptions &Opts,
178                                                 MacroBuilder &Builder) const {
179   Builder.defineMacro("__ARM_FEATURE_QRDMX", "1");
180   Builder.defineMacro("__ARM_FEATURE_ATOMICS", "1");
181   Builder.defineMacro("__ARM_FEATURE_CRC32", "1");
182 }
183 
// Armv8.2-A adds no unconditional macros of its own here; it only pulls in
// the Armv8.1-A set.
void AArch64TargetInfo::getTargetDefinesARMV82A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Also include the ARMv8.1 defines
  getTargetDefinesARMV81A(Opts, Builder);
}
189 
190 void AArch64TargetInfo::getTargetDefinesARMV83A(const LangOptions &Opts,
191                                                 MacroBuilder &Builder) const {
192   Builder.defineMacro("__ARM_FEATURE_COMPLEX", "1");
193   Builder.defineMacro("__ARM_FEATURE_JCVT", "1");
194   // Also include the Armv8.2 defines
195   getTargetDefinesARMV82A(Opts, Builder);
196 }
197 
// Armv8.4-A adds no unconditional macros of its own here.
void AArch64TargetInfo::getTargetDefinesARMV84A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Also include the Armv8.3 defines
  getTargetDefinesARMV83A(Opts, Builder);
}
203 
// Armv8.5-A: the FRINT intrinsics become unconditional.
void AArch64TargetInfo::getTargetDefinesARMV85A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  Builder.defineMacro("__ARM_FEATURE_FRINT", "1");
  // Also include the Armv8.4 defines
  getTargetDefinesARMV84A(Opts, Builder);
}
210 
// Armv8.6-A defines (currently delegation only; see FIXME below).
void AArch64TargetInfo::getTargetDefinesARMV86A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Also include the Armv8.5 defines
  // FIXME: Armv8.6 makes the following extensions mandatory:
  // - __ARM_FEATURE_BF16
  // - __ARM_FEATURE_MATMUL_INT8
  // Handle them here.
  getTargetDefinesARMV85A(Opts, Builder);
}
220 
// Armv8.7-A adds no unconditional macros of its own here.
void AArch64TargetInfo::getTargetDefinesARMV87A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Also include the Armv8.6 defines
  getTargetDefinesARMV86A(Opts, Builder);
}
226 
// Armv8.8-A adds no unconditional macros of its own here.
void AArch64TargetInfo::getTargetDefinesARMV88A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Also include the Armv8.7 defines
  getTargetDefinesARMV87A(Opts, Builder);
}
232 
// Armv9-A inherits its mandatory feature macros from the v8 series.
void AArch64TargetInfo::getTargetDefinesARMV9A(const LangOptions &Opts,
                                               MacroBuilder &Builder) const {
  // Armv9-A maps to Armv8.5-A
  getTargetDefinesARMV85A(Opts, Builder);
}
238 
// Armv9.1-A inherits its mandatory feature macros from the v8 series.
void AArch64TargetInfo::getTargetDefinesARMV91A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Armv9.1-A maps to Armv8.6-A
  getTargetDefinesARMV86A(Opts, Builder);
}
244 
// Armv9.2-A inherits its mandatory feature macros from the v8 series.
void AArch64TargetInfo::getTargetDefinesARMV92A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Armv9.2-A maps to Armv8.7-A
  getTargetDefinesARMV87A(Opts, Builder);
}
250 
// Armv9.3-A inherits its mandatory feature macros from the v8 series.
void AArch64TargetInfo::getTargetDefinesARMV93A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Armv9.3-A maps to Armv8.8-A
  getTargetDefinesARMV88A(Opts, Builder);
}
256 
// Emit all predefined macros for the AArch64 target: identification and
// ABI/code-model macros, the ACLE feature macros implied by the flags
// computed in handleTargetFeatures(), and revision-specific macros.
void AArch64TargetInfo::getTargetDefines(const LangOptions &Opts,
                                         MacroBuilder &Builder) const {
  // Target identification.
  Builder.defineMacro("__aarch64__");
  // For bare-metal.
  if (getTriple().getOS() == llvm::Triple::UnknownOS &&
      getTriple().isOSBinFormatELF())
    Builder.defineMacro("__ELF__");

  // Target properties.
  if (!getTriple().isOSWindows() && getTriple().isArch64Bit()) {
    Builder.defineMacro("_LP64");
    Builder.defineMacro("__LP64__");
  }

  // __AARCH64_CMODEL_<MODEL>__; "default" is normalized to "small".
  std::string CodeModel = getTargetOpts().CodeModel;
  if (CodeModel == "default")
    CodeModel = "small";
  for (char &c : CodeModel)
    c = toupper(c);
  Builder.defineMacro("__AARCH64_CMODEL_" + CodeModel + "__");

  // ACLE predefines. Many can only have one possible value on v8 AArch64.
  Builder.defineMacro("__ARM_ACLE", "200");
  Builder.defineMacro("__ARM_ARCH", getArchVersionString(ArchKind));
  Builder.defineMacro("__ARM_ARCH_PROFILE", "'" + getArchProfile() + "'");

  Builder.defineMacro("__ARM_64BIT_STATE", "1");
  Builder.defineMacro("__ARM_PCS_AAPCS64", "1");
  Builder.defineMacro("__ARM_ARCH_ISA_A64", "1");

  Builder.defineMacro("__ARM_FEATURE_CLZ", "1");
  Builder.defineMacro("__ARM_FEATURE_FMA", "1");
  Builder.defineMacro("__ARM_FEATURE_LDREX", "0xF");
  Builder.defineMacro("__ARM_FEATURE_IDIV", "1"); // As specified in ACLE
  Builder.defineMacro("__ARM_FEATURE_DIV");       // For backwards compatibility
  Builder.defineMacro("__ARM_FEATURE_NUMERIC_MAXMIN", "1");
  Builder.defineMacro("__ARM_FEATURE_DIRECTED_ROUNDING", "1");

  Builder.defineMacro("__ARM_ALIGN_MAX_STACK_PWR", "4");

  // 0xe implies support for half, single and double precision operations.
  Builder.defineMacro("__ARM_FP", "0xE");

  // PCS specifies this for SysV variants, which is all we support. Other ABIs
  // may choose __ARM_FP16_FORMAT_ALTERNATIVE.
  Builder.defineMacro("__ARM_FP16_FORMAT_IEEE", "1");
  Builder.defineMacro("__ARM_FP16_ARGS", "1");

  if (Opts.UnsafeFPMath)
    Builder.defineMacro("__ARM_FP_FAST", "1");

  Builder.defineMacro("__ARM_SIZEOF_WCHAR_T",
                      Twine(Opts.WCharSize ? Opts.WCharSize : 4));

  Builder.defineMacro("__ARM_SIZEOF_MINIMAL_ENUM", Opts.ShortEnums ? "1" : "4");

  if (FPU & NeonMode) {
    Builder.defineMacro("__ARM_NEON", "1");
    // 64-bit NEON supports half, single and double precision operations.
    Builder.defineMacro("__ARM_NEON_FP", "0xE");
  }

  if (FPU & SveMode)
    Builder.defineMacro("__ARM_FEATURE_SVE", "1");

  if ((FPU & NeonMode) && (FPU & SveMode))
    Builder.defineMacro("__ARM_NEON_SVE_BRIDGE", "1");

  // The SVE2 sub-extension macros are only emitted when base SVE2 is on.
  if (HasSVE2)
    Builder.defineMacro("__ARM_FEATURE_SVE2", "1");

  if (HasSVE2 && HasSVE2AES)
    Builder.defineMacro("__ARM_FEATURE_SVE2_AES", "1");

  if (HasSVE2 && HasSVE2BitPerm)
    Builder.defineMacro("__ARM_FEATURE_SVE2_BITPERM", "1");

  if (HasSVE2 && HasSVE2SHA3)
    Builder.defineMacro("__ARM_FEATURE_SVE2_SHA3", "1");

  if (HasSVE2 && HasSVE2SM4)
    Builder.defineMacro("__ARM_FEATURE_SVE2_SM4", "1");

  if (HasCRC)
    Builder.defineMacro("__ARM_FEATURE_CRC32", "1");

  // The __ARM_FEATURE_CRYPTO is deprecated in favor of finer grained feature
  // macros for AES, SHA2, SHA3 and SM4
  if (HasAES && HasSHA2)
    Builder.defineMacro("__ARM_FEATURE_CRYPTO", "1");

  if (HasAES)
    Builder.defineMacro("__ARM_FEATURE_AES", "1");

  if (HasSHA2)
    Builder.defineMacro("__ARM_FEATURE_SHA2", "1");

  if (HasSHA3) {
    Builder.defineMacro("__ARM_FEATURE_SHA3", "1");
    Builder.defineMacro("__ARM_FEATURE_SHA512", "1");
  }

  if (HasSM4) {
    Builder.defineMacro("__ARM_FEATURE_SM3", "1");
    Builder.defineMacro("__ARM_FEATURE_SM4", "1");
  }

  if (HasUnaligned)
    Builder.defineMacro("__ARM_FEATURE_UNALIGNED", "1");

  if ((FPU & NeonMode) && HasFullFP16)
    Builder.defineMacro("__ARM_FEATURE_FP16_VECTOR_ARITHMETIC", "1");
  if (HasFullFP16)
   Builder.defineMacro("__ARM_FEATURE_FP16_SCALAR_ARITHMETIC", "1");

  if (HasDotProd)
    Builder.defineMacro("__ARM_FEATURE_DOTPROD", "1");

  if (HasMTE)
    Builder.defineMacro("__ARM_FEATURE_MEMORY_TAGGING", "1");

  if (HasTME)
    Builder.defineMacro("__ARM_FEATURE_TME", "1");

  if (HasMatMul)
    Builder.defineMacro("__ARM_FEATURE_MATMUL_INT8", "1");

  if (HasLSE)
    Builder.defineMacro("__ARM_FEATURE_ATOMICS", "1");

  if (HasBFloat16) {
    Builder.defineMacro("__ARM_FEATURE_BF16", "1");
    Builder.defineMacro("__ARM_FEATURE_BF16_VECTOR_ARITHMETIC", "1");
    Builder.defineMacro("__ARM_BF16_FORMAT_ALTERNATIVE", "1");
    Builder.defineMacro("__ARM_FEATURE_BF16_SCALAR_ARITHMETIC", "1");
  }

  if ((FPU & SveMode) && HasBFloat16) {
    Builder.defineMacro("__ARM_FEATURE_SVE_BF16", "1");
  }

  if ((FPU & SveMode) && HasMatmulFP64)
    Builder.defineMacro("__ARM_FEATURE_SVE_MATMUL_FP64", "1");

  if ((FPU & SveMode) && HasMatmulFP32)
    Builder.defineMacro("__ARM_FEATURE_SVE_MATMUL_FP32", "1");

  if ((FPU & SveMode) && HasMatMul)
    Builder.defineMacro("__ARM_FEATURE_SVE_MATMUL_INT8", "1");

  if ((FPU & NeonMode) && HasFP16FML)
    Builder.defineMacro("__ARM_FEATURE_FP16_FML", "1");

  if (Opts.hasSignReturnAddress()) {
    // Bitmask:
    // 0: Protection using the A key
    // 1: Protection using the B key
    // 2: Protection including leaf functions
    unsigned Value = 0;

    if (Opts.isSignReturnAddressWithAKey())
      Value |= (1 << 0);
    else
      Value |= (1 << 1);

    if (Opts.isSignReturnAddressScopeAll())
      Value |= (1 << 2);

    Builder.defineMacro("__ARM_FEATURE_PAC_DEFAULT", std::to_string(Value));
  }

  if (Opts.BranchTargetEnforcement)
    Builder.defineMacro("__ARM_FEATURE_BTI_DEFAULT", "1");

  if (HasLS64)
    Builder.defineMacro("__ARM_FEATURE_LS64", "1");

  if (HasRandGen)
    Builder.defineMacro("__ARM_FEATURE_RNG", "1");

  if (HasMOPS)
    Builder.defineMacro("__ARM_FEATURE_MOPS", "1");

  // Revision-specific macros; each getTargetDefinesARMV* helper chains
  // down to the previous revision's set.
  switch (ArchKind) {
  default:
    break;
  case llvm::AArch64::ArchKind::ARMV8_1A:
    getTargetDefinesARMV81A(Opts, Builder);
    break;
  case llvm::AArch64::ArchKind::ARMV8_2A:
    getTargetDefinesARMV82A(Opts, Builder);
    break;
  case llvm::AArch64::ArchKind::ARMV8_3A:
    getTargetDefinesARMV83A(Opts, Builder);
    break;
  case llvm::AArch64::ArchKind::ARMV8_4A:
    getTargetDefinesARMV84A(Opts, Builder);
    break;
  case llvm::AArch64::ArchKind::ARMV8_5A:
    getTargetDefinesARMV85A(Opts, Builder);
    break;
  case llvm::AArch64::ArchKind::ARMV8_6A:
    getTargetDefinesARMV86A(Opts, Builder);
    break;
  case llvm::AArch64::ArchKind::ARMV8_7A:
    getTargetDefinesARMV87A(Opts, Builder);
    break;
  case llvm::AArch64::ArchKind::ARMV8_8A:
    getTargetDefinesARMV88A(Opts, Builder);
    break;
  case llvm::AArch64::ArchKind::ARMV9A:
    getTargetDefinesARMV9A(Opts, Builder);
    break;
  case llvm::AArch64::ArchKind::ARMV9_1A:
    getTargetDefinesARMV91A(Opts, Builder);
    break;
  case llvm::AArch64::ArchKind::ARMV9_2A:
    getTargetDefinesARMV92A(Opts, Builder);
    break;
  case llvm::AArch64::ArchKind::ARMV9_3A:
    getTargetDefinesARMV93A(Opts, Builder);
    break;
  }

  // All of the __sync_(bool|val)_compare_and_swap_(1|2|4|8) builtins work.
  Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_1");
  Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_2");
  Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4");
  Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8");

  // Allow detection of fast FMA support.
  Builder.defineMacro("__FP_FAST_FMA", "1");
  Builder.defineMacro("__FP_FAST_FMAF", "1");

  // Fixed-length SVE: only exposed when the minimum and maximum vscale
  // agree, i.e. the vector length is a single known value.
  if (Opts.VScaleMin && Opts.VScaleMin == Opts.VScaleMax) {
    Builder.defineMacro("__ARM_FEATURE_SVE_BITS", Twine(Opts.VScaleMin * 128));
    Builder.defineMacro("__ARM_FEATURE_SVE_VECTOR_OPERATORS");
  }
}
497 
498 ArrayRef<Builtin::Info> AArch64TargetInfo::getTargetBuiltins() const {
499   return llvm::makeArrayRef(BuiltinInfo, clang::AArch64::LastTSBuiltin -
500                                              Builtin::FirstTSBuiltin);
501 }
502 
503 Optional<std::pair<unsigned, unsigned>>
504 AArch64TargetInfo::getVScaleRange(const LangOptions &LangOpts) const {
505   if (LangOpts.VScaleMin || LangOpts.VScaleMax)
506     return std::pair<unsigned, unsigned>(
507         LangOpts.VScaleMin ? LangOpts.VScaleMin : 1, LangOpts.VScaleMax);
508 
509   if (hasFeature("sve"))
510     return std::pair<unsigned, unsigned>(1, 16);
511 
512   return None;
513 }
514 
515 bool AArch64TargetInfo::hasFeature(StringRef Feature) const {
516   return llvm::StringSwitch<bool>(Feature)
517     .Cases("aarch64", "arm64", "arm", true)
518     .Case("neon", FPU & NeonMode)
519     .Cases("sve", "sve2", "sve2-bitperm", "sve2-aes", "sve2-sha3", "sve2-sm4", "f64mm", "f32mm", "i8mm", "bf16", FPU & SveMode)
520     .Case("ls64", HasLS64)
521     .Default(false);
522 }
523 
524 bool AArch64TargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
525                                              DiagnosticsEngine &Diags) {
526   FPU = FPUMode;
527   HasCRC = false;
528   HasAES = false;
529   HasSHA2 = false;
530   HasSHA3 = false;
531   HasSM4 = false;
532   HasUnaligned = true;
533   HasFullFP16 = false;
534   HasDotProd = false;
535   HasFP16FML = false;
536   HasMTE = false;
537   HasTME = false;
538   HasLS64 = false;
539   HasRandGen = false;
540   HasMatMul = false;
541   HasBFloat16 = false;
542   HasSVE2 = false;
543   HasSVE2AES = false;
544   HasSVE2SHA3 = false;
545   HasSVE2SM4 = false;
546   HasSVE2BitPerm = false;
547   HasMatmulFP64 = false;
548   HasMatmulFP32 = false;
549   HasLSE = false;
550   HasMOPS = false;
551 
552   ArchKind = llvm::AArch64::ArchKind::INVALID;
553 
554   for (const auto &Feature : Features) {
555     if (Feature == "+neon")
556       FPU |= NeonMode;
557     if (Feature == "+sve") {
558       FPU |= SveMode;
559       HasFullFP16 = true;
560     }
561     if (Feature == "+sve2") {
562       FPU |= SveMode;
563       HasFullFP16 = true;
564       HasSVE2 = true;
565     }
566     if (Feature == "+sve2-aes") {
567       FPU |= SveMode;
568       HasFullFP16 = true;
569       HasSVE2 = true;
570       HasSVE2AES = true;
571     }
572     if (Feature == "+sve2-sha3") {
573       FPU |= SveMode;
574       HasFullFP16 = true;
575       HasSVE2 = true;
576       HasSVE2SHA3 = true;
577     }
578     if (Feature == "+sve2-sm4") {
579       FPU |= SveMode;
580       HasFullFP16 = true;
581       HasSVE2 = true;
582       HasSVE2SM4 = true;
583     }
584     if (Feature == "+sve2-bitperm") {
585       FPU |= SveMode;
586       HasFullFP16 = true;
587       HasSVE2 = true;
588       HasSVE2BitPerm = true;
589     }
590     if (Feature == "+f32mm") {
591       FPU |= SveMode;
592       HasMatmulFP32 = true;
593     }
594     if (Feature == "+f64mm") {
595       FPU |= SveMode;
596       HasMatmulFP64 = true;
597     }
598     if (Feature == "+crc")
599       HasCRC = true;
600     if (Feature == "+aes")
601       HasAES = true;
602     if (Feature == "+sha2")
603       HasSHA2 = true;
604     if (Feature == "+sha3") {
605       HasSHA2 = true;
606       HasSHA3 = true;
607     }
608     if (Feature == "+sm4")
609       HasSM4 = true;
610     if (Feature == "+strict-align")
611       HasUnaligned = false;
612     if (Feature == "+v8a")
613       ArchKind = llvm::AArch64::ArchKind::ARMV8A;
614     if (Feature == "+v8.1a")
615       ArchKind = llvm::AArch64::ArchKind::ARMV8_1A;
616     if (Feature == "+v8.2a")
617       ArchKind = llvm::AArch64::ArchKind::ARMV8_2A;
618     if (Feature == "+v8.3a")
619       ArchKind = llvm::AArch64::ArchKind::ARMV8_3A;
620     if (Feature == "+v8.4a")
621       ArchKind = llvm::AArch64::ArchKind::ARMV8_4A;
622     if (Feature == "+v8.5a")
623       ArchKind = llvm::AArch64::ArchKind::ARMV8_5A;
624     if (Feature == "+v8.6a")
625       ArchKind = llvm::AArch64::ArchKind::ARMV8_6A;
626     if (Feature == "+v8.7a")
627       ArchKind = llvm::AArch64::ArchKind::ARMV8_7A;
628     if (Feature == "+v8.8a")
629       ArchKind = llvm::AArch64::ArchKind::ARMV8_8A;
630     if (Feature == "+v9a")
631       ArchKind = llvm::AArch64::ArchKind::ARMV9A;
632     if (Feature == "+v9.1a")
633       ArchKind = llvm::AArch64::ArchKind::ARMV9_1A;
634     if (Feature == "+v9.2a")
635       ArchKind = llvm::AArch64::ArchKind::ARMV9_2A;
636     if (Feature == "+v9.3a")
637       ArchKind = llvm::AArch64::ArchKind::ARMV9_3A;
638     if (Feature == "+v8r")
639       ArchKind = llvm::AArch64::ArchKind::ARMV8R;
640     if (Feature == "+fullfp16")
641       HasFullFP16 = true;
642     if (Feature == "+dotprod")
643       HasDotProd = true;
644     if (Feature == "+fp16fml")
645       HasFP16FML = true;
646     if (Feature == "+mte")
647       HasMTE = true;
648     if (Feature == "+tme")
649       HasTME = true;
650     if (Feature == "+pauth")
651       HasPAuth = true;
652     if (Feature == "+i8mm")
653       HasMatMul = true;
654     if (Feature == "+bf16")
655       HasBFloat16 = true;
656     if (Feature == "+lse")
657       HasLSE = true;
658     if (Feature == "+ls64")
659       HasLS64 = true;
660     if (Feature == "+rand")
661       HasRandGen = true;
662     if (Feature == "+flagm")
663       HasFlagM = true;
664     if (Feature == "+mops")
665       HasMOPS = true;
666   }
667 
668   setDataLayout();
669 
670   return true;
671 }
672 
673 TargetInfo::CallingConvCheckResult
674 AArch64TargetInfo::checkCallingConvention(CallingConv CC) const {
675   switch (CC) {
676   case CC_C:
677   case CC_Swift:
678   case CC_SwiftAsync:
679   case CC_PreserveMost:
680   case CC_PreserveAll:
681   case CC_OpenCLKernel:
682   case CC_AArch64VectorCall:
683   case CC_AArch64SVEPCS:
684   case CC_Win64:
685     return CCCR_OK;
686   default:
687     return CCCR_Warning;
688   }
689 }
690 
691 bool AArch64TargetInfo::isCLZForZeroUndef() const { return false; }
692 
// AArch64 uses the AAPCS64-defined aggregate __builtin_va_list type.
TargetInfo::BuiltinVaListKind AArch64TargetInfo::getBuiltinVaListKind() const {
  return TargetInfo::AArch64ABIBuiltinVaList;
}
696 
// Register names accepted in GCC-style inline assembly for this target.
const char *const AArch64TargetInfo::GCCRegNames[] = {
    // 32-bit Integer registers
    "w0", "w1", "w2", "w3", "w4", "w5", "w6", "w7", "w8", "w9", "w10", "w11",
    "w12", "w13", "w14", "w15", "w16", "w17", "w18", "w19", "w20", "w21", "w22",
    "w23", "w24", "w25", "w26", "w27", "w28", "w29", "w30", "wsp",

    // 64-bit Integer registers
    "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11",
    "x12", "x13", "x14", "x15", "x16", "x17", "x18", "x19", "x20", "x21", "x22",
    "x23", "x24", "x25", "x26", "x27", "x28", "fp", "lr", "sp",

    // 32-bit floating point regsisters
    "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7", "s8", "s9", "s10", "s11",
    "s12", "s13", "s14", "s15", "s16", "s17", "s18", "s19", "s20", "s21", "s22",
    "s23", "s24", "s25", "s26", "s27", "s28", "s29", "s30", "s31",

    // 64-bit floating point regsisters
    "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7", "d8", "d9", "d10", "d11",
    "d12", "d13", "d14", "d15", "d16", "d17", "d18", "d19", "d20", "d21", "d22",
    "d23", "d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31",

    // Neon vector registers
    "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11",
    "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22",
    "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31",

    // SVE vector registers
    "z0",  "z1",  "z2",  "z3",  "z4",  "z5",  "z6",  "z7",  "z8",  "z9",  "z10",
    "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21",
    "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31",

    // SVE predicate registers
    "p0",  "p1",  "p2",  "p3",  "p4",  "p5",  "p6",  "p7",  "p8",  "p9",  "p10",
    "p11", "p12", "p13", "p14", "p15"
};
732 
// Expose the static register-name table above.
ArrayRef<const char *> AArch64TargetInfo::getGCCRegNames() const {
  return llvm::makeArrayRef(GCCRegNames);
}
736 
// Alternate register spellings accepted in inline assembly; each alias
// maps to the canonical name from GCCRegNames.
const TargetInfo::GCCRegAlias AArch64TargetInfo::GCCRegAliases[] = {
    {{"w31"}, "wsp"},
    {{"x31"}, "sp"},
    // GCC rN registers are aliases of xN registers.
    {{"r0"}, "x0"},
    {{"r1"}, "x1"},
    {{"r2"}, "x2"},
    {{"r3"}, "x3"},
    {{"r4"}, "x4"},
    {{"r5"}, "x5"},
    {{"r6"}, "x6"},
    {{"r7"}, "x7"},
    {{"r8"}, "x8"},
    {{"r9"}, "x9"},
    {{"r10"}, "x10"},
    {{"r11"}, "x11"},
    {{"r12"}, "x12"},
    {{"r13"}, "x13"},
    {{"r14"}, "x14"},
    {{"r15"}, "x15"},
    {{"r16"}, "x16"},
    {{"r17"}, "x17"},
    {{"r18"}, "x18"},
    {{"r19"}, "x19"},
    {{"r20"}, "x20"},
    {{"r21"}, "x21"},
    {{"r22"}, "x22"},
    {{"r23"}, "x23"},
    {{"r24"}, "x24"},
    {{"r25"}, "x25"},
    {{"r26"}, "x26"},
    {{"r27"}, "x27"},
    {{"r28"}, "x28"},
    {{"r29", "x29"}, "fp"},
    {{"r30", "x30"}, "lr"},
    // The S/D/Q and W/X registers overlap, but aren't really aliases; we
    // don't want to substitute one of these for a different-sized one.
};
775 
// Expose the static register-alias table above.
ArrayRef<TargetInfo::GCCRegAlias> AArch64TargetInfo::getGCCRegAliases() const {
  return llvm::makeArrayRef(GCCRegAliases);
}
779 
780 bool AArch64TargetInfo::validateAsmConstraint(
781     const char *&Name, TargetInfo::ConstraintInfo &Info) const {
782   switch (*Name) {
783   default:
784     return false;
785   case 'w': // Floating point and SIMD registers (V0-V31)
786     Info.setAllowsRegister();
787     return true;
788   case 'I': // Constant that can be used with an ADD instruction
789   case 'J': // Constant that can be used with a SUB instruction
790   case 'K': // Constant that can be used with a 32-bit logical instruction
791   case 'L': // Constant that can be used with a 64-bit logical instruction
792   case 'M': // Constant that can be used as a 32-bit MOV immediate
793   case 'N': // Constant that can be used as a 64-bit MOV immediate
794   case 'Y': // Floating point constant zero
795   case 'Z': // Integer constant zero
796     return true;
797   case 'Q': // A memory reference with base register and no offset
798     Info.setAllowsMemory();
799     return true;
800   case 'S': // A symbolic address
801     Info.setAllowsRegister();
802     return true;
803   case 'U':
804     if (Name[1] == 'p' && (Name[2] == 'l' || Name[2] == 'a')) {
805       // SVE predicate registers ("Upa"=P0-15, "Upl"=P0-P7)
806       Info.setAllowsRegister();
807       Name += 2;
808       return true;
809     }
810     // Ump: A memory address suitable for ldp/stp in SI, DI, SF and DF modes.
811     // Utf: A memory address suitable for ldp/stp in TF mode.
812     // Usa: An absolute symbolic address.
813     // Ush: The high part (bits 32:12) of a pc-relative symbolic address.
814 
815     // Better to return an error saying that it's an unrecognised constraint
816     // even if this is a valid constraint in gcc.
817     return false;
818   case 'z': // Zero register, wzr or xzr
819     Info.setAllowsRegister();
820     return true;
821   case 'x': // Floating point and SIMD registers (V0-V15)
822     Info.setAllowsRegister();
823     return true;
824   case 'y': // SVE registers (V0-V7)
825     Info.setAllowsRegister();
826     return true;
827   }
828   return false;
829 }
830 
831 bool AArch64TargetInfo::validateConstraintModifier(
832     StringRef Constraint, char Modifier, unsigned Size,
833     std::string &SuggestedModifier) const {
834   // Strip off constraint modifiers.
835   while (Constraint[0] == '=' || Constraint[0] == '+' || Constraint[0] == '&')
836     Constraint = Constraint.substr(1);
837 
838   switch (Constraint[0]) {
839   default:
840     return true;
841   case 'z':
842   case 'r': {
843     switch (Modifier) {
844     case 'x':
845     case 'w':
846       // For now assume that the person knows what they're
847       // doing with the modifier.
848       return true;
849     default:
850       // By default an 'r' constraint will be in the 'x'
851       // registers.
852       if (Size == 64)
853         return true;
854 
855       if (Size == 512)
856         return HasLS64;
857 
858       SuggestedModifier = "w";
859       return false;
860     }
861   }
862   }
863 }
864 
865 const char *AArch64TargetInfo::getClobbers() const { return ""; }
866 
867 int AArch64TargetInfo::getEHDataRegisterNumber(unsigned RegNo) const {
868   if (RegNo == 0)
869     return 0;
870   if (RegNo == 1)
871     return 1;
872   return -1;
873 }
874 
875 bool AArch64TargetInfo::hasInt128Type() const { return true; }
876 
// Little-endian AArch64: all configuration is inherited from the common
// AArch64TargetInfo base.
AArch64leTargetInfo::AArch64leTargetInfo(const llvm::Triple &Triple,
                                         const TargetOptions &Opts)
    : AArch64TargetInfo(Triple, Opts) {}
880 
881 void AArch64leTargetInfo::setDataLayout() {
882   if (getTriple().isOSBinFormatMachO()) {
883     if(getTriple().isArch32Bit())
884       resetDataLayout("e-m:o-p:32:32-i64:64-i128:128-n32:64-S128", "_");
885     else
886       resetDataLayout("e-m:o-i64:64-i128:128-n32:64-S128", "_");
887   } else
888     resetDataLayout("e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128");
889 }
890 
void AArch64leTargetInfo::getTargetDefines(const LangOptions &Opts,
                                           MacroBuilder &Builder) const {
  // Little-endian marker macro; all common AArch64 defines come from the base.
  Builder.defineMacro("__AARCH64EL__");
  AArch64TargetInfo::getTargetDefines(Opts, Builder);
}
896 
// Big-endian AArch64: delegates all configuration to the generic AArch64
// target; the big-endian data layout and macros are in the overrides below.
AArch64beTargetInfo::AArch64beTargetInfo(const llvm::Triple &Triple,
                                         const TargetOptions &Opts)
    : AArch64TargetInfo(Triple, Opts) {}
900 
void AArch64beTargetInfo::getTargetDefines(const LangOptions &Opts,
                                           MacroBuilder &Builder) const {
  // Big-endian marker macros (both the AArch64 spelling and the legacy ARM
  // spellings), followed by the common AArch64 defines from the base class.
  Builder.defineMacro("__AARCH64EB__");
  Builder.defineMacro("__AARCH_BIG_ENDIAN");
  Builder.defineMacro("__ARM_BIG_ENDIAN");
  AArch64TargetInfo::getTargetDefines(Opts, Builder);
}
908 
void AArch64beTargetInfo::setDataLayout() {
  // Big-endian AArch64 is not supported for Mach-O; only the ELF ("m:e")
  // layout exists here. Note the leading 'E' marking big-endian.
  assert(!getTriple().isOSBinFormatMachO());
  resetDataLayout("E-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128");
}
913 
// Windows on ARM64: apply the LLP64 type model on top of the little-endian
// AArch64 target.
WindowsARM64TargetInfo::WindowsARM64TargetInfo(const llvm::Triple &Triple,
                                               const TargetOptions &Opts)
    : WindowsTargetInfo<AArch64leTargetInfo>(Triple, Opts), Triple(Triple) {

  // This is an LLP64 platform.
  // int:4, long:4, long long:8, long double:8.
  IntWidth = IntAlign = 32;
  LongWidth = LongAlign = 32;
  DoubleAlign = LongLongAlign = 64;
  LongDoubleWidth = LongDoubleAlign = 64;
  // 'long double' is plain IEEE double (64-bit) on Windows.
  LongDoubleFormat = &llvm::APFloat::IEEEdouble();
  // Under LLP64, 64-bit integral typedefs map to 'long long', not 'long'.
  IntMaxType = SignedLongLong;
  Int64Type = SignedLongLong;
  SizeType = UnsignedLongLong;
  PtrDiffType = SignedLongLong;
  IntPtrType = SignedLongLong;
}
931 
932 void WindowsARM64TargetInfo::setDataLayout() {
933   resetDataLayout(Triple.isOSBinFormatMachO()
934                       ? "e-m:o-i64:64-i128:128-n32:64-S128"
935                       : "e-m:w-p:64:64-i32:32-i64:64-i128:128-n32:64-S128",
936                   Triple.isOSBinFormatMachO() ? "_" : "");
937 }
938 
// Windows on ARM64 uses the simple 'char *' form of va_list.
TargetInfo::BuiltinVaListKind
WindowsARM64TargetInfo::getBuiltinVaListKind() const {
  return TargetInfo::CharPtrBuiltinVaList;
}
943 
944 TargetInfo::CallingConvCheckResult
945 WindowsARM64TargetInfo::checkCallingConvention(CallingConv CC) const {
946   switch (CC) {
947   case CC_X86StdCall:
948   case CC_X86ThisCall:
949   case CC_X86FastCall:
950   case CC_X86VectorCall:
951     return CCCR_Ignore;
952   case CC_C:
953   case CC_OpenCLKernel:
954   case CC_PreserveMost:
955   case CC_PreserveAll:
956   case CC_Swift:
957   case CC_SwiftAsync:
958   case CC_Win64:
959     return CCCR_OK;
960   default:
961     return CCCR_Warning;
962   }
963 }
964 
// MSVC-environment Windows ARM64 target: same as WindowsARM64TargetInfo but
// with the Microsoft C++ ABI.
MicrosoftARM64TargetInfo::MicrosoftARM64TargetInfo(const llvm::Triple &Triple,
                                                   const TargetOptions &Opts)
    : WindowsARM64TargetInfo(Triple, Opts) {
  TheCXXABI.set(TargetCXXABI::Microsoft);
}
970 
void MicrosoftARM64TargetInfo::getTargetDefines(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  WindowsARM64TargetInfo::getTargetDefines(Opts, Builder);
  // MSVC's architecture-identification macro for ARM64.
  Builder.defineMacro("_M_ARM64", "1");
}
976 
TargetInfo::CallingConvKind
MicrosoftARM64TargetInfo::getCallingConvKind(bool ClangABICompat4) const {
  // The Microsoft Win64 convention kind applies unconditionally; the
  // ABI-compat flag is intentionally ignored here.
  return CCK_MicrosoftWin64;
}
981 
982 unsigned MicrosoftARM64TargetInfo::getMinGlobalAlign(uint64_t TypeSize) const {
983   unsigned Align = WindowsARM64TargetInfo::getMinGlobalAlign(TypeSize);
984 
985   // MSVC does size based alignment for arm64 based on alignment section in
986   // below document, replicate that to keep alignment consistent with object
987   // files compiled by MSVC.
988   // https://docs.microsoft.com/en-us/cpp/build/arm64-windows-abi-conventions
989   if (TypeSize >= 512) {              // TypeSize >= 64 bytes
990     Align = std::max(Align, 128u);    // align type at least 16 bytes
991   } else if (TypeSize >= 64) {        // TypeSize >= 8 bytes
992     Align = std::max(Align, 64u);     // align type at least 8 butes
993   } else if (TypeSize >= 16) {        // TypeSize >= 2 bytes
994     Align = std::max(Align, 32u);     // align type at least 4 bytes
995   }
996   return Align;
997 }
998 
// MinGW (GNU-environment) Windows ARM64 target: same as
// WindowsARM64TargetInfo but with the generic AArch64 (Itanium) C++ ABI.
MinGWARM64TargetInfo::MinGWARM64TargetInfo(const llvm::Triple &Triple,
                                           const TargetOptions &Opts)
    : WindowsARM64TargetInfo(Triple, Opts) {
  TheCXXABI.set(TargetCXXABI::GenericAArch64);
}
1004 
// Darwin (macOS/iOS/watchOS) AArch64 target configuration.
DarwinAArch64TargetInfo::DarwinAArch64TargetInfo(const llvm::Triple &Triple,
                                                 const TargetOptions &Opts)
    : DarwinTargetInfo<AArch64leTargetInfo>(Triple, Opts) {
  Int64Type = SignedLongLong;
  // On 32-bit slices (arm64_32), intmax_t is still 'long long'.
  if (getTriple().isArch32Bit())
    IntMaxType = SignedLongLong;

  WCharType = SignedInt;
  UseSignedCharForObjCBool = false;

  // 'long double' is plain IEEE double (64-bit) on Darwin AArch64.
  LongDoubleWidth = LongDoubleAlign = SuitableAlign = 64;
  LongDoubleFormat = &llvm::APFloat::IEEEdouble();

  UseZeroLengthBitfieldAlignment = false;

  if (getTriple().isArch32Bit()) {
    // arm64_32 bitfield layout quirks, plus the WatchOS C++ ABI.
    UseBitFieldTypeAlignment = false;
    ZeroLengthBitfieldBoundary = 32;
    UseZeroLengthBitfieldAlignment = true;
    TheCXXABI.set(TargetCXXABI::WatchOS);
  } else
    TheCXXABI.set(TargetCXXABI::AppleARM64);
}
1028 
void DarwinAArch64TargetInfo::getOSDefines(const LangOptions &Opts,
                                           const llvm::Triple &Triple,
                                           MacroBuilder &Builder) const {
  Builder.defineMacro("__AARCH64_SIMD__");
  // Architecture macro differs between 32-bit (arm64_32) and 64-bit slices.
  if (Triple.isArch32Bit())
    Builder.defineMacro("__ARM64_ARCH_8_32__");
  else
    Builder.defineMacro("__ARM64_ARCH_8__");
  Builder.defineMacro("__ARM_NEON__");
  Builder.defineMacro("__LITTLE_ENDIAN__");
  Builder.defineMacro("__REGISTER_PREFIX__", "");
  Builder.defineMacro("__arm64", "1");
  Builder.defineMacro("__arm64__", "1");

  // Pointer-authentication-enabled variant of the architecture.
  if (Triple.isArm64e())
    Builder.defineMacro("__arm64e__", "1");

  // Common Darwin platform defines (OS version, availability, etc.).
  getDarwinDefines(Builder, Opts, Triple, PlatformName, PlatformMinVersion);
}
1048 
// Darwin AArch64 uses the simple 'char *' form of va_list.
TargetInfo::BuiltinVaListKind
DarwinAArch64TargetInfo::getBuiltinVaListKind() const {
  return TargetInfo::CharPtrBuiltinVaList;
}
1053 
// 64-bit RenderScript is aarch64: reuse the little-endian AArch64 target,
// forcing the architecture component of the triple to "aarch64" while keeping
// the original vendor, OS, and environment.
RenderScript64TargetInfo::RenderScript64TargetInfo(const llvm::Triple &Triple,
                                                   const TargetOptions &Opts)
    : AArch64leTargetInfo(llvm::Triple("aarch64", Triple.getVendorName(),
                                       Triple.getOSName(),
                                       Triple.getEnvironmentName()),
                          Opts) {
  IsRenderScriptTarget = true;
}
1063 
void RenderScript64TargetInfo::getTargetDefines(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // RenderScript marker macro, plus all standard AArch64 little-endian
  // defines.
  Builder.defineMacro("__RENDERSCRIPT__");
  AArch64leTargetInfo::getTargetDefines(Opts, Builder);
}
1069