//===--- AArch64.cpp - Implement AArch64 target feature support ----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements AArch64 TargetInfo objects.
//
//===----------------------------------------------------------------------===//

#include "AArch64.h"
#include "clang/Basic/LangOptions.h"
#include "clang/Basic/TargetBuiltins.h"
#include "clang/Basic/TargetInfo.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/Support/AArch64TargetParser.h"

using namespace clang;
using namespace clang::targets;

const Builtin::Info AArch64TargetInfo::BuiltinInfo[] = {
#define BUILTIN(ID, TYPE, ATTRS)                                               \
  {#ID, TYPE, ATTRS, nullptr, ALL_LANGUAGES, nullptr},
#include "clang/Basic/BuiltinsNEON.def"

#define BUILTIN(ID, TYPE, ATTRS)                                               \
  {#ID, TYPE, ATTRS, nullptr, ALL_LANGUAGES, nullptr},
#include "clang/Basic/BuiltinsSVE.def"

#define BUILTIN(ID, TYPE, ATTRS)                                               \
  {#ID, TYPE, ATTRS, nullptr, ALL_LANGUAGES, nullptr},
#define LANGBUILTIN(ID, TYPE, ATTRS, LANG)                                     \
  {#ID, TYPE, ATTRS, nullptr, LANG, nullptr},
#define TARGET_HEADER_BUILTIN(ID, TYPE, ATTRS, HEADER, LANGS, FEATURE)         \
  {#ID, TYPE, ATTRS, HEADER, LANGS, FEATURE},
#include "clang/Basic/BuiltinsAArch64.def"
};

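// Map the selected ArchKind to the major architecture version reported by
// __ARM_ARCH: the Armv9 variants report 9, everything else reports 8.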
static StringRef getArchVersionString(llvm::AArch64::ArchKind Kind) {
  switch (Kind) {
  case llvm::AArch64::ArchKind::ARMV9A:
  case llvm::AArch64::ArchKind::ARMV9_1A:
  case llvm::AArch64::ArchKind::ARMV9_2A:
    return "9";
  default:
    return "8";
  }
}

AArch64TargetInfo::AArch64TargetInfo(const llvm::Triple &Triple,
                                     const TargetOptions &Opts)
    : TargetInfo(Triple), ABI("aapcs") {
  if (getTriple().isOSOpenBSD()) {
    Int64Type = SignedLongLong;
    IntMaxType = SignedLongLong;
  } else {
    if (!getTriple().isOSDarwin() && !getTriple().isOSNetBSD())
      WCharType = UnsignedInt;

    Int64Type = SignedLong;
    IntMaxType = SignedLong;
  }

  // All AArch64 implementations support ARMv8 FP, which makes half a legal
  // type.
  HasLegalHalfType = true;
  HasFloat16 = true;

  if (Triple.isArch64Bit())
    LongWidth = LongAlign = PointerWidth = PointerAlign = 64;
  else
    LongWidth = LongAlign = PointerWidth = PointerAlign = 32;

  MaxVectorAlign = 128;
  MaxAtomicInlineWidth = 128;
  MaxAtomicPromoteWidth = 128;

  LongDoubleWidth = LongDoubleAlign = SuitableAlign = 128;
  LongDoubleFormat = &llvm::APFloat::IEEEquad();

  BFloat16Width = BFloat16Align = 16;
  BFloat16Format = &llvm::APFloat::BFloat();

  // Make __builtin_ms_va_list available.
  HasBuiltinMSVaList = true;

  // Make the SVE types available.  Note that this deliberately doesn't
  // depend on SveMode, since in principle it should be possible to turn
  // SVE on and off within a translation unit.  It should also be possible
  // to compile the global declaration:
  //
  // __SVInt8_t *ptr;
  //
  // even without SVE.
  HasAArch64SVETypes = true;

  // {} in inline assembly are neon specifiers, not assembly variant
  // specifiers.
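  // For example, in "ld1 {v0.4s}, [x0]" the braces denote a NEON register
  // list rather than a "{alt0|alt1}" assembler-variant alternative.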
  NoAsmVariants = true;

  // AAPCS gives rules for bitfields. 7.1.7 says: "The container type
  // contributes to the alignment of the containing aggregate in the same way
  // a plain (non bit-field) member of that type would, without exception for
  // zero-sized or anonymous bit-fields."
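  // For example, "struct S { long long X : 1; };" is aligned as if it
  // contained a plain long long member, even though only one bit of storage
  // is used.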
  assert(UseBitFieldTypeAlignment && "bitfields affect type alignment");
  UseZeroLengthBitfieldAlignment = true;

  // AArch64 targets default to using the ARM C++ ABI.
  TheCXXABI.set(TargetCXXABI::GenericAArch64);

  if (Triple.getOS() == llvm::Triple::Linux)
    this->MCountName = "\01_mcount";
  else if (Triple.getOS() == llvm::Triple::UnknownOS)
    this->MCountName =
        Opts.EABIVersion == llvm::EABI::GNU ? "\01_mcount" : "mcount";
}

StringRef AArch64TargetInfo::getABI() const { return ABI; }

bool AArch64TargetInfo::setABI(const std::string &Name) {
  if (Name != "aapcs" && Name != "darwinpcs")
    return false;

  ABI = Name;
  return true;
}

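// Translate a -mbranch-protection specification such as "standard" or
// "pac-ret+leaf+b-key" (the accepted syntax is defined by
// llvm::AArch64::parseBranchProtection) into BranchProtectionInfo.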
bool AArch64TargetInfo::validateBranchProtection(StringRef Spec,
                                                 BranchProtectionInfo &BPI,
                                                 StringRef &Err) const {
  llvm::AArch64::ParsedBranchProtection PBP;
  if (!llvm::AArch64::parseBranchProtection(Spec, PBP, Err))
    return false;

  BPI.SignReturnAddr =
      llvm::StringSwitch<LangOptions::SignReturnAddressScopeKind>(PBP.Scope)
          .Case("non-leaf", LangOptions::SignReturnAddressScopeKind::NonLeaf)
          .Case("all", LangOptions::SignReturnAddressScopeKind::All)
          .Default(LangOptions::SignReturnAddressScopeKind::None);

  if (PBP.Key == "a_key")
    BPI.SignKey = LangOptions::SignReturnAddressKeyKind::AKey;
  else
    BPI.SignKey = LangOptions::SignReturnAddressKeyKind::BKey;

  BPI.BranchTargetEnforcement = PBP.BranchTargetEnforcement;
  return true;
}

bool AArch64TargetInfo::isValidCPUName(StringRef Name) const {
  return Name == "generic" ||
         llvm::AArch64::parseCPUArch(Name) != llvm::AArch64::ArchKind::INVALID;
}

bool AArch64TargetInfo::setCPU(const std::string &Name) {
  return isValidCPUName(Name);
}

void AArch64TargetInfo::fillValidCPUList(
    SmallVectorImpl<StringRef> &Values) const {
  llvm::AArch64::fillValidCPUArchList(Values);
}

void AArch64TargetInfo::getTargetDefinesARMV81A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  Builder.defineMacro("__ARM_FEATURE_QRDMX", "1");
  Builder.defineMacro("__ARM_FEATURE_ATOMICS", "1");
  Builder.defineMacro("__ARM_FEATURE_CRC32", "1");
}

void AArch64TargetInfo::getTargetDefinesARMV82A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Also include the ARMv8.1 defines
  getTargetDefinesARMV81A(Opts, Builder);
}

void AArch64TargetInfo::getTargetDefinesARMV83A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  Builder.defineMacro("__ARM_FEATURE_COMPLEX", "1");
  Builder.defineMacro("__ARM_FEATURE_JCVT", "1");
  // Also include the Armv8.2 defines
  getTargetDefinesARMV82A(Opts, Builder);
}

void AArch64TargetInfo::getTargetDefinesARMV84A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Also include the Armv8.3 defines
  getTargetDefinesARMV83A(Opts, Builder);
}

void AArch64TargetInfo::getTargetDefinesARMV85A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  Builder.defineMacro("__ARM_FEATURE_FRINT", "1");
  // Also include the Armv8.4 defines
  getTargetDefinesARMV84A(Opts, Builder);
}

void AArch64TargetInfo::getTargetDefinesARMV86A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Also include the Armv8.5 defines
  // FIXME: Armv8.6 makes the following extensions mandatory:
  // - __ARM_FEATURE_BF16
  // - __ARM_FEATURE_MATMUL_INT8
  // Handle them here.
  getTargetDefinesARMV85A(Opts, Builder);
}

void AArch64TargetInfo::getTargetDefinesARMV87A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Also include the Armv8.6 defines
  getTargetDefinesARMV86A(Opts, Builder);
}

void AArch64TargetInfo::getTargetDefinesARMV9A(const LangOptions &Opts,
                                               MacroBuilder &Builder) const {
  // Armv9-A maps to Armv8.5-A
  getTargetDefinesARMV85A(Opts, Builder);
}

void AArch64TargetInfo::getTargetDefinesARMV91A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Armv9.1-A maps to Armv8.6-A
  getTargetDefinesARMV86A(Opts, Builder);
}

void AArch64TargetInfo::getTargetDefinesARMV92A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Armv9.2-A maps to Armv8.7-A
  getTargetDefinesARMV87A(Opts, Builder);
}

void AArch64TargetInfo::getTargetDefines(const LangOptions &Opts,
                                         MacroBuilder &Builder) const {
  // Target identification.
  Builder.defineMacro("__aarch64__");
  // For bare-metal.
  if (getTriple().getOS() == llvm::Triple::UnknownOS &&
      getTriple().isOSBinFormatELF())
    Builder.defineMacro("__ELF__");

  // Target properties.
  if (!getTriple().isOSWindows() && getTriple().isArch64Bit()) {
    Builder.defineMacro("_LP64");
    Builder.defineMacro("__LP64__");
  }

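  // Turn the selected code model into a predefine, e.g. the default "small"
  // model becomes __AARCH64_CMODEL_SMALL__ and -mcmodel=tiny becomes
  // __AARCH64_CMODEL_TINY__.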
  std::string CodeModel = getTargetOpts().CodeModel;
  if (CodeModel == "default")
    CodeModel = "small";
  for (char &c : CodeModel)
    c = toupper(c);
  Builder.defineMacro("__AARCH64_CMODEL_" + CodeModel + "__");

  // ACLE predefines. Many can only have one possible value on v8 AArch64.
  Builder.defineMacro("__ARM_ACLE", "200");
  Builder.defineMacro("__ARM_ARCH", getArchVersionString(ArchKind));
  Builder.defineMacro("__ARM_ARCH_PROFILE", "'A'");

  Builder.defineMacro("__ARM_64BIT_STATE", "1");
  Builder.defineMacro("__ARM_PCS_AAPCS64", "1");
  Builder.defineMacro("__ARM_ARCH_ISA_A64", "1");

  Builder.defineMacro("__ARM_FEATURE_CLZ", "1");
  Builder.defineMacro("__ARM_FEATURE_FMA", "1");
  Builder.defineMacro("__ARM_FEATURE_LDREX", "0xF");
  Builder.defineMacro("__ARM_FEATURE_IDIV", "1"); // As specified in ACLE
  Builder.defineMacro("__ARM_FEATURE_DIV");       // For backwards compatibility
  Builder.defineMacro("__ARM_FEATURE_NUMERIC_MAXMIN", "1");
  Builder.defineMacro("__ARM_FEATURE_DIRECTED_ROUNDING", "1");

  Builder.defineMacro("__ARM_ALIGN_MAX_STACK_PWR", "4");

  // 0xe implies support for half, single and double precision operations.
  Builder.defineMacro("__ARM_FP", "0xE");

  // PCS specifies this for SysV variants, which is all we support. Other ABIs
  // may choose __ARM_FP16_FORMAT_ALTERNATIVE.
  Builder.defineMacro("__ARM_FP16_FORMAT_IEEE", "1");
  Builder.defineMacro("__ARM_FP16_ARGS", "1");

  if (Opts.UnsafeFPMath)
    Builder.defineMacro("__ARM_FP_FAST", "1");

  Builder.defineMacro("__ARM_SIZEOF_WCHAR_T",
                      Twine(Opts.WCharSize ? Opts.WCharSize : 4));

  Builder.defineMacro("__ARM_SIZEOF_MINIMAL_ENUM", Opts.ShortEnums ? "1" : "4");

  if (FPU & NeonMode) {
    Builder.defineMacro("__ARM_NEON", "1");
    // 64-bit NEON supports half, single and double precision operations.
    Builder.defineMacro("__ARM_NEON_FP", "0xE");
  }

  if (FPU & SveMode)
    Builder.defineMacro("__ARM_FEATURE_SVE", "1");

  if (HasSVE2)
    Builder.defineMacro("__ARM_FEATURE_SVE2", "1");

  if (HasSVE2 && HasSVE2AES)
    Builder.defineMacro("__ARM_FEATURE_SVE2_AES", "1");

  if (HasSVE2 && HasSVE2BitPerm)
    Builder.defineMacro("__ARM_FEATURE_SVE2_BITPERM", "1");

  if (HasSVE2 && HasSVE2SHA3)
    Builder.defineMacro("__ARM_FEATURE_SVE2_SHA3", "1");

  if (HasSVE2 && HasSVE2SM4)
    Builder.defineMacro("__ARM_FEATURE_SVE2_SM4", "1");

  if (HasCRC)
    Builder.defineMacro("__ARM_FEATURE_CRC32", "1");

  // The __ARM_FEATURE_CRYPTO macro is deprecated in favor of the finer-grained
  // feature macros for AES, SHA2, SHA3 and SM4.
  if (HasAES && HasSHA2)
    Builder.defineMacro("__ARM_FEATURE_CRYPTO", "1");

  if (HasAES)
    Builder.defineMacro("__ARM_FEATURE_AES", "1");

  if (HasSHA2)
    Builder.defineMacro("__ARM_FEATURE_SHA2", "1");

  if (HasSHA3) {
    Builder.defineMacro("__ARM_FEATURE_SHA3", "1");
    Builder.defineMacro("__ARM_FEATURE_SHA512", "1");
  }

  if (HasSM4) {
    Builder.defineMacro("__ARM_FEATURE_SM3", "1");
    Builder.defineMacro("__ARM_FEATURE_SM4", "1");
  }

  if (HasUnaligned)
    Builder.defineMacro("__ARM_FEATURE_UNALIGNED", "1");

  if ((FPU & NeonMode) && HasFullFP16)
    Builder.defineMacro("__ARM_FEATURE_FP16_VECTOR_ARITHMETIC", "1");
  if (HasFullFP16)
    Builder.defineMacro("__ARM_FEATURE_FP16_SCALAR_ARITHMETIC", "1");

  if (HasDotProd)
    Builder.defineMacro("__ARM_FEATURE_DOTPROD", "1");

  if (HasMTE)
    Builder.defineMacro("__ARM_FEATURE_MEMORY_TAGGING", "1");

  if (HasTME)
    Builder.defineMacro("__ARM_FEATURE_TME", "1");

  if (HasMatMul)
    Builder.defineMacro("__ARM_FEATURE_MATMUL_INT8", "1");

  if (HasLSE)
    Builder.defineMacro("__ARM_FEATURE_ATOMICS", "1");

  if (HasBFloat16) {
    Builder.defineMacro("__ARM_FEATURE_BF16", "1");
    Builder.defineMacro("__ARM_FEATURE_BF16_VECTOR_ARITHMETIC", "1");
    Builder.defineMacro("__ARM_BF16_FORMAT_ALTERNATIVE", "1");
    Builder.defineMacro("__ARM_FEATURE_BF16_SCALAR_ARITHMETIC", "1");
  }

  if ((FPU & SveMode) && HasBFloat16) {
    Builder.defineMacro("__ARM_FEATURE_SVE_BF16", "1");
  }

  if ((FPU & SveMode) && HasMatmulFP64)
    Builder.defineMacro("__ARM_FEATURE_SVE_MATMUL_FP64", "1");

  if ((FPU & SveMode) && HasMatmulFP32)
    Builder.defineMacro("__ARM_FEATURE_SVE_MATMUL_FP32", "1");

  if ((FPU & SveMode) && HasMatMul)
    Builder.defineMacro("__ARM_FEATURE_SVE_MATMUL_INT8", "1");

  if ((FPU & NeonMode) && HasFP16FML)
    Builder.defineMacro("__ARM_FEATURE_FP16_FML", "1");

  if (Opts.hasSignReturnAddress()) {
    // Bitmask:
    // 0: Protection using the A key
    // 1: Protection using the B key
    // 2: Protection including leaf functions
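    // For example, signing both leaf and non-leaf functions with the A key
    // sets bits 0 and 2, so __ARM_FEATURE_PAC_DEFAULT expands to 5.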
    unsigned Value = 0;

    if (Opts.isSignReturnAddressWithAKey())
      Value |= (1 << 0);
    else
      Value |= (1 << 1);

    if (Opts.isSignReturnAddressScopeAll())
      Value |= (1 << 2);

    Builder.defineMacro("__ARM_FEATURE_PAC_DEFAULT", std::to_string(Value));
  }

  if (Opts.BranchTargetEnforcement)
    Builder.defineMacro("__ARM_FEATURE_BTI_DEFAULT", "1");

  if (HasLS64)
    Builder.defineMacro("__ARM_FEATURE_LS64", "1");

  if (HasRandGen)
    Builder.defineMacro("__ARM_FEATURE_RNG", "1");

  switch (ArchKind) {
  default:
    break;
  case llvm::AArch64::ArchKind::ARMV8_1A:
    getTargetDefinesARMV81A(Opts, Builder);
    break;
  case llvm::AArch64::ArchKind::ARMV8_2A:
    getTargetDefinesARMV82A(Opts, Builder);
    break;
  case llvm::AArch64::ArchKind::ARMV8_3A:
    getTargetDefinesARMV83A(Opts, Builder);
    break;
  case llvm::AArch64::ArchKind::ARMV8_4A:
    getTargetDefinesARMV84A(Opts, Builder);
    break;
  case llvm::AArch64::ArchKind::ARMV8_5A:
    getTargetDefinesARMV85A(Opts, Builder);
    break;
  case llvm::AArch64::ArchKind::ARMV8_6A:
    getTargetDefinesARMV86A(Opts, Builder);
    break;
  case llvm::AArch64::ArchKind::ARMV8_7A:
    getTargetDefinesARMV87A(Opts, Builder);
    break;
  case llvm::AArch64::ArchKind::ARMV9A:
    getTargetDefinesARMV9A(Opts, Builder);
    break;
  case llvm::AArch64::ArchKind::ARMV9_1A:
    getTargetDefinesARMV91A(Opts, Builder);
    break;
  case llvm::AArch64::ArchKind::ARMV9_2A:
    getTargetDefinesARMV92A(Opts, Builder);
    break;
  }

  // All of the __sync_(bool|val)_compare_and_swap_(1|2|4|8) builtins work.
  Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_1");
  Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_2");
  Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4");
  Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8");

  if (Opts.ArmSveVectorBits) {
    Builder.defineMacro("__ARM_FEATURE_SVE_BITS", Twine(Opts.ArmSveVectorBits));
    Builder.defineMacro("__ARM_FEATURE_SVE_VECTOR_OPERATORS");
  }
}

ArrayRef<Builtin::Info> AArch64TargetInfo::getTargetBuiltins() const {
  return llvm::makeArrayRef(BuiltinInfo, clang::AArch64::LastTSBuiltin -
                                             Builtin::FirstTSBuiltin);
}

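// Report the assumed range of vscale (the number of 128-bit granules in an
// SVE vector). With -msve-vector-bits=<bits> the vector length is fixed, so
// the range collapses to a single value, e.g. 512-bit vectors give
// vscale == 512 / 128 == 4.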
Optional<std::pair<unsigned, unsigned>>
AArch64TargetInfo::getVScaleRange(const LangOptions &LangOpts) const {
  if (LangOpts.ArmSveVectorBits) {
    unsigned VScale = LangOpts.ArmSveVectorBits / 128;
    return std::pair<unsigned, unsigned>(VScale, VScale);
  }
  if (hasFeature("sve"))
    return std::pair<unsigned, unsigned>(0, 16);
  return None;
}

bool AArch64TargetInfo::hasFeature(StringRef Feature) const {
  return Feature == "aarch64" || Feature == "arm64" || Feature == "arm" ||
         (Feature == "neon" && (FPU & NeonMode)) ||
         ((Feature == "sve" || Feature == "sve2" || Feature == "sve2-bitperm" ||
           Feature == "sve2-aes" || Feature == "sve2-sha3" ||
           Feature == "sve2-sm4" || Feature == "f64mm" || Feature == "f32mm" ||
           Feature == "i8mm" || Feature == "bf16") &&
          (FPU & SveMode)) ||
         (Feature == "ls64" && HasLS64);
}

bool AArch64TargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
                                             DiagnosticsEngine &Diags) {
  FPU = FPUMode;
  HasCRC = false;
  HasCrypto = false;
  HasAES = false;
  HasSHA2 = false;
  HasSHA3 = false;
  HasSM4 = false;
  HasUnaligned = true;
  HasFullFP16 = false;
  HasDotProd = false;
  HasFP16FML = false;
  HasMTE = false;
  HasTME = false;
  HasLS64 = false;
  HasRandGen = false;
  HasMatMul = false;
  HasBFloat16 = false;
  HasSVE2 = false;
  HasSVE2AES = false;
  HasSVE2SHA3 = false;
  HasSVE2SM4 = false;
  HasSVE2BitPerm = false;
  HasMatmulFP64 = false;
  HasMatmulFP32 = false;
  HasLSE = false;

  ArchKind = llvm::AArch64::ArchKind::ARMV8A;

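  // Walk the backend feature strings handed down by the driver (for example
  // "+neon", "+v8.2a" or "+sve2-aes") and record them in the corresponding
  // FPU/Has* state. Note that the SVE2 extension features below also imply
  // SVE and full FP16 support.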
  for (const auto &Feature : Features) {
    if (Feature == "+neon")
      FPU |= NeonMode;
    if (Feature == "+sve") {
      FPU |= SveMode;
      HasFullFP16 = 1;
    }
    if (Feature == "+sve2") {
      FPU |= SveMode;
      HasFullFP16 = 1;
      HasSVE2 = 1;
    }
    if (Feature == "+sve2-aes") {
      FPU |= SveMode;
      HasFullFP16 = 1;
      HasSVE2 = 1;
      HasSVE2AES = 1;
    }
    if (Feature == "+sve2-sha3") {
      FPU |= SveMode;
      HasFullFP16 = 1;
      HasSVE2 = 1;
      HasSVE2SHA3 = 1;
    }
    if (Feature == "+sve2-sm4") {
      FPU |= SveMode;
      HasFullFP16 = 1;
      HasSVE2 = 1;
      HasSVE2SM4 = 1;
    }
    if (Feature == "+sve2-bitperm") {
      FPU |= SveMode;
      HasFullFP16 = 1;
      HasSVE2 = 1;
      HasSVE2BitPerm = 1;
    }
    if (Feature == "+f32mm") {
      FPU |= SveMode;
      HasMatmulFP32 = true;
    }
    if (Feature == "+f64mm") {
      FPU |= SveMode;
      HasMatmulFP64 = true;
    }
    if (Feature == "+crc")
      HasCRC = true;
    if (Feature == "+crypto")
      HasCrypto = true;
    if (Feature == "+aes")
      HasAES = true;
    if (Feature == "+sha2")
      HasSHA2 = true;
    if (Feature == "+sha3") {
      HasSHA2 = true;
      HasSHA3 = true;
    }
    if (Feature == "+sm4")
      HasSM4 = true;
    if (Feature == "+strict-align")
      HasUnaligned = false;
    if (Feature == "+v8.1a")
      ArchKind = llvm::AArch64::ArchKind::ARMV8_1A;
    if (Feature == "+v8.2a")
      ArchKind = llvm::AArch64::ArchKind::ARMV8_2A;
    if (Feature == "+v8.3a")
      ArchKind = llvm::AArch64::ArchKind::ARMV8_3A;
    if (Feature == "+v8.4a")
      ArchKind = llvm::AArch64::ArchKind::ARMV8_4A;
    if (Feature == "+v8.5a")
      ArchKind = llvm::AArch64::ArchKind::ARMV8_5A;
    if (Feature == "+v8.6a")
      ArchKind = llvm::AArch64::ArchKind::ARMV8_6A;
    if (Feature == "+v8.7a")
      ArchKind = llvm::AArch64::ArchKind::ARMV8_7A;
    if (Feature == "+v9a")
      ArchKind = llvm::AArch64::ArchKind::ARMV9A;
    if (Feature == "+v9.1a")
      ArchKind = llvm::AArch64::ArchKind::ARMV9_1A;
    if (Feature == "+v9.2a")
      ArchKind = llvm::AArch64::ArchKind::ARMV9_2A;
    if (Feature == "+v8r")
      ArchKind = llvm::AArch64::ArchKind::ARMV8R;
    if (Feature == "+fullfp16")
      HasFullFP16 = true;
    if (Feature == "+dotprod")
      HasDotProd = true;
    if (Feature == "+fp16fml")
      HasFP16FML = true;
    if (Feature == "+mte")
      HasMTE = true;
    if (Feature == "+tme")
      HasTME = true;
    if (Feature == "+pauth")
      HasPAuth = true;
    if (Feature == "+i8mm")
      HasMatMul = true;
    if (Feature == "+bf16")
      HasBFloat16 = true;
    if (Feature == "+lse")
      HasLSE = true;
    if (Feature == "+ls64")
      HasLS64 = true;
    if (Feature == "+rand")
      HasRandGen = true;
    if (Feature == "+flagm")
      HasFlagM = true;
  }

  setDataLayout();

  return true;
}

TargetInfo::CallingConvCheckResult
AArch64TargetInfo::checkCallingConvention(CallingConv CC) const {
  switch (CC) {
  case CC_C:
  case CC_Swift:
  case CC_SwiftAsync:
  case CC_PreserveMost:
  case CC_PreserveAll:
  case CC_OpenCLKernel:
  case CC_AArch64VectorCall:
  case CC_Win64:
    return CCCR_OK;
  default:
    return CCCR_Warning;
  }
}

bool AArch64TargetInfo::isCLZForZeroUndef() const { return false; }

TargetInfo::BuiltinVaListKind AArch64TargetInfo::getBuiltinVaListKind() const {
  return TargetInfo::AArch64ABIBuiltinVaList;
}

const char *const AArch64TargetInfo::GCCRegNames[] = {
    // 32-bit Integer registers
    "w0", "w1", "w2", "w3", "w4", "w5", "w6", "w7", "w8", "w9", "w10", "w11",
    "w12", "w13", "w14", "w15", "w16", "w17", "w18", "w19", "w20", "w21", "w22",
    "w23", "w24", "w25", "w26", "w27", "w28", "w29", "w30", "wsp",

    // 64-bit Integer registers
    "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11",
    "x12", "x13", "x14", "x15", "x16", "x17", "x18", "x19", "x20", "x21", "x22",
    "x23", "x24", "x25", "x26", "x27", "x28", "fp", "lr", "sp",

    // 32-bit floating point registers
    "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7", "s8", "s9", "s10", "s11",
    "s12", "s13", "s14", "s15", "s16", "s17", "s18", "s19", "s20", "s21", "s22",
    "s23", "s24", "s25", "s26", "s27", "s28", "s29", "s30", "s31",

    // 64-bit floating point registers
    "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7", "d8", "d9", "d10", "d11",
    "d12", "d13", "d14", "d15", "d16", "d17", "d18", "d19", "d20", "d21", "d22",
    "d23", "d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31",

    // Neon vector registers
    "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11",
    "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22",
    "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31",

    // SVE vector registers
    "z0",  "z1",  "z2",  "z3",  "z4",  "z5",  "z6",  "z7",  "z8",  "z9",  "z10",
    "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21",
    "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31",

    // SVE predicate registers
    "p0",  "p1",  "p2",  "p3",  "p4",  "p5",  "p6",  "p7",  "p8",  "p9",  "p10",
    "p11", "p12", "p13", "p14", "p15"
};

ArrayRef<const char *> AArch64TargetInfo::getGCCRegNames() const {
  return llvm::makeArrayRef(GCCRegNames);
}

const TargetInfo::GCCRegAlias AArch64TargetInfo::GCCRegAliases[] = {
    {{"w31"}, "wsp"},
    {{"x31"}, "sp"},
    // GCC rN registers are aliases of xN registers.
    {{"r0"}, "x0"},
    {{"r1"}, "x1"},
    {{"r2"}, "x2"},
    {{"r3"}, "x3"},
    {{"r4"}, "x4"},
    {{"r5"}, "x5"},
    {{"r6"}, "x6"},
    {{"r7"}, "x7"},
    {{"r8"}, "x8"},
    {{"r9"}, "x9"},
    {{"r10"}, "x10"},
    {{"r11"}, "x11"},
    {{"r12"}, "x12"},
    {{"r13"}, "x13"},
    {{"r14"}, "x14"},
    {{"r15"}, "x15"},
    {{"r16"}, "x16"},
    {{"r17"}, "x17"},
    {{"r18"}, "x18"},
    {{"r19"}, "x19"},
    {{"r20"}, "x20"},
    {{"r21"}, "x21"},
    {{"r22"}, "x22"},
    {{"r23"}, "x23"},
    {{"r24"}, "x24"},
    {{"r25"}, "x25"},
    {{"r26"}, "x26"},
    {{"r27"}, "x27"},
    {{"r28"}, "x28"},
    {{"r29", "x29"}, "fp"},
    {{"r30", "x30"}, "lr"},
    // The S/D/Q and W/X registers overlap, but aren't really aliases; we
    // don't want to substitute one of these for a different-sized one.
};

ArrayRef<TargetInfo::GCCRegAlias> AArch64TargetInfo::getGCCRegAliases() const {
  return llvm::makeArrayRef(GCCRegAliases);
}

bool AArch64TargetInfo::validateAsmConstraint(
    const char *&Name, TargetInfo::ConstraintInfo &Info) const {
  switch (*Name) {
  default:
    return false;
  case 'w': // Floating point and SIMD registers (V0-V31)
    Info.setAllowsRegister();
    return true;
  case 'I': // Constant that can be used with an ADD instruction
  case 'J': // Constant that can be used with a SUB instruction
  case 'K': // Constant that can be used with a 32-bit logical instruction
  case 'L': // Constant that can be used with a 64-bit logical instruction
  case 'M': // Constant that can be used as a 32-bit MOV immediate
  case 'N': // Constant that can be used as a 64-bit MOV immediate
  case 'Y': // Floating point constant zero
  case 'Z': // Integer constant zero
    return true;
  case 'Q': // A memory reference with base register and no offset
    Info.setAllowsMemory();
    return true;
  case 'S': // A symbolic address
    Info.setAllowsRegister();
    return true;
  case 'U':
    if (Name[1] == 'p' && (Name[2] == 'l' || Name[2] == 'a')) {
      // SVE predicate registers ("Upa"=P0-P15, "Upl"=P0-P7)
      Info.setAllowsRegister();
      Name += 2;
      return true;
    }
    // Ump: A memory address suitable for ldp/stp in SI, DI, SF and DF modes.
    // Utf: A memory address suitable for ldp/stp in TF mode.
    // Usa: An absolute symbolic address.
    // Ush: The high part (bits 32:12) of a pc-relative symbolic address.

    // Better to return an error saying that it's an unrecognised constraint
    // even if this is a valid constraint in gcc.
    return false;
  case 'z': // Zero register, wzr or xzr
    Info.setAllowsRegister();
    return true;
  case 'x': // Floating point and SIMD registers (V0-V15)
    Info.setAllowsRegister();
    return true;
  case 'y': // SVE registers (V0-V7)
    Info.setAllowsRegister();
    return true;
  }
  return false;
}

bool AArch64TargetInfo::validateConstraintModifier(
    StringRef Constraint, char Modifier, unsigned Size,
    std::string &SuggestedModifier) const {
  // Strip off constraint modifiers.
  while (Constraint[0] == '=' || Constraint[0] == '+' || Constraint[0] == '&')
    Constraint = Constraint.substr(1);

  switch (Constraint[0]) {
  default:
    return true;
  case 'z':
  case 'r': {
    switch (Modifier) {
    case 'x':
    case 'w':
      // For now assume that the person knows what they're
      // doing with the modifier.
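      // For example, "%w0" on an 'r' operand requests the 32-bit W form of
      // the register, as in asm("add %w0, %w0, %w1" : "+r"(a) : "r"(b)).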
      return true;
    default:
      // By default an 'r' constraint will be in the 'x'
      // registers.
      if (Size == 64)
        return true;

      if (Size == 512)
        return HasLS64;

      SuggestedModifier = "w";
      return false;
    }
  }
  }
}

const char *AArch64TargetInfo::getClobbers() const { return ""; }

int AArch64TargetInfo::getEHDataRegisterNumber(unsigned RegNo) const {
  if (RegNo == 0)
    return 0;
  if (RegNo == 1)
    return 1;
  return -1;
}

bool AArch64TargetInfo::hasInt128Type() const { return true; }

AArch64leTargetInfo::AArch64leTargetInfo(const llvm::Triple &Triple,
                                         const TargetOptions &Opts)
    : AArch64TargetInfo(Triple, Opts) {}

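// The data layout strings below follow the usual LLVM encoding: "e" selects
// little-endian, "m:o"/"m:e"/"m:w" pick Mach-O, ELF or Windows COFF name
// mangling, "iN:M" sets the alignment of N-bit integers, "nN:M" lists the
// native integer widths, and "S128" requests 128-bit stack alignment.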
void AArch64leTargetInfo::setDataLayout() {
  if (getTriple().isOSBinFormatMachO()) {
    if (getTriple().isArch32Bit())
      resetDataLayout("e-m:o-p:32:32-i64:64-i128:128-n32:64-S128", "_");
    else
      resetDataLayout("e-m:o-i64:64-i128:128-n32:64-S128", "_");
  } else
    resetDataLayout("e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128");
}

void AArch64leTargetInfo::getTargetDefines(const LangOptions &Opts,
                                           MacroBuilder &Builder) const {
  Builder.defineMacro("__AARCH64EL__");
  AArch64TargetInfo::getTargetDefines(Opts, Builder);
}

AArch64beTargetInfo::AArch64beTargetInfo(const llvm::Triple &Triple,
                                         const TargetOptions &Opts)
    : AArch64TargetInfo(Triple, Opts) {}

void AArch64beTargetInfo::getTargetDefines(const LangOptions &Opts,
                                           MacroBuilder &Builder) const {
  Builder.defineMacro("__AARCH64EB__");
  Builder.defineMacro("__AARCH_BIG_ENDIAN");
  Builder.defineMacro("__ARM_BIG_ENDIAN");
  AArch64TargetInfo::getTargetDefines(Opts, Builder);
}

void AArch64beTargetInfo::setDataLayout() {
  assert(!getTriple().isOSBinFormatMachO());
  resetDataLayout("E-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128");
}

WindowsARM64TargetInfo::WindowsARM64TargetInfo(const llvm::Triple &Triple,
                                               const TargetOptions &Opts)
    : WindowsTargetInfo<AArch64leTargetInfo>(Triple, Opts), Triple(Triple) {

  // This is an LLP64 platform.
  // int:4, long:4, long long:8, long double:8.
  IntWidth = IntAlign = 32;
  LongWidth = LongAlign = 32;
  DoubleAlign = LongLongAlign = 64;
  LongDoubleWidth = LongDoubleAlign = 64;
  LongDoubleFormat = &llvm::APFloat::IEEEdouble();
  IntMaxType = SignedLongLong;
  Int64Type = SignedLongLong;
  SizeType = UnsignedLongLong;
  PtrDiffType = SignedLongLong;
  IntPtrType = SignedLongLong;
}

void WindowsARM64TargetInfo::setDataLayout() {
  resetDataLayout(Triple.isOSBinFormatMachO()
                      ? "e-m:o-i64:64-i128:128-n32:64-S128"
                      : "e-m:w-p:64:64-i32:32-i64:64-i128:128-n32:64-S128",
                  Triple.isOSBinFormatMachO() ? "_" : "");
}

TargetInfo::BuiltinVaListKind
WindowsARM64TargetInfo::getBuiltinVaListKind() const {
  return TargetInfo::CharPtrBuiltinVaList;
}

TargetInfo::CallingConvCheckResult
WindowsARM64TargetInfo::checkCallingConvention(CallingConv CC) const {
  switch (CC) {
  case CC_X86StdCall:
  case CC_X86ThisCall:
  case CC_X86FastCall:
  case CC_X86VectorCall:
    return CCCR_Ignore;
  case CC_C:
  case CC_OpenCLKernel:
  case CC_PreserveMost:
  case CC_PreserveAll:
  case CC_Swift:
  case CC_SwiftAsync:
  case CC_Win64:
    return CCCR_OK;
  default:
    return CCCR_Warning;
  }
}

MicrosoftARM64TargetInfo::MicrosoftARM64TargetInfo(const llvm::Triple &Triple,
                                                   const TargetOptions &Opts)
    : WindowsARM64TargetInfo(Triple, Opts) {
  TheCXXABI.set(TargetCXXABI::Microsoft);
}

void MicrosoftARM64TargetInfo::getTargetDefines(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  WindowsARM64TargetInfo::getTargetDefines(Opts, Builder);
  Builder.defineMacro("_M_ARM64", "1");
}

TargetInfo::CallingConvKind
MicrosoftARM64TargetInfo::getCallingConvKind(bool ClangABICompat4) const {
  return CCK_MicrosoftWin64;
}

unsigned MicrosoftARM64TargetInfo::getMinGlobalAlign(uint64_t TypeSize) const {
  unsigned Align = WindowsARM64TargetInfo::getMinGlobalAlign(TypeSize);

  // MSVC does size-based alignment for arm64 following the alignment section
  // of the document below; replicate that here to keep global alignment
  // consistent with object files compiled by MSVC.
  // https://docs.microsoft.com/en-us/cpp/build/arm64-windows-abi-conventions
  if (TypeSize >= 512) {           // TypeSize >= 64 bytes
    Align = std::max(Align, 128u); // align type at least 16 bytes
  } else if (TypeSize >= 64) {     // TypeSize >= 8 bytes
    Align = std::max(Align, 64u);  // align type at least 8 bytes
  } else if (TypeSize >= 16) {     // TypeSize >= 2 bytes
    Align = std::max(Align, 32u);  // align type at least 4 bytes
  }
  return Align;
}

MinGWARM64TargetInfo::MinGWARM64TargetInfo(const llvm::Triple &Triple,
                                           const TargetOptions &Opts)
    : WindowsARM64TargetInfo(Triple, Opts) {
  TheCXXABI.set(TargetCXXABI::GenericAArch64);
}

DarwinAArch64TargetInfo::DarwinAArch64TargetInfo(const llvm::Triple &Triple,
                                                 const TargetOptions &Opts)
    : DarwinTargetInfo<AArch64leTargetInfo>(Triple, Opts) {
  Int64Type = SignedLongLong;
  if (getTriple().isArch32Bit())
    IntMaxType = SignedLongLong;

  WCharType = SignedInt;
  UseSignedCharForObjCBool = false;

  LongDoubleWidth = LongDoubleAlign = SuitableAlign = 64;
  LongDoubleFormat = &llvm::APFloat::IEEEdouble();

  UseZeroLengthBitfieldAlignment = false;

  if (getTriple().isArch32Bit()) {
    UseBitFieldTypeAlignment = false;
    ZeroLengthBitfieldBoundary = 32;
    UseZeroLengthBitfieldAlignment = true;
    TheCXXABI.set(TargetCXXABI::WatchOS);
  } else
    TheCXXABI.set(TargetCXXABI::AppleARM64);
}

void DarwinAArch64TargetInfo::getOSDefines(const LangOptions &Opts,
                                           const llvm::Triple &Triple,
                                           MacroBuilder &Builder) const {
  Builder.defineMacro("__AARCH64_SIMD__");
  if (Triple.isArch32Bit())
    Builder.defineMacro("__ARM64_ARCH_8_32__");
  else
    Builder.defineMacro("__ARM64_ARCH_8__");
  Builder.defineMacro("__ARM_NEON__");
  Builder.defineMacro("__LITTLE_ENDIAN__");
  Builder.defineMacro("__REGISTER_PREFIX__", "");
  Builder.defineMacro("__arm64", "1");
  Builder.defineMacro("__arm64__", "1");

  if (Triple.isArm64e())
    Builder.defineMacro("__arm64e__", "1");

  getDarwinDefines(Builder, Opts, Triple, PlatformName, PlatformMinVersion);
}

TargetInfo::BuiltinVaListKind
DarwinAArch64TargetInfo::getBuiltinVaListKind() const {
  return TargetInfo::CharPtrBuiltinVaList;
}

// 64-bit RenderScript is aarch64
RenderScript64TargetInfo::RenderScript64TargetInfo(const llvm::Triple &Triple,
                                                   const TargetOptions &Opts)
    : AArch64leTargetInfo(llvm::Triple("aarch64", Triple.getVendorName(),
                                       Triple.getOSName(),
                                       Triple.getEnvironmentName()),
                          Opts) {
  IsRenderScriptTarget = true;
}

void RenderScript64TargetInfo::getTargetDefines(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  Builder.defineMacro("__RENDERSCRIPT__");
  AArch64leTargetInfo::getTargetDefines(Opts, Builder);
}