//===-- ARMSubtarget.h - Define Subtarget for the ARM ----------*- C++ -*--===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file declares the ARM specific subclass of TargetSubtargetInfo.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_ARM_ARMSUBTARGET_H
#define LLVM_LIB_TARGET_ARM_ARMSUBTARGET_H

#include "ARMBaseInstrInfo.h"
#include "ARMBaseRegisterInfo.h"
#include "ARMConstantPoolValue.h"
#include "ARMFrameLowering.h"
#include "ARMISelLowering.h"
#include "ARMMachineFunctionInfo.h"
#include "ARMSelectionDAGInfo.h"
#include "llvm/ADT/Triple.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/GlobalISel/CallLowering.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelector.h"
#include "llvm/CodeGen/GlobalISel/LegalizerInfo.h"
#include "llvm/CodeGen/GlobalISel/RegisterBankInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/MC/MCInstrItineraries.h"
#include "llvm/MC/MCSchedule.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include <bitset>
#include <memory>
#include <string>

#define GET_SUBTARGETINFO_HEADER
#include "ARMGenSubtargetInfo.inc"

namespace llvm {

class ARMBaseTargetMachine;
class GlobalValue;
class StringRef;
class ARMSubtarget : public ARMGenSubtargetInfo {
protected:
  enum ARMProcFamilyEnum {
    Others,

    CortexA12,
    CortexA15,
    CortexA17,
    CortexA32,
    CortexA35,
    CortexA5,
    CortexA53,
    CortexA55,
    CortexA57,
    CortexA7,
    CortexA72,
    CortexA73,
    CortexA75,
    CortexA76,
    CortexA77,
    CortexA78,
    CortexA78C,
    CortexA710,
    CortexA8,
    CortexA9,
    CortexM3,
    CortexM7,
    CortexR4,
    CortexR4F,
    CortexR5,
    CortexR52,
    CortexR7,
    CortexX1,
    CortexX1C,
    Exynos,
    Krait,
    Kryo,
    NeoverseN1,
    NeoverseN2,
    NeoverseV1,
    Swift
  };
  enum ARMProcClassEnum {
    None,

    AClass,
    MClass,
    RClass
  };
  enum ARMArchEnum {
    ARMv2,
    ARMv2a,
    ARMv3,
    ARMv3m,
    ARMv4,
    ARMv4t,
    ARMv5,
    ARMv5t,
    ARMv5te,
    ARMv5tej,
    ARMv6,
    ARMv6k,
    ARMv6kz,
    ARMv6m,
    ARMv6sm,
    ARMv6t2,
    ARMv7a,
    ARMv7em,
    ARMv7m,
    ARMv7r,
    ARMv7ve,
    ARMv81a,
    ARMv82a,
    ARMv83a,
    ARMv84a,
    ARMv85a,
    ARMv86a,
    ARMv87a,
    ARMv88a,
    ARMv8a,
    ARMv8mBaseline,
    ARMv8mMainline,
    ARMv8r,
    ARMv81mMainline,
    ARMv9a,
    ARMv91a,
    ARMv92a,
    ARMv93a,
  };

public:
  /// What kind of timing do load multiple/store multiple instructions have.
  enum ARMLdStMultipleTiming {
    /// Can load/store 2 registers/cycle.
    DoubleIssue,
    /// Can load/store 2 registers/cycle, but needs an extra cycle if the access
    /// is not 64-bit aligned.
    DoubleIssueCheckUnalignedAccess,
    /// Can load/store 1 register/cycle.
    SingleIssue,
    /// Can load/store 1 register/cycle, but needs an extra cycle for address
    /// computation and potentially also for register writeback.
    SingleIssuePlusExtras,
  };
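  // Illustrative intuition only (rough numbers derived from the descriptions
  // above, not from any particular core's documentation): for a 4-register
  // LDM, DoubleIssue suggests about 2 cycles, DoubleIssueCheckUnalignedAccess
  // about 2 cycles plus 1 if the address is not 64-bit aligned, SingleIssue
  // about 4 cycles, and SingleIssuePlusExtras about 4 cycles plus the extra
  // address-computation / writeback cycle.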

protected:
  /// ARMProcFamily - ARM processor family: Cortex-A8, Cortex-A9, and others.
  ARMProcFamilyEnum ARMProcFamily = Others;

  /// ARMProcClass - ARM processor class: None, AClass, RClass or MClass.
  ARMProcClassEnum ARMProcClass = None;

  /// ARMArch - ARM architecture.
  ARMArchEnum ARMArch = ARMv4t;

  /// HasV4TOps, HasV5TOps, HasV5TEOps,
  /// HasV6Ops, HasV6MOps, HasV6KOps, HasV6T2Ops, HasV7Ops, HasV8Ops -
  /// Specify whether the target supports specific ARM ISA variants.
  bool HasV4TOps = false;
  bool HasV5TOps = false;
  bool HasV5TEOps = false;
  bool HasV6Ops = false;
  bool HasV6MOps = false;
  bool HasV6KOps = false;
  bool HasV6T2Ops = false;
  bool HasV7Ops = false;
  bool HasV8Ops = false;
  bool HasV8_1aOps = false;
  bool HasV8_2aOps = false;
  bool HasV8_3aOps = false;
  bool HasV8_4aOps = false;
  bool HasV8_5aOps = false;
  bool HasV8_6aOps = false;
  bool HasV8_8aOps = false;
  bool HasV8_7aOps = false;
  bool HasV9_0aOps = false;
  bool HasV9_1aOps = false;
  bool HasV9_2aOps = false;
  bool HasV9_3aOps = false;
  bool HasV8MBaselineOps = false;
  bool HasV8MMainlineOps = false;
  bool HasV8_1MMainlineOps = false;
  bool HasMVEIntegerOps = false;
  bool HasMVEFloatOps = false;
  bool HasCDEOps = false;

  /// HasVFPv2, HasVFPv3, HasVFPv4, HasFPARMv8, HasNEON - Specify what
  /// floating point ISAs are supported.
  bool HasVFPv2 = false;
  bool HasVFPv3 = false;
  bool HasVFPv4 = false;
  bool HasFPARMv8 = false;
  bool HasNEON = false;
  bool HasFPRegs = false;
  bool HasFPRegs16 = false;
  bool HasFPRegs64 = false;

  /// Versions of the VFP flags restricted to single precision, or to
  /// 16 d-registers, or both.
  bool HasVFPv2SP = false;
  bool HasVFPv3SP = false;
  bool HasVFPv4SP = false;
  bool HasFPARMv8SP = false;
  bool HasVFPv3D16 = false;
  bool HasVFPv4D16 = false;
  bool HasFPARMv8D16 = false;
  bool HasVFPv3D16SP = false;
  bool HasVFPv4D16SP = false;
  bool HasFPARMv8D16SP = false;

  /// HasDotProd - True if the ARMv8.2A dot product instructions are supported.
  bool HasDotProd = false;

  /// UseNEONForSinglePrecisionFP - if the NEONFP attribute has been
  /// specified. Use the method useNEONForSinglePrecisionFP() to
  /// determine if NEON should actually be used.
  bool UseNEONForSinglePrecisionFP = false;

  /// UseMulOps - True if non-microcoded fused integer multiply-add and
  /// multiply-subtract instructions should be used.
  bool UseMulOps = false;

  /// SlowFPVMLx - If the VFP2 / NEON instructions are available, indicates
  /// whether the FP VML[AS] instructions are slow (if so, don't use them).
  bool SlowFPVMLx = false;

  /// SlowFPVFMx - If the VFP4 / NEON instructions are available, indicates
  /// whether the FP VFM[AS] instructions are slow (if so, don't use them).
  bool SlowFPVFMx = false;

  /// HasVMLxForwarding - If true, NEON has special multiplier accumulator
  /// forwarding to allow mul + mla to be issued back to back.
  bool HasVMLxForwarding = false;

  /// SlowFPBrcc - True if floating point compare + branch is slow.
  bool SlowFPBrcc = false;

  /// InThumbMode - True if compiling for Thumb, false for ARM.
  bool InThumbMode = false;

  /// UseSoftFloat - True if we're using software floating point features.
  bool UseSoftFloat = false;

  /// UseMISched - True if MachineScheduler should be used for this subtarget.
  bool UseMISched = false;

  /// DisablePostRAScheduler - True if scheduling should not happen again
  /// after register allocation.
  bool DisablePostRAScheduler = false;

  /// HasThumb2 - True if Thumb2 instructions are supported.
  bool HasThumb2 = false;

  /// NoARM - True if subtarget does not support ARM mode execution.
  bool NoARM = false;

  /// ReserveR9 - True if R9 is not available as a general purpose register.
  bool ReserveR9 = false;

  /// NoMovt - True if MOVT / MOVW pairs are not used for materialization of
  /// 32-bit imms (including global addresses).
  bool NoMovt = false;

  /// SupportsTailCall - True if the OS supports tail calls. The dynamic linker
  /// must be able to synthesize call stubs for interworking between ARM and
  /// Thumb.
  bool SupportsTailCall = false;

  /// HasFP16 - True if subtarget supports half-precision FP conversions.
  bool HasFP16 = false;

  /// HasFullFP16 - True if subtarget supports half-precision FP operations.
  bool HasFullFP16 = false;

  /// HasFP16FML - True if subtarget supports half-precision FP FML operations.
  bool HasFP16FML = false;

  /// HasBF16 - True if subtarget supports BFloat16 floating point operations.
  bool HasBF16 = false;

  /// HasMatMulInt8 - True if subtarget supports 8-bit integer matrix multiply.
  bool HasMatMulInt8 = false;

  /// HasD32 - True if subtarget has the full 32 double precision
  /// FP registers for VFPv3.
  bool HasD32 = false;

  /// HasHardwareDivideInThumb - True if subtarget supports [su]div in Thumb
  /// mode.
  bool HasHardwareDivideInThumb = false;

  /// HasHardwareDivideInARM - True if subtarget supports [su]div in ARM mode.
  bool HasHardwareDivideInARM = false;

  /// HasDataBarrier - True if the subtarget supports DMB / DSB data barrier
  /// instructions.
  bool HasDataBarrier = false;

  /// HasFullDataBarrier - True if the subtarget supports the DFB data barrier
  /// instruction.
  bool HasFullDataBarrier = false;

  /// HasV7Clrex - True if the subtarget supports CLREX instructions.
  bool HasV7Clrex = false;

  /// HasAcquireRelease - True if the subtarget supports the v8 atomic
  /// instructions (LDA/LDAEX etc).
  bool HasAcquireRelease = false;

  /// Pref32BitThumb - If true, codegen would prefer 32-bit Thumb instructions
  /// over 16-bit ones.
  bool Pref32BitThumb = false;

  /// AvoidCPSRPartialUpdate - If true, codegen would avoid using instructions
  /// that partially update CPSR and add a false dependency on the previous
  /// CPSR setting instruction.
  bool AvoidCPSRPartialUpdate = false;

  /// CheapPredicableCPSRDef - If true, disable +1 predication cost
  /// for instructions updating CPSR. Enabled for Cortex-A57.
  bool CheapPredicableCPSRDef = false;

  /// AvoidMOVsShifterOperand - If true, codegen should avoid using flag setting
  /// movs with shifter operand (i.e. asr, lsl, lsr).
  bool AvoidMOVsShifterOperand = false;

  /// HasRetAddrStack - Some processors perform return stack prediction.
  /// CodeGen should avoid issuing "normal" call instructions to callees that
  /// do not return.
  bool HasRetAddrStack = false;

  /// HasBranchPredictor - True if the subtarget has a branch predictor. Having
  /// a branch predictor or not changes the expected cost of taking a branch
  /// which affects the choice of whether to use predicated instructions.
  bool HasBranchPredictor = true;

  /// HasMPExtension - True if the subtarget supports Multiprocessing
  /// extension (ARMv7 only).
  bool HasMPExtension = false;

  /// HasVirtualization - True if the subtarget supports the Virtualization
  /// extension.
  bool HasVirtualization = false;

  /// HasFP64 - If true, the floating point unit supports double
  /// precision.
  bool HasFP64 = false;

  /// If true, the processor supports the Performance Monitor Extensions. These
  /// include a generic cycle-counter as well as more fine-grained (often
  /// implementation-specific) events.
  bool HasPerfMon = false;

  /// HasTrustZone - if true, processor supports TrustZone security extensions
  bool HasTrustZone = false;

  /// Has8MSecExt - if true, processor supports ARMv8-M Security Extensions
  bool Has8MSecExt = false;

  /// HasSHA2 - if true, processor supports SHA1 and SHA256
  bool HasSHA2 = false;

  /// HasAES - if true, processor supports AES
  bool HasAES = false;

  /// HasCrypto - if true, processor supports Cryptography extensions
  bool HasCrypto = false;

  /// HasCRC - if true, processor supports CRC instructions
  bool HasCRC = false;

  /// HasRAS - if true, the processor supports RAS extensions
  bool HasRAS = false;

  /// HasLOB - if true, the processor supports the Low Overhead Branch extension
  bool HasLOB = false;

  bool HasPACBTI = false;

  /// If true, the instructions "vmov.i32 d0, #0" and "vmov.i32 q0, #0" are
  /// particularly effective at zeroing a VFP register.
  bool HasZeroCycleZeroing = false;

  /// HasFPAO - if true, processor does positive address offset computation faster
  bool HasFPAO = false;

  /// HasFuseAES - if true, processor executes back to back AES instruction
  /// pairs faster.
  bool HasFuseAES = false;

  /// HasFuseLiterals - if true, processor executes back to back
  /// bottom and top halves of literal generation faster.
  bool HasFuseLiterals = false;

  /// If true, if-conversion may decide to leave some instructions unpredicated.
  bool IsProfitableToUnpredicate = false;

  /// If true, VMOV will be favored over VGETLNi32.
  bool HasSlowVGETLNi32 = false;

  /// If true, VMOV will be favored over VDUP.
  bool HasSlowVDUP32 = false;

  /// If true, VMOVSR will be favored over VMOVDRR.
  bool PreferVMOVSR = false;

  /// If true, ISHST barriers will be used for Release semantics.
  bool PreferISHST = false;

  /// If true, a VLDM/VSTM starting with an odd register number is considered to
  /// take more microops than single VLDRS/VSTRS.
  bool SlowOddRegister = false;

  /// If true, loading into a D subregister will be penalized.
  bool SlowLoadDSubregister = false;

  /// If true, use a wider stride when allocating VFP registers.
  bool UseWideStrideVFP = false;

  /// If true, the AGU and NEON/FPU units are multiplexed.
  bool HasMuxedUnits = false;

  /// If true, VMOVS will never be widened to VMOVD.
  bool DontWidenVMOVS = false;

  /// If true, splat a register between VFP and NEON instructions.
  bool SplatVFPToNeon = false;

  /// If true, run the MLx expansion pass.
  bool ExpandMLx = false;

  /// If true, VFP/NEON VMLA/VMLS have special RAW hazards.
  bool HasVMLxHazards = false;

  /// If true, read thread pointer from coprocessor register.
  bool ReadTPHard = false;

  /// If true, VMOVRS, VMOVSR and VMOVS will be converted from VFP to NEON.
  bool UseNEONForFPMovs = false;

  /// If true, VLDn instructions take an extra cycle for unaligned accesses.
  bool CheckVLDnAlign = false;

  /// If true, VFP instructions are not pipelined.
  bool NonpipelinedVFP = false;

  /// StrictAlign - If true, the subtarget disallows unaligned memory
  /// accesses for some types.  For details, see
  /// ARMTargetLowering::allowsMisalignedMemoryAccesses().
  bool StrictAlign = false;

  /// RestrictIT - If true, the subtarget disallows generation of deprecated IT
  /// blocks to conform to the ARMv8 rules.
  bool RestrictIT = false;

  /// HasDSP - If true, the subtarget supports the DSP (saturating arith
  /// and such) instructions.
  bool HasDSP = false;

  /// NaCl TRAP instruction is generated instead of the regular TRAP.
  bool UseNaClTrap = false;

  /// Generate calls via indirect call instructions.
  bool GenLongCalls = false;

  /// Generate code that does not contain data access to code sections.
  bool GenExecuteOnly = false;

  /// Target machine allowed unsafe FP math (such as use of NEON fp)
  bool UnsafeFPMath = false;

  /// UseSjLjEH - If true, the target uses SjLj exception handling (e.g. iOS).
  bool UseSjLjEH = false;

  /// Has speculation barrier
  bool HasSB = false;

  /// Implicitly convert an instruction to a different one if its immediates
  /// cannot be encoded. For example, ADD r0, r1, #0xFFFFFFFF -> SUB r0, r1, #1.
  bool NegativeImmediates = true;

  /// Mitigate against the CVE-2021-35465 security vulnerability.
  bool FixCMSE_CVE_2021_35465 = false;

  /// Harden against Straight Line Speculation for Returns and Indirect
  /// Branches.
  bool HardenSlsRetBr = false;

  /// Harden against Straight Line Speculation for indirect calls.
  bool HardenSlsBlr = false;

  /// Generate thunk code for SLS mitigation in the normal text section.
  bool HardenSlsNoComdat = false;

  /// stackAlignment - The minimum alignment known to hold of the stack frame on
  /// entry to the function and which must be maintained by every function.
  Align stackAlignment = Align(4);

  /// CPUString - String name of used CPU.
  std::string CPUString;

  unsigned MaxInterleaveFactor = 1;

  /// Clearance before partial register updates (in number of instructions)
  unsigned PartialUpdateClearance = 0;

  /// What kind of timing do load multiple/store multiple have (double issue,
  /// single issue etc).
  ARMLdStMultipleTiming LdStMultipleTiming = SingleIssue;

  /// The adjustment that we need to apply to get the operand latency from the
  /// operand cycle returned by the itinerary data for pre-ISel operands.
  int PreISelOperandLatencyAdjustment = 2;

  /// What alignment is preferred for loop bodies, in log2(bytes).
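  /// For example (illustrative arithmetic only, not a statement about any
  /// particular core): a value of 2 corresponds to a preferred loop alignment
  /// of 1 << 2 = 4 bytes.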
  unsigned PrefLoopLogAlignment = 0;

  /// The cost factor for MVE instructions, representing the number of beats an
  /// instruction can take. The default is 2 (it is set in initSubtargetFeatures
  /// rather than here, so that subtarget features can set it to a value less
  /// than 2).
  unsigned MVEVectorCostFactor = 0;

  /// OptMinSize - True if we're optimising for minimum code size, equal to
  /// the function attribute.
  bool OptMinSize = false;

  /// IsLittle - True if the target is little endian.
  bool IsLittle;

  /// TargetTriple - What processor and OS we're targeting.
  Triple TargetTriple;

  /// SchedModel - Processor specific instruction costs.
  MCSchedModel SchedModel;

  /// Selected instruction itineraries (one entry per itinerary class).
  InstrItineraryData InstrItins;

  /// NoBTIAtReturnTwice - Don't place a BTI instruction after
  /// return-twice constructs (setjmp).
  bool NoBTIAtReturnTwice = false;

  /// Options passed via command line that could influence the target.
  const TargetOptions &Options;

  const ARMBaseTargetMachine &TM;

public:
  /// This constructor initializes the data members to match the specified
  /// triple.
  ///
  ARMSubtarget(const Triple &TT, const std::string &CPU, const std::string &FS,
               const ARMBaseTargetMachine &TM, bool IsLittle,
               bool MinSize = false);

  /// getMaxInlineSizeThreshold - Returns the maximum memset / memcpy size
  /// that still makes it profitable to inline the call.
  unsigned getMaxInlineSizeThreshold() const {
    return 64;
  }

  /// getMaxMemcpyTPInlineSizeThreshold - Returns the maximum size
  /// that still makes it profitable to inline an llvm.memcpy as a Tail
  /// Predicated loop.
  /// This threshold should only be used for constant size inputs.
  unsigned getMaxMemcpyTPInlineSizeThreshold() const { return 128; }

  /// ParseSubtargetFeatures - Parses features string setting specified
  /// subtarget options.  Definition of function is auto generated by tblgen.
  void ParseSubtargetFeatures(StringRef CPU, StringRef TuneCPU, StringRef FS);

  /// initializeSubtargetDependencies - Initializes using a CPU and feature string
  /// so that we can use initializer lists for subtarget initialization.
  ARMSubtarget &initializeSubtargetDependencies(StringRef CPU, StringRef FS);
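  // Illustrative sketch only: the real definition lives in ARMSubtarget.cpp,
  // and the member initializers shown here are assumed/simplified, not
  // quoted. The point is that feature parsing runs first, so later members in
  // the constructor's initializer list can depend on the parsed features:
  //
  //   ARMSubtarget::ARMSubtarget(const Triple &TT, const std::string &CPU,
  //                              const std::string &FS, ...)
  //       : ARMGenSubtargetInfo(TT, CPU, /*TuneCPU*/ CPU, FS),
  //         // Parses CPU/FS (via initializeSubtargetDependencies) before the
  //         // members below are constructed.
  //         FrameLowering(initializeFrameLowering(CPU, FS)),
  //         // Feature bits are valid here, so isThumb1Only() is meaningful.
  //         InstrInfo(isThumb1Only()
  //                       ? (ARMBaseInstrInfo *)new Thumb1InstrInfo(*this)
  //                       : /* ARMInstrInfo or Thumb2InstrInfo */ ...),
  //         TLInfo(TM, *this) {}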

  const ARMSelectionDAGInfo *getSelectionDAGInfo() const override {
    return &TSInfo;
  }

  const ARMBaseInstrInfo *getInstrInfo() const override {
    return InstrInfo.get();
  }

  const ARMTargetLowering *getTargetLowering() const override {
    return &TLInfo;
  }

  const ARMFrameLowering *getFrameLowering() const override {
    return FrameLowering.get();
  }

  const ARMBaseRegisterInfo *getRegisterInfo() const override {
    return &InstrInfo->getRegisterInfo();
  }

  const CallLowering *getCallLowering() const override;
  InstructionSelector *getInstructionSelector() const override;
  const LegalizerInfo *getLegalizerInfo() const override;
  const RegisterBankInfo *getRegBankInfo() const override;

private:
  ARMSelectionDAGInfo TSInfo;
  // Either Thumb1FrameLowering or ARMFrameLowering.
  std::unique_ptr<ARMFrameLowering> FrameLowering;
  // Thumb1InstrInfo, Thumb2InstrInfo or ARMInstrInfo, depending on the
  // subtarget.
  std::unique_ptr<ARMBaseInstrInfo> InstrInfo;
  ARMTargetLowering TLInfo;

  /// GlobalISel related APIs.
  std::unique_ptr<CallLowering> CallLoweringInfo;
  std::unique_ptr<InstructionSelector> InstSelector;
  std::unique_ptr<LegalizerInfo> Legalizer;
  std::unique_ptr<RegisterBankInfo> RegBankInfo;

  void initializeEnvironment();
  void initSubtargetFeatures(StringRef CPU, StringRef FS);
  ARMFrameLowering *initializeFrameLowering(StringRef CPU, StringRef FS);

  std::bitset<8> CoprocCDE = {};

public:
  void computeIssueWidth();

  bool hasV4TOps()  const { return HasV4TOps;  }
  bool hasV5TOps()  const { return HasV5TOps;  }
  bool hasV5TEOps() const { return HasV5TEOps; }
  bool hasV6Ops()   const { return HasV6Ops;   }
  bool hasV6MOps()  const { return HasV6MOps;  }
  bool hasV6KOps()  const { return HasV6KOps;  }
  bool hasV6T2Ops() const { return HasV6T2Ops; }
  bool hasV7Ops()   const { return HasV7Ops;   }
  bool hasV8Ops()   const { return HasV8Ops;   }
  bool hasV8_1aOps() const { return HasV8_1aOps; }
  bool hasV8_2aOps() const { return HasV8_2aOps; }
  bool hasV8_3aOps() const { return HasV8_3aOps; }
  bool hasV8_4aOps() const { return HasV8_4aOps; }
  bool hasV8_5aOps() const { return HasV8_5aOps; }
  bool hasV8_6aOps() const { return HasV8_6aOps; }
  bool hasV8_7aOps() const { return HasV8_7aOps; }
  bool hasV8_8aOps() const { return HasV8_8aOps; }
  bool hasV9_0aOps() const { return HasV9_0aOps; }
  bool hasV9_1aOps() const { return HasV9_1aOps; }
  bool hasV9_2aOps() const { return HasV9_2aOps; }
  bool hasV9_3aOps() const { return HasV9_3aOps; }
  bool hasV8MBaselineOps() const { return HasV8MBaselineOps; }
  bool hasV8MMainlineOps() const { return HasV8MMainlineOps; }
  bool hasV8_1MMainlineOps() const { return HasV8_1MMainlineOps; }
  bool hasMVEIntegerOps() const { return HasMVEIntegerOps; }
  bool hasMVEFloatOps() const { return HasMVEFloatOps; }
  bool hasCDEOps() const { return HasCDEOps; }
  bool hasFPRegs() const { return HasFPRegs; }
  bool hasFPRegs16() const { return HasFPRegs16; }
  bool hasFPRegs64() const { return HasFPRegs64; }

  /// @{
  /// These functions are obsolete; please consider adding subtarget features
  /// or properties instead of calling them.
  bool isCortexA5() const { return ARMProcFamily == CortexA5; }
  bool isCortexA7() const { return ARMProcFamily == CortexA7; }
  bool isCortexA8() const { return ARMProcFamily == CortexA8; }
  bool isCortexA9() const { return ARMProcFamily == CortexA9; }
  bool isCortexA15() const { return ARMProcFamily == CortexA15; }
  bool isSwift()    const { return ARMProcFamily == Swift; }
  bool isCortexM3() const { return ARMProcFamily == CortexM3; }
  bool isCortexM7() const { return ARMProcFamily == CortexM7; }
  bool isLikeA9() const { return isCortexA9() || isCortexA15() || isKrait(); }
  bool isCortexR5() const { return ARMProcFamily == CortexR5; }
  bool isKrait() const { return ARMProcFamily == Krait; }
  /// @}

  bool hasARMOps() const { return !NoARM; }

  bool hasVFP2Base() const { return HasVFPv2SP; }
  bool hasVFP3Base() const { return HasVFPv3D16SP; }
  bool hasVFP4Base() const { return HasVFPv4D16SP; }
  bool hasFPARMv8Base() const { return HasFPARMv8D16SP; }
  bool hasNEON() const { return HasNEON; }
  bool hasSHA2() const { return HasSHA2; }
  bool hasAES() const { return HasAES; }
  bool hasCrypto() const { return HasCrypto; }
  bool hasDotProd() const { return HasDotProd; }
  bool hasCRC() const { return HasCRC; }
  bool hasRAS() const { return HasRAS; }
  bool hasLOB() const { return HasLOB; }
  bool hasPACBTI() const { return HasPACBTI; }
  bool hasVirtualization() const { return HasVirtualization; }

  bool useNEONForSinglePrecisionFP() const {
    return hasNEON() && UseNEONForSinglePrecisionFP;
  }

  bool hasDivideInThumbMode() const { return HasHardwareDivideInThumb; }
  bool hasDivideInARMMode() const { return HasHardwareDivideInARM; }
  bool hasDataBarrier() const { return HasDataBarrier; }
  bool hasFullDataBarrier() const { return HasFullDataBarrier; }
  bool hasV7Clrex() const { return HasV7Clrex; }
  bool hasAcquireRelease() const { return HasAcquireRelease; }

  bool hasAnyDataBarrier() const {
    return HasDataBarrier || (hasV6Ops() && !isThumb());
  }

  bool useMulOps() const { return UseMulOps; }
  bool useFPVMLx() const { return !SlowFPVMLx; }
  bool useFPVFMx() const {
    return !isTargetDarwin() && hasVFP4Base() && !SlowFPVFMx;
  }
  bool useFPVFMx16() const { return useFPVFMx() && hasFullFP16(); }
  bool useFPVFMx64() const { return useFPVFMx() && hasFP64(); }
  bool hasVMLxForwarding() const { return HasVMLxForwarding; }
  bool isFPBrccSlow() const { return SlowFPBrcc; }
  bool hasFP64() const { return HasFP64; }
  bool hasPerfMon() const { return HasPerfMon; }
  bool hasTrustZone() const { return HasTrustZone; }
  bool has8MSecExt() const { return Has8MSecExt; }
  bool hasZeroCycleZeroing() const { return HasZeroCycleZeroing; }
  bool hasFPAO() const { return HasFPAO; }
  bool isProfitableToUnpredicate() const { return IsProfitableToUnpredicate; }
  bool hasSlowVGETLNi32() const { return HasSlowVGETLNi32; }
  bool hasSlowVDUP32() const { return HasSlowVDUP32; }
  bool preferVMOVSR() const { return PreferVMOVSR; }
  bool preferISHSTBarriers() const { return PreferISHST; }
  bool expandMLx() const { return ExpandMLx; }
  bool hasVMLxHazards() const { return HasVMLxHazards; }
  bool hasSlowOddRegister() const { return SlowOddRegister; }
  bool hasSlowLoadDSubregister() const { return SlowLoadDSubregister; }
  bool useWideStrideVFP() const { return UseWideStrideVFP; }
  bool hasMuxedUnits() const { return HasMuxedUnits; }
  bool dontWidenVMOVS() const { return DontWidenVMOVS; }
  bool useSplatVFPToNeon() const { return SplatVFPToNeon; }
  bool useNEONForFPMovs() const { return UseNEONForFPMovs; }
  bool checkVLDnAccessAlignment() const { return CheckVLDnAlign; }
  bool nonpipelinedVFP() const { return NonpipelinedVFP; }
  bool prefers32BitThumb() const { return Pref32BitThumb; }
  bool avoidCPSRPartialUpdate() const { return AvoidCPSRPartialUpdate; }
  bool cheapPredicableCPSRDef() const { return CheapPredicableCPSRDef; }
  bool avoidMOVsShifterOperand() const { return AvoidMOVsShifterOperand; }
  bool hasRetAddrStack() const { return HasRetAddrStack; }
  bool hasBranchPredictor() const { return HasBranchPredictor; }
  bool hasMPExtension() const { return HasMPExtension; }
  bool hasDSP() const { return HasDSP; }
  bool useNaClTrap() const { return UseNaClTrap; }
  bool useSjLjEH() const { return UseSjLjEH; }
  bool hasSB() const { return HasSB; }
  bool genLongCalls() const { return GenLongCalls; }
  bool genExecuteOnly() const { return GenExecuteOnly; }
  bool hasBaseDSP() const {
    if (isThumb())
      return hasDSP();
    else
      return hasV5TEOps();
  }

  bool hasFP16() const { return HasFP16; }
  bool hasD32() const { return HasD32; }
  bool hasFullFP16() const { return HasFullFP16; }
  bool hasFP16FML() const { return HasFP16FML; }
  bool hasBF16() const { return HasBF16; }

  bool hasFuseAES() const { return HasFuseAES; }
  bool hasFuseLiterals() const { return HasFuseLiterals; }
  /// Return true if the CPU supports any kind of instruction fusion.
  bool hasFusion() const { return hasFuseAES() || hasFuseLiterals(); }

  bool hasMatMulInt8() const { return HasMatMulInt8; }

  const Triple &getTargetTriple() const { return TargetTriple; }

  bool isTargetDarwin() const { return TargetTriple.isOSDarwin(); }
  bool isTargetIOS() const { return TargetTriple.isiOS(); }
  bool isTargetWatchOS() const { return TargetTriple.isWatchOS(); }
  bool isTargetWatchABI() const { return TargetTriple.isWatchABI(); }
  bool isTargetLinux() const { return TargetTriple.isOSLinux(); }
  bool isTargetNaCl() const { return TargetTriple.isOSNaCl(); }
  bool isTargetNetBSD() const { return TargetTriple.isOSNetBSD(); }
  bool isTargetWindows() const { return TargetTriple.isOSWindows(); }

  bool isTargetCOFF() const { return TargetTriple.isOSBinFormatCOFF(); }
  bool isTargetELF() const { return TargetTriple.isOSBinFormatELF(); }
  bool isTargetMachO() const { return TargetTriple.isOSBinFormatMachO(); }

  // ARM EABI is the bare-metal EABI described in ARM ABI documents and
  // can be accessed via -target arm-none-eabi. This is NOT GNUEABI.
  // FIXME: Add a flag for bare-metal for that target and set Triple::EABI
  // even for GNUEABI, so we can make a distinction here and still conform to
  // the EABI on GNU (and Android) mode. This requires a change in Clang, too.
  // FIXME: The Darwin exception is temporary, while we move users to
  // "*-*-*-macho" triples as quickly as possible.
  bool isTargetAEABI() const {
    return (TargetTriple.getEnvironment() == Triple::EABI ||
            TargetTriple.getEnvironment() == Triple::EABIHF) &&
           !isTargetDarwin() && !isTargetWindows();
  }
  bool isTargetGNUAEABI() const {
    return (TargetTriple.getEnvironment() == Triple::GNUEABI ||
            TargetTriple.getEnvironment() == Triple::GNUEABIHF) &&
           !isTargetDarwin() && !isTargetWindows();
  }
  bool isTargetMuslAEABI() const {
    return (TargetTriple.getEnvironment() == Triple::MuslEABI ||
            TargetTriple.getEnvironment() == Triple::MuslEABIHF) &&
           !isTargetDarwin() && !isTargetWindows();
  }

  // ARM targets that support the EHABI exception handling standard.
  // Darwin uses SjLj. Other targets might need more checks.
  bool isTargetEHABICompatible() const {
    return TargetTriple.isTargetEHABICompatible();
  }

  bool isTargetHardFloat() const;

  bool isTargetAndroid() const { return TargetTriple.isAndroid(); }

  bool isXRaySupported() const override;

  bool isAPCS_ABI() const;
  bool isAAPCS_ABI() const;
  bool isAAPCS16_ABI() const;

  bool isROPI() const;
  bool isRWPI() const;

  bool useMachineScheduler() const { return UseMISched; }
  bool disablePostRAScheduler() const { return DisablePostRAScheduler; }
  bool useSoftFloat() const { return UseSoftFloat; }
  bool isThumb() const { return InThumbMode; }
  bool hasMinSize() const { return OptMinSize; }
  bool isThumb1Only() const { return InThumbMode && !HasThumb2; }
  bool isThumb2() const { return InThumbMode && HasThumb2; }
  bool hasThumb2() const { return HasThumb2; }
  bool isMClass() const { return ARMProcClass == MClass; }
  bool isRClass() const { return ARMProcClass == RClass; }
  bool isAClass() const { return ARMProcClass == AClass; }
  bool isReadTPHard() const { return ReadTPHard; }

  bool isR9Reserved() const {
    return isTargetMachO() ? (ReserveR9 || !HasV6Ops) : ReserveR9;
  }

  MCPhysReg getFramePointerReg() const {
    if (isTargetDarwin() || (!isTargetWindows() && isThumb()))
      return ARM::R7;
    return ARM::R11;
  }

  /// Returns true if the frame setup is split into two separate pushes (first
  /// r0-r7,lr then r8-r11), principally so that the frame pointer is adjacent
  /// to lr. This is always required on Thumb1-only targets, as the push and
  /// pop instructions can't access the high registers.
  bool splitFramePushPop(const MachineFunction &MF) const {
    if (MF.getInfo<ARMFunctionInfo>()->shouldSignReturnAddress())
      return true;
    return (getFramePointerReg() == ARM::R7 &&
            MF.getTarget().Options.DisableFramePointerElim(MF)) ||
           isThumb1Only();
  }

  bool useStride4VFPs() const;

  bool useMovt() const;

  bool supportsTailCall() const { return SupportsTailCall; }

  bool allowsUnalignedMem() const { return !StrictAlign; }

  bool restrictIT() const { return RestrictIT; }

  const std::string &getCPUString() const { return CPUString; }

  bool isLittle() const { return IsLittle; }

  unsigned getMispredictionPenalty() const;

  /// Returns true if machine scheduler should be enabled.
  bool enableMachineScheduler() const override;

  /// True for some subtargets at > -O0.
  bool enablePostRAScheduler() const override;

  /// True for some subtargets at > -O0.
  bool enablePostRAMachineScheduler() const override;

  /// Check whether this subtarget wants to use subregister liveness.
  bool enableSubRegLiveness() const override;

  /// Enable use of alias analysis during code generation (during MI
  /// scheduling, DAGCombine, etc.).
  bool useAA() const override { return true; }

  /// enableAtomicExpand - True if we need to expand our atomics.
  bool enableAtomicExpand() const override;

  /// getInstrItins - Return the instruction itineraries based on subtarget
  /// selection.
  const InstrItineraryData *getInstrItineraryData() const override {
    return &InstrItins;
  }

  /// getStackAlignment - Returns the minimum alignment known to hold of the
  /// stack frame on entry to the function and which must be maintained by every
  /// function for this subtarget.
  Align getStackAlignment() const { return stackAlignment; }

  unsigned getMaxInterleaveFactor() const { return MaxInterleaveFactor; }

  unsigned getPartialUpdateClearance() const { return PartialUpdateClearance; }

  ARMLdStMultipleTiming getLdStMultipleTiming() const {
    return LdStMultipleTiming;
  }

  int getPreISelOperandLatencyAdjustment() const {
    return PreISelOperandLatencyAdjustment;
  }

  /// True if the GV will be accessed via an indirect symbol.
  bool isGVIndirectSymbol(const GlobalValue *GV) const;

  /// Returns true if the GV will be placed in the GOT, which determines the
  /// constant pool modifier needed to access it.
  bool isGVInGOT(const GlobalValue *GV) const;

  /// True if fast-isel is used.
  bool useFastISel() const;

  /// Returns the correct return opcode for the current feature set.
  /// Use BX if available to allow mixing thumb/arm code, but fall back
  /// to plain mov pc,lr on ARMv4.
  unsigned getReturnOpcode() const {
    if (isThumb())
      return ARM::tBX_RET;
    if (hasV4TOps())
      return ARM::BX_RET;
    return ARM::MOVPCLR;
  }

  /// Allow movt+movw for PIC global address calculation.
  /// ELF does not have GOT relocations for movt+movw.
  /// ROPI does not use GOT.
  bool allowPositionIndependentMovt() const {
    return isROPI() || !isTargetELF();
  }

  unsigned getPrefLoopLogAlignment() const { return PrefLoopLogAlignment; }

  unsigned
  getMVEVectorCostFactor(TargetTransformInfo::TargetCostKind CostKind) const {
    if (CostKind == TargetTransformInfo::TCK_CodeSize)
      return 1;
    return MVEVectorCostFactor;
  }
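  // Illustrative use only (hypothetical caller; real users live in the ARM
  // cost model, e.g. ARMTargetTransformInfo): a vector operation's base cost
  // is typically scaled by this factor, except for code-size costing where
  // the factor collapses to 1:
  //
  //   InstructionCost Cost =
  //       BaseCost * ST.getMVEVectorCostFactor(
  //                      TargetTransformInfo::TCK_RecipThroughput);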

  bool ignoreCSRForAllocationOrder(const MachineFunction &MF,
                                   unsigned PhysReg) const override;
  unsigned getGPRAllocationOrder(const MachineFunction &MF) const;

  bool fixCMSE_CVE_2021_35465() const { return FixCMSE_CVE_2021_35465; }

  bool hardenSlsRetBr() const { return HardenSlsRetBr; }
  bool hardenSlsBlr() const { return HardenSlsBlr; }
  bool hardenSlsNoComdat() const { return HardenSlsNoComdat; }

  bool getNoBTIAtReturnTwice() const { return NoBTIAtReturnTwice; }
};

} // end namespace llvm

#endif // LLVM_LIB_TARGET_ARM_ARMSUBTARGET_H