//===- AArch64InstrInfo.h - AArch64 Instruction Information -----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the AArch64 implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_AARCH64_AARCH64INSTRINFO_H
#define LLVM_LIB_TARGET_AARCH64_AARCH64INSTRINFO_H

#include "AArch64.h"
#include "AArch64RegisterInfo.h"
#include "llvm/ADT/Optional.h"
#include "llvm/CodeGen/MachineCombinerPattern.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/Support/TypeSize.h"

#define GET_INSTRINFO_HEADER
#include "AArch64GenInstrInfo.inc"

namespace llvm {

class AArch64Subtarget;

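// AArch64-specific MachineMemOperand target flags. MOSuppressPair hints that
// a load/store should not be paired (see isLdStPairSuppressed below), and
// MOStridedAccess marks a strided memory access (see isStridedAccess below).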
static const MachineMemOperand::Flags MOSuppressPair =
    MachineMemOperand::MOTargetFlag1;
static const MachineMemOperand::Flags MOStridedAccess =
    MachineMemOperand::MOTargetFlag2;

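// Name of the IR metadata used to tag strided accesses for the Falkor
// hardware prefetcher fix; the assumption here is that MOStridedAccess above
// is the corresponding MIR-level flag.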
#define FALKOR_STRIDED_ACCESS_MD "falkor.strided.access"

class AArch64InstrInfo final : public AArch64GenInstrInfo {
  const AArch64RegisterInfo RI;
  const AArch64Subtarget &Subtarget;

public:
  explicit AArch64InstrInfo(const AArch64Subtarget &STI);

  /// getRegisterInfo - TargetInstrInfo is a superset of register info. As
  /// such, whenever a client has an instance of instruction info, it should
  /// always be able to get register info as well (through this method).
  const AArch64RegisterInfo &getRegisterInfo() const { return RI; }

  unsigned getInstSizeInBytes(const MachineInstr &MI) const override;

  bool isAsCheapAsAMove(const MachineInstr &MI) const override;

  bool isCoalescableExtInstr(const MachineInstr &MI, Register &SrcReg,
                             Register &DstReg, unsigned &SubIdx) const override;

  bool
  areMemAccessesTriviallyDisjoint(const MachineInstr &MIa,
                                  const MachineInstr &MIb) const override;

  unsigned isLoadFromStackSlot(const MachineInstr &MI,
                               int &FrameIndex) const override;
  unsigned isStoreToStackSlot(const MachineInstr &MI,
                              int &FrameIndex) const override;

  /// Does this instruction set its full destination register to zero?
  static bool isGPRZero(const MachineInstr &MI);

  /// Does this instruction rename a GPR without modifying bits?
  static bool isGPRCopy(const MachineInstr &MI);

  /// Does this instruction rename an FPR without modifying bits?
  static bool isFPRCopy(const MachineInstr &MI);

  /// Return true if pairing the given load or store is hinted to be
  /// unprofitable.
  static bool isLdStPairSuppressed(const MachineInstr &MI);

  /// Return true if the given load or store is a strided memory access.
  static bool isStridedAccess(const MachineInstr &MI);

  /// Return true if the given opcode has an unscaled load/store offset.
  static bool hasUnscaledLdStOffset(unsigned Opc);
  static bool hasUnscaledLdStOffset(MachineInstr &MI) {
    return hasUnscaledLdStOffset(MI.getOpcode());
  }

  /// Returns the unscaled load/store for the scaled load/store opcode,
  /// if there is a corresponding unscaled variant available.
  static Optional<unsigned> getUnscaledLdSt(unsigned Opc);

  /// Returns the scaling factor for the (scaled or unscaled) load or store
  /// opcode.
  static int getMemScale(unsigned Opc);
  static int getMemScale(const MachineInstr &MI) {
    return getMemScale(MI.getOpcode());
  }
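  // For example (illustrative, not exhaustive): a doubleword load/store such
  // as AArch64::LDRXui accesses 8 bytes, so getMemScale returns 8 for it.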

  /// Returns whether the instruction is a pre-indexed load.
  static bool isPreLd(const MachineInstr &MI);

  /// Returns whether the instruction is a pre-indexed store.
  static bool isPreSt(const MachineInstr &MI);

  /// Returns whether the instruction is a pre-indexed load/store.
  static bool isPreLdSt(const MachineInstr &MI);

  /// Returns whether the instruction is a paired load/store.
  static bool isPairedLdSt(const MachineInstr &MI);

  /// Returns the base register operand of a load/store.
  static const MachineOperand &getLdStBaseOp(const MachineInstr &MI);

  /// Returns the immediate offset operand of a load/store.
  static const MachineOperand &getLdStOffsetOp(const MachineInstr &MI);

  /// Returns whether the instruction is FP or NEON.
  static bool isFpOrNEON(const MachineInstr &MI);

  /// Returns whether the instruction is in Q form (128-bit operands).
  static bool isQForm(const MachineInstr &MI);

  /// Returns the index of the immediate operand for a given opcode.
  static unsigned getLoadStoreImmIdx(unsigned Opc);

  /// Return true if the given load or store may be paired with another.
  static bool isPairableLdStInst(const MachineInstr &MI);

  /// Return the flag-setting variant of the opcode. The caller is
  /// responsible for ensuring the opcode has a flag-setting equivalent.
  static unsigned convertToFlagSettingOpc(unsigned Opc, bool &Is64Bit);

  /// Return true if this is a load/store that can be potentially paired/merged.
  bool isCandidateToMergeOrPair(const MachineInstr &MI) const;

  /// Hint that pairing the given load or store is unprofitable.
  static void suppressLdStPair(MachineInstr &MI);

  Optional<ExtAddrMode>
  getAddrModeFromMemoryOp(const MachineInstr &MemI,
                          const TargetRegisterInfo *TRI) const override;

  bool getMemOperandsWithOffsetWidth(
      const MachineInstr &MI, SmallVectorImpl<const MachineOperand *> &BaseOps,
      int64_t &Offset, bool &OffsetIsScalable, unsigned &Width,
      const TargetRegisterInfo *TRI) const override;

  /// If \p OffsetIsScalable is set to 'true', the offset is scaled by `vscale`.
  /// This is true for some SVE instructions like ldr/str that have a
  /// 'reg + imm' addressing mode where the immediate is an index to the
  /// scalable vector located at 'reg + imm * vscale x #bytes'.
  bool getMemOperandWithOffsetWidth(const MachineInstr &MI,
                                    const MachineOperand *&BaseOp,
                                    int64_t &Offset, bool &OffsetIsScalable,
                                    unsigned &Width,
                                    const TargetRegisterInfo *TRI) const;

  /// Return the immediate offset of the base register in a load/store \p LdSt.
  MachineOperand &getMemOpBaseRegImmOfsOffsetOperand(MachineInstr &LdSt) const;

  /// Returns true if opcode \p Opc is a memory operation. If it is, set
  /// \p Scale, \p Width, \p MinOffset, and \p MaxOffset accordingly.
  ///
  /// For unscaled instructions, \p Scale is set to 1.
  static bool getMemOpInfo(unsigned Opcode, TypeSize &Scale, unsigned &Width,
                           int64_t &MinOffset, int64_t &MaxOffset);
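  // Illustrative example (specific values assumed, not normative): for a
  // scaled doubleword load such as AArch64::LDRXui this would yield
  // Scale = 8, Width = 8, MinOffset = 0 and MaxOffset = 4095, i.e. byte
  // offsets of 0..4095*8 in steps of 8.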

  bool shouldClusterMemOps(ArrayRef<const MachineOperand *> BaseOps1,
                           ArrayRef<const MachineOperand *> BaseOps2,
                           unsigned NumLoads, unsigned NumBytes) const override;

  void copyPhysRegTuple(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
                        const DebugLoc &DL, MCRegister DestReg,
                        MCRegister SrcReg, bool KillSrc, unsigned Opcode,
                        llvm::ArrayRef<unsigned> Indices) const;
  void copyGPRRegTuple(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
                       DebugLoc DL, unsigned DestReg, unsigned SrcReg,
                       bool KillSrc, unsigned Opcode, unsigned ZeroReg,
                       llvm::ArrayRef<unsigned> Indices) const;
  void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
                   const DebugLoc &DL, MCRegister DestReg, MCRegister SrcReg,
                   bool KillSrc) const override;

  void storeRegToStackSlot(MachineBasicBlock &MBB,
                           MachineBasicBlock::iterator MBBI, Register SrcReg,
                           bool isKill, int FrameIndex,
                           const TargetRegisterClass *RC,
                           const TargetRegisterInfo *TRI) const override;

  void loadRegFromStackSlot(MachineBasicBlock &MBB,
                            MachineBasicBlock::iterator MBBI, Register DestReg,
                            int FrameIndex, const TargetRegisterClass *RC,
                            const TargetRegisterInfo *TRI) const override;

  // This tells target independent code that it is okay to pass instructions
  // with subreg operands to foldMemoryOperandImpl.
  bool isSubregFoldable() const override { return true; }

  using TargetInstrInfo::foldMemoryOperandImpl;
  MachineInstr *
  foldMemoryOperandImpl(MachineFunction &MF, MachineInstr &MI,
                        ArrayRef<unsigned> Ops,
                        MachineBasicBlock::iterator InsertPt, int FrameIndex,
                        LiveIntervals *LIS = nullptr,
                        VirtRegMap *VRM = nullptr) const override;

  /// \returns true if a branch from an instruction with opcode \p BranchOpc
  /// is capable of jumping to a position \p BrOffset bytes away.
  bool isBranchOffsetInRange(unsigned BranchOpc,
                             int64_t BrOffset) const override;

  MachineBasicBlock *getBranchDestBlock(const MachineInstr &MI) const override;

  bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
                     MachineBasicBlock *&FBB,
                     SmallVectorImpl<MachineOperand> &Cond,
                     bool AllowModify = false) const override;
  bool analyzeBranchPredicate(MachineBasicBlock &MBB,
                              MachineBranchPredicate &MBP,
                              bool AllowModify) const override;
  unsigned removeBranch(MachineBasicBlock &MBB,
                        int *BytesRemoved = nullptr) const override;
  unsigned insertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
                        MachineBasicBlock *FBB, ArrayRef<MachineOperand> Cond,
                        const DebugLoc &DL,
                        int *BytesAdded = nullptr) const override;
  bool
  reverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const override;
  bool canInsertSelect(const MachineBasicBlock &, ArrayRef<MachineOperand> Cond,
                       Register, Register, Register, int &, int &,
                       int &) const override;
  void insertSelect(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
                    const DebugLoc &DL, Register DstReg,
                    ArrayRef<MachineOperand> Cond, Register TrueReg,
                    Register FalseReg) const override;
  MCInst getNop() const override;

  bool isSchedulingBoundary(const MachineInstr &MI,
                            const MachineBasicBlock *MBB,
                            const MachineFunction &MF) const override;

  /// analyzeCompare - For a comparison instruction, return the source registers
  /// in SrcReg and SrcReg2, and the value it compares against in CmpValue.
  /// Return true if the comparison instruction can be analyzed.
  bool analyzeCompare(const MachineInstr &MI, Register &SrcReg,
                      Register &SrcReg2, int64_t &CmpMask,
                      int64_t &CmpValue) const override;
  /// optimizeCompareInstr - Convert the instruction supplying the argument to
  /// the comparison into one that sets the zero bit in the flags register.
  bool optimizeCompareInstr(MachineInstr &CmpInstr, Register SrcReg,
                            Register SrcReg2, int64_t CmpMask, int64_t CmpValue,
                            const MachineRegisterInfo *MRI) const override;
  bool optimizeCondBranch(MachineInstr &MI) const override;

  /// Return true when a code sequence can improve throughput. It
  /// should be called only for instructions in loops.
  /// \param Pattern - combiner pattern
  bool isThroughputPattern(MachineCombinerPattern Pattern) const override;
  /// Return true when there is potentially a faster code sequence
  /// for an instruction chain ending in ``Root``. All potential patterns are
  /// listed in the ``Patterns`` array.
  bool
  getMachineCombinerPatterns(MachineInstr &Root,
                             SmallVectorImpl<MachineCombinerPattern> &Patterns,
                             bool DoRegPressureReduce) const override;
  /// Return true when Inst is associative and commutative so that it can be
  /// reassociated.
  bool isAssociativeAndCommutative(const MachineInstr &Inst) const override;
  /// When getMachineCombinerPatterns() finds patterns, this function generates
  /// the instructions that could replace the original code sequence.
  void genAlternativeCodeSequence(
      MachineInstr &Root, MachineCombinerPattern Pattern,
      SmallVectorImpl<MachineInstr *> &InsInstrs,
      SmallVectorImpl<MachineInstr *> &DelInstrs,
      DenseMap<unsigned, unsigned> &InstrIdxForVirtReg) const override;
  /// AArch64 supports MachineCombiner.
  bool useMachineCombiner() const override;

  bool expandPostRAPseudo(MachineInstr &MI) const override;

  std::pair<unsigned, unsigned>
  decomposeMachineOperandsTargetFlags(unsigned TF) const override;
  ArrayRef<std::pair<unsigned, const char *>>
  getSerializableDirectMachineOperandTargetFlags() const override;
  ArrayRef<std::pair<unsigned, const char *>>
  getSerializableBitmaskMachineOperandTargetFlags() const override;
  ArrayRef<std::pair<MachineMemOperand::Flags, const char *>>
  getSerializableMachineMemOperandTargetFlags() const override;

  bool isFunctionSafeToOutlineFrom(MachineFunction &MF,
                                   bool OutlineFromLinkOnceODRs) const override;
  outliner::OutlinedFunction getOutliningCandidateInfo(
      std::vector<outliner::Candidate> &RepeatedSequenceLocs) const override;
  outliner::InstrType
  getOutliningType(MachineBasicBlock::iterator &MIT,
                   unsigned Flags) const override;
  bool isMBBSafeToOutlineFrom(MachineBasicBlock &MBB,
                              unsigned &Flags) const override;
  void buildOutlinedFrame(MachineBasicBlock &MBB, MachineFunction &MF,
                          const outliner::OutlinedFunction &OF) const override;
  MachineBasicBlock::iterator
  insertOutlinedCall(Module &M, MachineBasicBlock &MBB,
                     MachineBasicBlock::iterator &It, MachineFunction &MF,
                     outliner::Candidate &C) const override;
  bool shouldOutlineFromFunctionByDefault(MachineFunction &MF) const override;
  /// Returns the vector element size (B, H, S or D) of an SVE opcode.
  uint64_t getElementSizeForOpcode(unsigned Opc) const;
  /// Returns true if the opcode is for an SVE instruction that sets the
  /// condition codes as if its results had been fed to a PTEST instruction
  /// along with the same general predicate.
  bool isPTestLikeOpcode(unsigned Opc) const;
  /// Returns true if the opcode is for an SVE WHILE## instruction.
  bool isWhileOpcode(unsigned Opc) const;
  /// Returns true if the instruction has a shift by immediate that can be
  /// executed in one cycle less.
  static bool isFalkorShiftExtFast(const MachineInstr &MI);
  /// Return true if the instruction is a SEH instruction used for unwinding
  /// on Windows.
  static bool isSEHInstruction(const MachineInstr &MI);

  Optional<RegImmPair> isAddImmediate(const MachineInstr &MI,
                                      Register Reg) const override;

  Optional<ParamLoadedValue> describeLoadedValue(const MachineInstr &MI,
                                                 Register Reg) const override;

  unsigned int getTailDuplicateSize(CodeGenOpt::Level OptLevel) const override;

  bool isExtendLikelyToBeFolded(MachineInstr &ExtMI,
                                MachineRegisterInfo &MRI) const override;

  static void decomposeStackOffsetForFrameOffsets(const StackOffset &Offset,
                                                  int64_t &NumBytes,
                                                  int64_t &NumPredicateVectors,
                                                  int64_t &NumDataVectors);
  static void decomposeStackOffsetForDwarfOffsets(const StackOffset &Offset,
                                                  int64_t &ByteSized,
                                                  int64_t &VGSized);
#define GET_INSTRINFO_HELPER_DECLS
#include "AArch64GenInstrInfo.inc"

protected:
  /// If the specific machine instruction is an instruction that moves/copies
  /// a value from one register to another, return the destination and source
  /// registers as machine operands.
  Optional<DestSourcePair>
  isCopyInstrImpl(const MachineInstr &MI) const override;

private:
  unsigned getInstBundleLength(const MachineInstr &MI) const;

  /// Sets the offsets on outlined instructions in \p MBB which use SP
  /// so that they will be valid post-outlining.
  ///
  /// \param MBB A \p MachineBasicBlock in an outlined function.
  void fixupPostOutline(MachineBasicBlock &MBB) const;

  void instantiateCondBranch(MachineBasicBlock &MBB, const DebugLoc &DL,
                             MachineBasicBlock *TBB,
                             ArrayRef<MachineOperand> Cond) const;
  bool substituteCmpToZero(MachineInstr &CmpInstr, unsigned SrcReg,
                           const MachineRegisterInfo &MRI) const;
  bool removeCmpToZeroOrOne(MachineInstr &CmpInstr, unsigned SrcReg,
                            int CmpValue, const MachineRegisterInfo &MRI) const;

  /// Returns an unused general-purpose register which can be used for
  /// constructing an outlined call if one exists. Returns 0 otherwise.
  Register findRegisterToSaveLRTo(outliner::Candidate &C) const;

  /// Remove a ptest of a predicate-generating operation that already sets, or
  /// can be made to set, the condition codes in an identical manner.
  bool optimizePTestInstr(MachineInstr *PTest, unsigned MaskReg,
                          unsigned PredReg,
                          const MachineRegisterInfo *MRI) const;
};

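/// Records which of the N, Z, C and V condition flags are used by the
/// instructions that follow a flag-setting instruction (see examineCFlagsUse
/// below).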
struct UsedNZCV {
  bool N = false;
  bool Z = false;
  bool C = false;
  bool V = false;

  UsedNZCV() = default;

  UsedNZCV &operator|=(const UsedNZCV &UsedFlags) {
    this->N |= UsedFlags.N;
    this->Z |= UsedFlags.Z;
    this->C |= UsedFlags.C;
    this->V |= UsedFlags.V;
    return *this;
  }
};

/// \returns the condition flags used after \p CmpInstr in its MachineBB if the
/// NZCV flags are not live in the successors of the common parent block of
/// \p CmpInstr and \p MI. \returns None otherwise.
///
/// Collects the instructions using those flags in \p CCUseInstrs if provided.
Optional<UsedNZCV>
examineCFlagsUse(MachineInstr &MI, MachineInstr &CmpInstr,
                 const TargetRegisterInfo &TRI,
                 SmallVectorImpl<MachineInstr *> *CCUseInstrs = nullptr);

/// Return true if there is an instruction /after/ \p DefMI and before \p UseMI
/// which either reads or clobbers NZCV.
bool isNZCVTouchedInInstructionRange(const MachineInstr &DefMI,
                                     const MachineInstr &UseMI,
                                     const TargetRegisterInfo *TRI);

MCCFIInstruction createDefCFA(const TargetRegisterInfo &TRI, unsigned FrameReg,
                              unsigned Reg, const StackOffset &Offset,
                              bool LastAdjustmentWasScalable = true);
MCCFIInstruction createCFAOffset(const TargetRegisterInfo &MRI, unsigned Reg,
                                 const StackOffset &OffsetFromDefCFA);

/// emitFrameOffset - Emit instructions as needed to set DestReg to SrcReg
/// plus Offset.  This is intended to be used from within the prolog/epilog
/// insertion (PEI) pass, where a virtual scratch register may be allocated
/// if necessary, to be replaced by the scavenger at the end of PEI.
void emitFrameOffset(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
                     const DebugLoc &DL, unsigned DestReg, unsigned SrcReg,
                     StackOffset Offset, const TargetInstrInfo *TII,
                     MachineInstr::MIFlag = MachineInstr::NoFlags,
                     bool SetNZCV = false, bool NeedsWinCFI = false,
                     bool *HasWinCFI = nullptr, bool EmitCFAOffset = false,
                     StackOffset InitialOffset = {},
                     unsigned FrameReg = AArch64::SP);
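// A minimal usage sketch (hypothetical call site, names assumed): allocate 16
// bytes of stack in a prologue by adjusting SP downwards:
//
//   emitFrameOffset(MBB, MBBI, DL, AArch64::SP, AArch64::SP,
//                   StackOffset::getFixed(-16), TII,
//                   MachineInstr::FrameSetup);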

/// rewriteAArch64FrameIndex - Rewrite MI to access 'Offset' bytes from the
/// FP. Return false if the offset could not be handled directly in MI, and
/// return the left-over portion by reference.
bool rewriteAArch64FrameIndex(MachineInstr &MI, unsigned FrameRegIdx,
                              unsigned FrameReg, StackOffset &Offset,
                              const AArch64InstrInfo *TII);

/// Used to report the frame offset status in isAArch64FrameOffsetLegal.
enum AArch64FrameOffsetStatus {
  AArch64FrameOffsetCannotUpdate = 0x0, ///< Offset cannot apply.
  AArch64FrameOffsetIsLegal = 0x1,      ///< Offset is legal.
  AArch64FrameOffsetCanUpdate = 0x2     ///< Offset can apply, at least partly.
};

/// Check if the @p Offset is a valid frame offset for @p MI.
/// The returned value reports the validity of the frame offset for @p MI.
/// It uses the values defined by AArch64FrameOffsetStatus for that.
/// If result == AArch64FrameOffsetCannotUpdate, @p MI cannot be updated to
/// use an offset.
/// If result & AArch64FrameOffsetIsLegal, @p Offset can completely be
/// rewritten in @p MI.
/// If result & AArch64FrameOffsetCanUpdate, @p Offset contains the
/// amount that is off the limit of the legal offset.
/// If set, @p OutUseUnscaledOp will contain whether @p MI should be
/// turned into an unscaled operation, whose opcode is in @p OutUnscaledOp.
/// If set, @p EmittableOffset contains the amount that can be set in @p MI
/// (possibly with @p OutUnscaledOp if OutUseUnscaledOp is true) and that
/// is a legal offset.
int isAArch64FrameOffsetLegal(const MachineInstr &MI, StackOffset &Offset,
                              bool *OutUseUnscaledOp = nullptr,
                              unsigned *OutUnscaledOp = nullptr,
                              int64_t *EmittableOffset = nullptr);
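// A minimal usage sketch (illustrative; variable names are hypothetical). The
// result is a bitmask of AArch64FrameOffsetStatus values, so callers test it
// with bitwise-and rather than equality:
//
//   int Status = isAArch64FrameOffsetLegal(MI, Offset, &UseUnscaledOp,
//                                          &UnscaledOp, &Emittable);
//   if (Status & AArch64FrameOffsetCanUpdate)
//     ; // Offset now holds the remainder that did not fit in MI.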

static inline bool isUncondBranchOpcode(int Opc) { return Opc == AArch64::B; }

static inline bool isCondBranchOpcode(int Opc) {
  switch (Opc) {
  case AArch64::Bcc:
  case AArch64::CBZW:
  case AArch64::CBZX:
  case AArch64::CBNZW:
  case AArch64::CBNZX:
  case AArch64::TBZW:
  case AArch64::TBZX:
  case AArch64::TBNZW:
  case AArch64::TBNZX:
    return true;
  default:
    return false;
  }
}

static inline bool isIndirectBranchOpcode(int Opc) {
  switch (Opc) {
  case AArch64::BR:
  case AArch64::BRAA:
  case AArch64::BRAB:
  case AArch64::BRAAZ:
  case AArch64::BRABZ:
    return true;
  }
  return false;
}

static inline bool isPTrueOpcode(unsigned Opc) {
  switch (Opc) {
  case AArch64::PTRUE_B:
  case AArch64::PTRUE_H:
  case AArch64::PTRUE_S:
  case AArch64::PTRUE_D:
    return true;
  default:
    return false;
  }
}

/// Return opcode to be used for indirect calls.
unsigned getBLRCallOpcode(const MachineFunction &MF);

// struct TSFlags {
#define TSFLAG_ELEMENT_SIZE_TYPE(X)      (X)       // 3-bits
#define TSFLAG_DESTRUCTIVE_INST_TYPE(X) ((X) << 3) // 4-bits
#define TSFLAG_FALSE_LANE_TYPE(X)       ((X) << 7) // 2-bits
#define TSFLAG_INSTR_FLAGS(X)           ((X) << 9) // 2-bits
// }
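//
// Resulting TSFlags bit layout, low bits first (derived from the shift
// amounts above):
//   [2:0]  ElementSizeType
//   [6:3]  DestructiveInstType
//   [8:7]  FalseLaneType
//   [10:9] Instruction flags (InstrFlagIsWhile, InstrFlagIsPTestLike)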

namespace AArch64 {

enum ElementSizeType {
  ElementSizeMask = TSFLAG_ELEMENT_SIZE_TYPE(0x7),
  ElementSizeNone = TSFLAG_ELEMENT_SIZE_TYPE(0x0),
  ElementSizeB    = TSFLAG_ELEMENT_SIZE_TYPE(0x1),
  ElementSizeH    = TSFLAG_ELEMENT_SIZE_TYPE(0x2),
  ElementSizeS    = TSFLAG_ELEMENT_SIZE_TYPE(0x3),
  ElementSizeD    = TSFLAG_ELEMENT_SIZE_TYPE(0x4),
};

enum DestructiveInstType {
  DestructiveInstTypeMask       = TSFLAG_DESTRUCTIVE_INST_TYPE(0xf),
  NotDestructive                = TSFLAG_DESTRUCTIVE_INST_TYPE(0x0),
  DestructiveOther              = TSFLAG_DESTRUCTIVE_INST_TYPE(0x1),
  DestructiveUnary              = TSFLAG_DESTRUCTIVE_INST_TYPE(0x2),
  DestructiveBinaryImm          = TSFLAG_DESTRUCTIVE_INST_TYPE(0x3),
  DestructiveBinaryShImmUnpred  = TSFLAG_DESTRUCTIVE_INST_TYPE(0x4),
  DestructiveBinary             = TSFLAG_DESTRUCTIVE_INST_TYPE(0x5),
  DestructiveBinaryComm         = TSFLAG_DESTRUCTIVE_INST_TYPE(0x6),
  DestructiveBinaryCommWithRev  = TSFLAG_DESTRUCTIVE_INST_TYPE(0x7),
  DestructiveTernaryCommWithRev = TSFLAG_DESTRUCTIVE_INST_TYPE(0x8),
  DestructiveUnaryPassthru      = TSFLAG_DESTRUCTIVE_INST_TYPE(0x9),
};

enum FalseLaneType {
  FalseLanesMask  = TSFLAG_FALSE_LANE_TYPE(0x3),
  FalseLanesZero  = TSFLAG_FALSE_LANE_TYPE(0x1),
  FalseLanesUndef = TSFLAG_FALSE_LANE_TYPE(0x2),
};

// NOTE: This is a bit field.
static const uint64_t InstrFlagIsWhile     = TSFLAG_INSTR_FLAGS(0x1);
static const uint64_t InstrFlagIsPTestLike = TSFLAG_INSTR_FLAGS(0x2);

#undef TSFLAG_ELEMENT_SIZE_TYPE
#undef TSFLAG_DESTRUCTIVE_INST_TYPE
#undef TSFLAG_FALSE_LANE_TYPE
#undef TSFLAG_INSTR_FLAGS

int getSVEPseudoMap(uint16_t Opcode);
int getSVERevInstr(uint16_t Opcode);
int getSVENonRevInstr(uint16_t Opcode);

} // end namespace AArch64

} // end namespace llvm

#endif // LLVM_LIB_TARGET_AARCH64_AARCH64INSTRINFO_H