//===- AArch64InstrInfo.h - AArch64 Instruction Information -----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the AArch64 implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_AARCH64_AARCH64INSTRINFO_H
#define LLVM_LIB_TARGET_AARCH64_AARCH64INSTRINFO_H

#include "AArch64.h"
#include "AArch64RegisterInfo.h"
#include "llvm/ADT/Optional.h"
#include "llvm/CodeGen/MachineCombinerPattern.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/Support/TypeSize.h"

#define GET_INSTRINFO_HEADER
#include "AArch64GenInstrInfo.inc"

namespace llvm {

class AArch64Subtarget;
class AArch64TargetMachine;

static const MachineMemOperand::Flags MOSuppressPair =
    MachineMemOperand::MOTargetFlag1;
static const MachineMemOperand::Flags MOStridedAccess =
    MachineMemOperand::MOTargetFlag2;
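
// A minimal usage sketch (hypothetical pass code, not part of this interface):
// these target flags live on an instruction's memory operands, so a pass can
// query the pairing hint with
//   any_of(MI.memoperands(), [](MachineMemOperand *MMO) {
//     return MMO->getFlags() & MOSuppressPair;
//   });
// The isLdStPairSuppressed/suppressLdStPair members below wrap this pattern.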

#define FALKOR_STRIDED_ACCESS_MD "falkor.strided.access"

class AArch64InstrInfo final : public AArch64GenInstrInfo {
  const AArch64RegisterInfo RI;
  const AArch64Subtarget &Subtarget;

public:
  explicit AArch64InstrInfo(const AArch64Subtarget &STI);

  /// getRegisterInfo - TargetInstrInfo is a superset of MRegister info. As
  /// such, whenever a client has an instance of instruction info, it should
  /// always be able to get register info as well (through this method).
  const AArch64RegisterInfo &getRegisterInfo() const { return RI; }

  unsigned getInstSizeInBytes(const MachineInstr &MI) const override;

  bool isAsCheapAsAMove(const MachineInstr &MI) const override;

  bool isCoalescableExtInstr(const MachineInstr &MI, Register &SrcReg,
                             Register &DstReg, unsigned &SubIdx) const override;

  bool
  areMemAccessesTriviallyDisjoint(const MachineInstr &MIa,
                                  const MachineInstr &MIb) const override;

  unsigned isLoadFromStackSlot(const MachineInstr &MI,
                               int &FrameIndex) const override;
  unsigned isStoreToStackSlot(const MachineInstr &MI,
                              int &FrameIndex) const override;

  /// Does this instruction set its full destination register to zero?
  static bool isGPRZero(const MachineInstr &MI);

  /// Does this instruction rename a GPR without modifying bits?
  static bool isGPRCopy(const MachineInstr &MI);

  /// Does this instruction rename an FPR without modifying bits?
  static bool isFPRCopy(const MachineInstr &MI);

  /// Return true if pairing the given load or store is hinted to be
  /// unprofitable.
  static bool isLdStPairSuppressed(const MachineInstr &MI);

  /// Return true if the given load or store is a strided memory access.
  static bool isStridedAccess(const MachineInstr &MI);

  /// Return true if it has an unscaled load/store offset.
  static bool hasUnscaledLdStOffset(unsigned Opc);
  static bool hasUnscaledLdStOffset(MachineInstr &MI) {
    return hasUnscaledLdStOffset(MI.getOpcode());
  }

  /// Returns the unscaled load/store for the scaled load/store opcode,
  /// if there is a corresponding unscaled variant available.
  static Optional<unsigned> getUnscaledLdSt(unsigned Opc);

  /// Scaling factor for (scaled or unscaled) load or store.
  static int getMemScale(unsigned Opc);
  static int getMemScale(const MachineInstr &MI) {
    return getMemScale(MI.getOpcode());
  }
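
  // For illustration only (behaviour assumed from the usual scaled-load
  // encodings, not guaranteed by this header): getMemScale(AArch64::LDRXui)
  // would be expected to return 8, the access size in bytes by which the
  // scaled immediate offset is multiplied.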

  /// Returns whether the instruction is a pre-indexed load.
  static bool isPreLd(const MachineInstr &MI);

  /// Returns whether the instruction is a pre-indexed store.
  static bool isPreSt(const MachineInstr &MI);

  /// Returns whether the instruction is a pre-indexed load/store.
  static bool isPreLdSt(const MachineInstr &MI);

  /// Returns the index for the immediate for a given instruction.
  static unsigned getLoadStoreImmIdx(unsigned Opc);

  /// Return true if the given load or store may be paired with another.
  static bool isPairableLdStInst(const MachineInstr &MI);

  /// Return the opcode that sets flags when possible. The caller is
  /// responsible for ensuring the opc has a flag setting equivalent.
  static unsigned convertToFlagSettingOpc(unsigned Opc, bool &Is64Bit);
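
  // Hypothetical example (mapping assumed from the usual ADD->ADDS pairing;
  // the real table lives in the .cpp): convertToFlagSettingOpc(
  // AArch64::ADDWri, Is64Bit) would be expected to return AArch64::ADDSWri
  // and set Is64Bit to false.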

  /// Return true if this is a load/store that can be potentially paired/merged.
  bool isCandidateToMergeOrPair(const MachineInstr &MI) const;

  /// Hint that pairing the given load or store is unprofitable.
  static void suppressLdStPair(MachineInstr &MI);

  Optional<ExtAddrMode>
  getAddrModeFromMemoryOp(const MachineInstr &MemI,
                          const TargetRegisterInfo *TRI) const override;

  bool getMemOperandsWithOffsetWidth(
      const MachineInstr &MI, SmallVectorImpl<const MachineOperand *> &BaseOps,
      int64_t &Offset, bool &OffsetIsScalable, unsigned &Width,
      const TargetRegisterInfo *TRI) const override;

  /// If \p OffsetIsScalable is set to 'true', the offset is scaled by `vscale`.
  /// This is true for some SVE instructions like ldr/str that have a
  /// 'reg + imm' addressing mode where the immediate is an index to the
  /// scalable vector located at 'reg + imm * vscale x #bytes'.
  bool getMemOperandWithOffsetWidth(const MachineInstr &MI,
                                    const MachineOperand *&BaseOp,
                                    int64_t &Offset, bool &OffsetIsScalable,
                                    unsigned &Width,
                                    const TargetRegisterInfo *TRI) const;
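
  // A sketch of the expected outputs for an SVE fill (values assumed): for
  // "ldr z0, [x0, #2, mul vl]" this would report Offset = 2 and
  // OffsetIsScalable = true, i.e. an effective byte offset of
  // 2 * vscale * 16.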

  /// Return the immediate offset of the base register in a load/store \p LdSt.
  MachineOperand &getMemOpBaseRegImmOfsOffsetOperand(MachineInstr &LdSt) const;

  /// Returns true if opcode \p Opc is a memory operation. If it is, set
  /// \p Scale, \p Width, \p MinOffset, and \p MaxOffset accordingly.
  ///
  /// For unscaled instructions, \p Scale is set to 1.
  static bool getMemOpInfo(unsigned Opcode, TypeSize &Scale, unsigned &Width,
                           int64_t &MinOffset, int64_t &MaxOffset);
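
  // Hypothetical caller sketch (names local to this example): the encoded
  // immediate holds the byte offset divided by Scale, so a legality check
  // could look like
  //   TypeSize Scale = TypeSize::Fixed(0); unsigned Width;
  //   int64_t Min, Max;
  //   if (getMemOpInfo(Opc, Scale, Width, Min, Max))
  //     Legal = Offset % Scale == 0 &&
  //             Offset / Scale >= Min && Offset / Scale <= Max;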

  bool shouldClusterMemOps(ArrayRef<const MachineOperand *> BaseOps1,
                           ArrayRef<const MachineOperand *> BaseOps2,
                           unsigned NumLoads, unsigned NumBytes) const override;

  void copyPhysRegTuple(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
                        const DebugLoc &DL, MCRegister DestReg,
                        MCRegister SrcReg, bool KillSrc, unsigned Opcode,
                        llvm::ArrayRef<unsigned> Indices) const;
  void copyGPRRegTuple(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
                       DebugLoc DL, unsigned DestReg, unsigned SrcReg,
                       bool KillSrc, unsigned Opcode, unsigned ZeroReg,
                       llvm::ArrayRef<unsigned> Indices) const;
  void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
                   const DebugLoc &DL, MCRegister DestReg, MCRegister SrcReg,
                   bool KillSrc) const override;

  void storeRegToStackSlot(MachineBasicBlock &MBB,
                           MachineBasicBlock::iterator MBBI, Register SrcReg,
                           bool isKill, int FrameIndex,
                           const TargetRegisterClass *RC,
                           const TargetRegisterInfo *TRI) const override;

  void loadRegFromStackSlot(MachineBasicBlock &MBB,
                            MachineBasicBlock::iterator MBBI, Register DestReg,
                            int FrameIndex, const TargetRegisterClass *RC,
                            const TargetRegisterInfo *TRI) const override;

  // This tells target independent code that it is okay to pass instructions
  // with subreg operands to foldMemoryOperandImpl.
  bool isSubregFoldable() const override { return true; }

  using TargetInstrInfo::foldMemoryOperandImpl;
  MachineInstr *
  foldMemoryOperandImpl(MachineFunction &MF, MachineInstr &MI,
                        ArrayRef<unsigned> Ops,
                        MachineBasicBlock::iterator InsertPt, int FrameIndex,
                        LiveIntervals *LIS = nullptr,
                        VirtRegMap *VRM = nullptr) const override;

  /// \returns true if a branch from an instruction with opcode \p BranchOpc
  /// is capable of jumping to a position \p BrOffset bytes away.
  bool isBranchOffsetInRange(unsigned BranchOpc,
                             int64_t BrOffset) const override;

  MachineBasicBlock *getBranchDestBlock(const MachineInstr &MI) const override;

  bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
                     MachineBasicBlock *&FBB,
                     SmallVectorImpl<MachineOperand> &Cond,
                     bool AllowModify = false) const override;
  bool analyzeBranchPredicate(MachineBasicBlock &MBB,
                              MachineBranchPredicate &MBP,
                              bool AllowModify) const override;
  unsigned removeBranch(MachineBasicBlock &MBB,
                        int *BytesRemoved = nullptr) const override;
  unsigned insertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
                        MachineBasicBlock *FBB, ArrayRef<MachineOperand> Cond,
                        const DebugLoc &DL,
                        int *BytesAdded = nullptr) const override;
  bool
  reverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const override;
  bool canInsertSelect(const MachineBasicBlock &, ArrayRef<MachineOperand> Cond,
                       Register, Register, Register, int &, int &,
                       int &) const override;
  void insertSelect(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
                    const DebugLoc &DL, Register DstReg,
                    ArrayRef<MachineOperand> Cond, Register TrueReg,
                    Register FalseReg) const override;
  MCInst getNop() const override;

  bool isSchedulingBoundary(const MachineInstr &MI,
                            const MachineBasicBlock *MBB,
                            const MachineFunction &MF) const override;

  /// analyzeCompare - For a comparison instruction, return the source registers
  /// in SrcReg and SrcReg2, and the value it compares against in CmpValue.
  /// Return true if the comparison instruction can be analyzed.
  bool analyzeCompare(const MachineInstr &MI, Register &SrcReg,
                      Register &SrcReg2, int &CmpMask,
                      int &CmpValue) const override;
  /// optimizeCompareInstr - Convert the instruction supplying the argument to
  /// the comparison into one that sets the zero bit in the flags register.
  bool optimizeCompareInstr(MachineInstr &CmpInstr, Register SrcReg,
                            Register SrcReg2, int CmpMask, int CmpValue,
                            const MachineRegisterInfo *MRI) const override;
  bool optimizeCondBranch(MachineInstr &MI) const override;

  /// Return true when a code sequence can improve throughput. It
  /// should be called only for instructions in loops.
  /// \param Pattern - combiner pattern
  bool isThroughputPattern(MachineCombinerPattern Pattern) const override;
  /// Return true when there is potentially a faster code sequence
  /// for an instruction chain ending in ``Root``. All potential patterns are
  /// listed in the ``Patterns`` array.
  bool
  getMachineCombinerPatterns(MachineInstr &Root,
                             SmallVectorImpl<MachineCombinerPattern> &Patterns,
                             bool DoRegPressureReduce) const override;
  /// Return true when Inst is associative and commutative so that it can be
  /// reassociated.
  bool isAssociativeAndCommutative(const MachineInstr &Inst) const override;
  /// When getMachineCombinerPatterns() finds patterns, this function generates
  /// the instructions that could replace the original code sequence.
  void genAlternativeCodeSequence(
      MachineInstr &Root, MachineCombinerPattern Pattern,
      SmallVectorImpl<MachineInstr *> &InsInstrs,
      SmallVectorImpl<MachineInstr *> &DelInstrs,
      DenseMap<unsigned, unsigned> &InstrIdxForVirtReg) const override;
  /// AArch64 supports MachineCombiner.
  bool useMachineCombiner() const override;

  bool expandPostRAPseudo(MachineInstr &MI) const override;

  std::pair<unsigned, unsigned>
  decomposeMachineOperandsTargetFlags(unsigned TF) const override;
  ArrayRef<std::pair<unsigned, const char *>>
  getSerializableDirectMachineOperandTargetFlags() const override;
  ArrayRef<std::pair<unsigned, const char *>>
  getSerializableBitmaskMachineOperandTargetFlags() const override;
  ArrayRef<std::pair<MachineMemOperand::Flags, const char *>>
  getSerializableMachineMemOperandTargetFlags() const override;

  bool isFunctionSafeToOutlineFrom(MachineFunction &MF,
                                   bool OutlineFromLinkOnceODRs) const override;
  outliner::OutlinedFunction getOutliningCandidateInfo(
      std::vector<outliner::Candidate> &RepeatedSequenceLocs) const override;
  outliner::InstrType getOutliningType(MachineBasicBlock::iterator &MIT,
                                       unsigned Flags) const override;
  bool isMBBSafeToOutlineFrom(MachineBasicBlock &MBB,
                              unsigned &Flags) const override;
  void buildOutlinedFrame(MachineBasicBlock &MBB, MachineFunction &MF,
                          const outliner::OutlinedFunction &OF) const override;
  MachineBasicBlock::iterator
  insertOutlinedCall(Module &M, MachineBasicBlock &MBB,
                     MachineBasicBlock::iterator &It, MachineFunction &MF,
                     const outliner::Candidate &C) const override;
  bool shouldOutlineFromFunctionByDefault(MachineFunction &MF) const override;
  /// Returns the vector element size (B, H, S or D) of an SVE opcode.
  uint64_t getElementSizeForOpcode(unsigned Opc) const;
  /// Returns true if the opcode is for an SVE instruction that sets the
  /// condition codes as if its results had been fed to a PTEST instruction
  /// along with the same general predicate.
  bool isPTestLikeOpcode(unsigned Opc) const;
  /// Returns true if the opcode is for an SVE WHILE## instruction.
  bool isWhileOpcode(unsigned Opc) const;
  /// Returns true if the instruction has a shift by immediate that can be
  /// executed in one cycle less.
  static bool isFalkorShiftExtFast(const MachineInstr &MI);
  /// Return true if the instruction is an SEH instruction used for unwinding
  /// on Windows.
  static bool isSEHInstruction(const MachineInstr &MI);

  Optional<RegImmPair> isAddImmediate(const MachineInstr &MI,
                                      Register Reg) const override;

  Optional<ParamLoadedValue> describeLoadedValue(const MachineInstr &MI,
                                                 Register Reg) const override;

  unsigned int getTailDuplicateSize(CodeGenOpt::Level OptLevel) const override;

  bool isExtendLikelyToBeFolded(MachineInstr &ExtMI,
                                MachineRegisterInfo &MRI) const override;

  static void decomposeStackOffsetForFrameOffsets(const StackOffset &Offset,
                                                  int64_t &NumBytes,
                                                  int64_t &NumPredicateVectors,
                                                  int64_t &NumDataVectors);
  static void decomposeStackOffsetForDwarfOffsets(const StackOffset &Offset,
                                                  int64_t &ByteSized,
                                                  int64_t &VGSized);
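
  // An assumed worked example: a StackOffset of 16 fixed bytes plus 32
  // scalable bytes decomposes into NumBytes = 16, NumDataVectors = 2
  // (16 scalable bytes per Z register) and NumPredicateVectors = 0
  // (2 scalable bytes per P register).
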
#define GET_INSTRINFO_HELPER_DECLS
#include "AArch64GenInstrInfo.inc"

protected:
  /// If the specific machine instruction is an instruction that moves/copies
  /// value from one register to another register return destination and source
  /// registers as machine operands.
  Optional<DestSourcePair>
  isCopyInstrImpl(const MachineInstr &MI) const override;

private:
  unsigned getInstBundleLength(const MachineInstr &MI) const;

  /// Sets the offsets on outlined instructions in \p MBB which use SP
  /// so that they will be valid post-outlining.
  ///
  /// \param MBB A \p MachineBasicBlock in an outlined function.
  void fixupPostOutline(MachineBasicBlock &MBB) const;

  void instantiateCondBranch(MachineBasicBlock &MBB, const DebugLoc &DL,
                             MachineBasicBlock *TBB,
                             ArrayRef<MachineOperand> Cond) const;
  bool substituteCmpToZero(MachineInstr &CmpInstr, unsigned SrcReg,
                           const MachineRegisterInfo &MRI) const;
  bool removeCmpToZeroOrOne(MachineInstr &CmpInstr, unsigned SrcReg,
                            int CmpValue, const MachineRegisterInfo &MRI) const;

  /// Returns an unused general-purpose register which can be used for
  /// constructing an outlined call if one exists. Returns 0 otherwise.
  unsigned findRegisterToSaveLRTo(const outliner::Candidate &C) const;

  /// Remove a ptest of a predicate-generating operation that already sets, or
  /// can be made to set, the condition codes in an identical manner.
  bool optimizePTestInstr(MachineInstr *PTest, unsigned MaskReg,
                          unsigned PredReg,
                          const MachineRegisterInfo *MRI) const;
};

/// Return true if there is an instruction /after/ \p DefMI and before \p UseMI
/// which either reads or clobbers NZCV.
bool isNZCVTouchedInInstructionRange(const MachineInstr &DefMI,
                                     const MachineInstr &UseMI,
                                     const TargetRegisterInfo *TRI);

/// emitFrameOffset - Emit instructions as needed to set DestReg to SrcReg
/// plus Offset. This is intended to be used from within the prolog/epilog
/// insertion (PEI) pass, where a virtual scratch register may be allocated
/// if necessary, to be replaced by the scavenger at the end of PEI.
void emitFrameOffset(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
                     const DebugLoc &DL, unsigned DestReg, unsigned SrcReg,
                     StackOffset Offset, const TargetInstrInfo *TII,
                     MachineInstr::MIFlag = MachineInstr::NoFlags,
                     bool SetNZCV = false, bool NeedsWinCFI = false,
                     bool *HasWinCFI = nullptr);
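
// A minimal usage sketch (mirrors a typical prologue adjustment; the exact
// call site is assumed, not prescribed by this header):
//   emitFrameOffset(MBB, MBBI, DL, AArch64::SP, AArch64::SP,
//                   StackOffset::getFixed(-16), TII, MachineInstr::FrameSetup);
// emits "sub sp, sp, #16", with PEI free to scavenge a scratch register when
// the offset does not fit a single instruction.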

/// rewriteAArch64FrameIndex - Rewrite MI to access 'Offset' bytes from the
/// FP. Return false if the offset could not be handled directly in MI, and
/// return the left-over portion by reference.
bool rewriteAArch64FrameIndex(MachineInstr &MI, unsigned FrameRegIdx,
                              unsigned FrameReg, StackOffset &Offset,
                              const AArch64InstrInfo *TII);

/// Use to report the frame offset status in isAArch64FrameOffsetLegal.
enum AArch64FrameOffsetStatus {
  AArch64FrameOffsetCannotUpdate = 0x0, ///< Offset cannot apply.
  AArch64FrameOffsetIsLegal = 0x1,      ///< Offset is legal.
  AArch64FrameOffsetCanUpdate = 0x2     ///< Offset can apply, at least partly.
};

/// Check if the @p Offset is a valid frame offset for @p MI.
/// The returned value reports the validity of the frame offset for @p MI.
/// It uses the values defined by AArch64FrameOffsetStatus for that.
/// If result == AArch64FrameOffsetCannotUpdate, @p MI cannot be updated to
/// use an offset.
/// If result & AArch64FrameOffsetIsLegal, @p Offset can completely be
/// rewritten in @p MI.
/// If result & AArch64FrameOffsetCanUpdate, @p Offset contains the
/// amount that is off the limit of the legal offset.
/// If set, @p OutUseUnscaledOp will contain whether @p MI should be
/// turned into an unscaled operator, whose opcode is in @p OutUnscaledOp.
/// If set, @p EmittableOffset contains the amount that can be set in @p MI
/// (possibly with @p OutUnscaledOp if OutUseUnscaledOp is true) and that
/// is a legal offset.
int isAArch64FrameOffsetLegal(const MachineInstr &MI, StackOffset &Offset,
                              bool *OutUseUnscaledOp = nullptr,
                              unsigned *OutUnscaledOp = nullptr,
                              int64_t *EmittableOffset = nullptr);
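
// Hypothetical caller sketch, combining the status bits defined above
// (variable names local to this example):
//   bool UseUnscaled; unsigned UnscaledOp; int64_t Emittable;
//   int Status = isAArch64FrameOffsetLegal(MI, Offset, &UseUnscaled,
//                                          &UnscaledOp, &Emittable);
//   if (Status & AArch64FrameOffsetCanUpdate)
//     ; // fold Emittable into MI; any remainder stays in Offset.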

static inline bool isUncondBranchOpcode(int Opc) { return Opc == AArch64::B; }

static inline bool isCondBranchOpcode(int Opc) {
  switch (Opc) {
  case AArch64::Bcc:
  case AArch64::CBZW:
  case AArch64::CBZX:
  case AArch64::CBNZW:
  case AArch64::CBNZX:
  case AArch64::TBZW:
  case AArch64::TBZX:
  case AArch64::TBNZW:
  case AArch64::TBNZX:
    return true;
  default:
    return false;
  }
}

static inline bool isIndirectBranchOpcode(int Opc) {
  switch (Opc) {
  case AArch64::BR:
  case AArch64::BRAA:
  case AArch64::BRAB:
  case AArch64::BRAAZ:
  case AArch64::BRABZ:
    return true;
  }
  return false;
}

static inline bool isPTrueOpcode(unsigned Opc) {
  switch (Opc) {
  case AArch64::PTRUE_B:
  case AArch64::PTRUE_H:
  case AArch64::PTRUE_S:
  case AArch64::PTRUE_D:
    return true;
  default:
    return false;
  }
}

/// Return opcode to be used for indirect calls.
unsigned getBLRCallOpcode(const MachineFunction &MF);

// struct TSFlags {
#define TSFLAG_ELEMENT_SIZE_TYPE(X)     (X)        // 3-bits
#define TSFLAG_DESTRUCTIVE_INST_TYPE(X) ((X) << 3) // 4-bits
#define TSFLAG_FALSE_LANE_TYPE(X)       ((X) << 7) // 2-bits
#define TSFLAG_INSTR_FLAGS(X)           ((X) << 9) // 2-bits
// }
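
// Layout implied by the shifts above (bit 0 is the least significant bit):
//   bits [0,2]  element size type  (3 bits)
//   bits [3,6]  destructive type   (4 bits)
//   bits [7,8]  false-lane type    (2 bits)
//   bits [9,10] instruction flags  (2 bits)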

namespace AArch64 {

enum ElementSizeType {
  ElementSizeMask = TSFLAG_ELEMENT_SIZE_TYPE(0x7),
  ElementSizeNone = TSFLAG_ELEMENT_SIZE_TYPE(0x0),
  ElementSizeB    = TSFLAG_ELEMENT_SIZE_TYPE(0x1),
  ElementSizeH    = TSFLAG_ELEMENT_SIZE_TYPE(0x2),
  ElementSizeS    = TSFLAG_ELEMENT_SIZE_TYPE(0x3),
  ElementSizeD    = TSFLAG_ELEMENT_SIZE_TYPE(0x4),
};

enum DestructiveInstType {
  DestructiveInstTypeMask       = TSFLAG_DESTRUCTIVE_INST_TYPE(0xf),
  NotDestructive                = TSFLAG_DESTRUCTIVE_INST_TYPE(0x0),
  DestructiveOther              = TSFLAG_DESTRUCTIVE_INST_TYPE(0x1),
  DestructiveUnary              = TSFLAG_DESTRUCTIVE_INST_TYPE(0x2),
  DestructiveBinaryImm          = TSFLAG_DESTRUCTIVE_INST_TYPE(0x3),
  DestructiveBinaryShImmUnpred  = TSFLAG_DESTRUCTIVE_INST_TYPE(0x4),
  DestructiveBinary             = TSFLAG_DESTRUCTIVE_INST_TYPE(0x5),
  DestructiveBinaryComm         = TSFLAG_DESTRUCTIVE_INST_TYPE(0x6),
  DestructiveBinaryCommWithRev  = TSFLAG_DESTRUCTIVE_INST_TYPE(0x7),
  DestructiveTernaryCommWithRev = TSFLAG_DESTRUCTIVE_INST_TYPE(0x8),
  DestructiveUnaryPassthru      = TSFLAG_DESTRUCTIVE_INST_TYPE(0x9),
};

enum FalseLaneType {
  FalseLanesMask  = TSFLAG_FALSE_LANE_TYPE(0x3),
  FalseLanesZero  = TSFLAG_FALSE_LANE_TYPE(0x1),
  FalseLanesUndef = TSFLAG_FALSE_LANE_TYPE(0x2),
};

// NOTE: This is a bit field.
static const uint64_t InstrFlagIsWhile     = TSFLAG_INSTR_FLAGS(0x1);
static const uint64_t InstrFlagIsPTestLike = TSFLAG_INSTR_FLAGS(0x2);

#undef TSFLAG_ELEMENT_SIZE_TYPE
#undef TSFLAG_DESTRUCTIVE_INST_TYPE
#undef TSFLAG_FALSE_LANE_TYPE
#undef TSFLAG_INSTR_FLAGS

int getSVEPseudoMap(uint16_t Opcode);
int getSVERevInstr(uint16_t Opcode);
int getSVENonRevInstr(uint16_t Opcode);
}

} // end namespace llvm

#endif