//===- AArch64InstrInfo.h - AArch64 Instruction Information -----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the AArch64 implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_AARCH64_AARCH64INSTRINFO_H
#define LLVM_LIB_TARGET_AARCH64_AARCH64INSTRINFO_H

#include "AArch64.h"
#include "AArch64RegisterInfo.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/Support/TypeSize.h"

#define GET_INSTRINFO_HEADER
#include "AArch64GenInstrInfo.inc"

namespace llvm {

class AArch64Subtarget;

static const MachineMemOperand::Flags MOSuppressPair =
    MachineMemOperand::MOTargetFlag1;
static const MachineMemOperand::Flags MOStridedAccess =
    MachineMemOperand::MOTargetFlag2;

#define FALKOR_STRIDED_ACCESS_MD "falkor.strided.access"

class AArch64InstrInfo final : public AArch64GenInstrInfo {
  const AArch64RegisterInfo RI;
  const AArch64Subtarget &Subtarget;

public:
  explicit AArch64InstrInfo(const AArch64Subtarget &STI);

  /// getRegisterInfo - TargetInstrInfo is a superset of MRegisterInfo. As
  /// such, whenever a client has an instance of instruction info, it should
  /// always be able to get register info as well (through this method).
  const AArch64RegisterInfo &getRegisterInfo() const { return RI; }

  unsigned getInstSizeInBytes(const MachineInstr &MI) const override;

  bool isAsCheapAsAMove(const MachineInstr &MI) const override;

  bool isCoalescableExtInstr(const MachineInstr &MI, Register &SrcReg,
                             Register &DstReg, unsigned &SubIdx) const override;

  bool
  areMemAccessesTriviallyDisjoint(const MachineInstr &MIa,
                                  const MachineInstr &MIb) const override;

  unsigned isLoadFromStackSlot(const MachineInstr &MI,
                               int &FrameIndex) const override;
  unsigned isStoreToStackSlot(const MachineInstr &MI,
                              int &FrameIndex) const override;

  /// Does this instruction set its full destination register to zero?
  static bool isGPRZero(const MachineInstr &MI);

  /// Does this instruction rename a GPR without modifying bits?
  static bool isGPRCopy(const MachineInstr &MI);

  /// Does this instruction rename an FPR without modifying bits?
  static bool isFPRCopy(const MachineInstr &MI);

  /// Return true if pairing the given load or store is hinted to be
  /// unprofitable.
  static bool isLdStPairSuppressed(const MachineInstr &MI);

  /// Return true if the given load or store is a strided memory access.
  static bool isStridedAccess(const MachineInstr &MI);

  /// Return true if it has an unscaled load/store offset.
  static bool hasUnscaledLdStOffset(unsigned Opc);
  static bool hasUnscaledLdStOffset(MachineInstr &MI) {
    return hasUnscaledLdStOffset(MI.getOpcode());
  }

  /// Returns the unscaled load/store for the scaled load/store opcode,
  /// if there is a corresponding unscaled variant available.
  static std::optional<unsigned> getUnscaledLdSt(unsigned Opc);
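
  // A minimal usage sketch (caller-side code, not from this file): when a
  // scaled immediate would not fit, a caller can switch to the unscaled
  // variant, e.g. STRXui maps to STURXi. `MI` and `TII` are assumed in scope.
  //
  //   if (std::optional<unsigned> UnscaledOp =
  //           AArch64InstrInfo::getUnscaledLdSt(MI.getOpcode()))
  //     MI.setDesc(TII->get(*UnscaledOp)); // e.g. STRXui -> STURXi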

  /// Scaling factor for (scaled or unscaled) load or store.
  static int getMemScale(unsigned Opc);
  static int getMemScale(const MachineInstr &MI) {
    return getMemScale(MI.getOpcode());
  }

  /// Returns whether the instruction is a pre-indexed load.
  static bool isPreLd(const MachineInstr &MI);

  /// Returns whether the instruction is a pre-indexed store.
  static bool isPreSt(const MachineInstr &MI);

  /// Returns whether the instruction is a pre-indexed load/store.
  static bool isPreLdSt(const MachineInstr &MI);

  /// Returns whether the instruction is a paired load/store.
  static bool isPairedLdSt(const MachineInstr &MI);

  /// Returns the base register operand of a load/store.
  static const MachineOperand &getLdStBaseOp(const MachineInstr &MI);

  /// Returns the immediate offset operand of a load/store.
  static const MachineOperand &getLdStOffsetOp(const MachineInstr &MI);

  /// Returns whether the instruction is FP or NEON.
  static bool isFpOrNEON(const MachineInstr &MI);

  /// Returns whether the instruction is in Q form (128-bit operands).
  static bool isQForm(const MachineInstr &MI);

  /// Returns the index for the immediate for a given instruction.
  static unsigned getLoadStoreImmIdx(unsigned Opc);

  /// Return true if the given load or store may be paired with another.
  static bool isPairableLdStInst(const MachineInstr &MI);

  /// Return the opcode that sets flags when possible. The caller is
  /// responsible for ensuring the opc has a flag-setting equivalent.
  static unsigned convertToFlagSettingOpc(unsigned Opc);

  /// Return true if this is a load/store that can be potentially
  /// paired/merged.
  bool isCandidateToMergeOrPair(const MachineInstr &MI) const;

  /// Hint that pairing the given load or store is unprofitable.
  static void suppressLdStPair(MachineInstr &MI);
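
  // Sketch of how the hint round-trips (assumed usage): the suppression is
  // recorded via the MOSuppressPair memoperand flag declared above, and the
  // isLdStPairSuppressed predicate simply observes that flag.
  //
  //   AArch64InstrInfo::suppressLdStPair(MI); // tag the memoperand
  //   assert(AArch64InstrInfo::isLdStPairSuppressed(MI) &&
  //          "hint should now be visible to the load/store optimizer");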

  std::optional<ExtAddrMode>
  getAddrModeFromMemoryOp(const MachineInstr &MemI,
                          const TargetRegisterInfo *TRI) const override;

  bool getMemOperandsWithOffsetWidth(
      const MachineInstr &MI, SmallVectorImpl<const MachineOperand *> &BaseOps,
      int64_t &Offset, bool &OffsetIsScalable, unsigned &Width,
      const TargetRegisterInfo *TRI) const override;

  /// If \p OffsetIsScalable is set to 'true', the offset is scaled by
  /// `vscale`. This is true for some SVE instructions like ldr/str that have a
  /// 'reg + imm' addressing mode where the immediate is an index to the
  /// scalable vector located at 'reg + imm * vscale x #bytes'.
  bool getMemOperandWithOffsetWidth(const MachineInstr &MI,
                                    const MachineOperand *&BaseOp,
                                    int64_t &Offset, bool &OffsetIsScalable,
                                    unsigned &Width,
                                    const TargetRegisterInfo *TRI) const;

  /// Return the immediate offset of the base register in a load/store \p LdSt.
  MachineOperand &getMemOpBaseRegImmOfsOffsetOperand(MachineInstr &LdSt) const;

  /// Returns true if opcode \p Opcode is a memory operation. If it is, set
  /// \p Scale, \p Width, \p MinOffset, and \p MaxOffset accordingly.
  ///
  /// For unscaled instructions, \p Scale is set to 1.
  static bool getMemOpInfo(unsigned Opcode, TypeSize &Scale, unsigned &Width,
                           int64_t &MinOffset, int64_t &MaxOffset);
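
  // A minimal query sketch (caller-side code; LDRXui is an arbitrary real
  // opcode, `NewOff` is an assumed candidate offset): check that an immediate
  // stays in range before rewriting the offset operand.
  //
  //   TypeSize Scale = TypeSize::getFixed(0);
  //   unsigned Width;
  //   int64_t MinOff, MaxOff;
  //   bool Known = AArch64InstrInfo::getMemOpInfo(AArch64::LDRXui, Scale,
  //                                               Width, MinOff, MaxOff);
  //   bool Fits = Known && NewOff >= MinOff && NewOff <= MaxOff;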

  bool shouldClusterMemOps(ArrayRef<const MachineOperand *> BaseOps1,
                           ArrayRef<const MachineOperand *> BaseOps2,
                           unsigned NumLoads, unsigned NumBytes) const override;

  void copyPhysRegTuple(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
                        const DebugLoc &DL, MCRegister DestReg,
                        MCRegister SrcReg, bool KillSrc, unsigned Opcode,
                        llvm::ArrayRef<unsigned> Indices) const;
  void copyGPRRegTuple(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
                       DebugLoc DL, unsigned DestReg, unsigned SrcReg,
                       bool KillSrc, unsigned Opcode, unsigned ZeroReg,
                       llvm::ArrayRef<unsigned> Indices) const;
  void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
                   const DebugLoc &DL, MCRegister DestReg, MCRegister SrcReg,
                   bool KillSrc) const override;

  void storeRegToStackSlot(MachineBasicBlock &MBB,
                           MachineBasicBlock::iterator MBBI, Register SrcReg,
                           bool isKill, int FrameIndex,
                           const TargetRegisterClass *RC,
                           const TargetRegisterInfo *TRI,
                           Register VReg) const override;

  void loadRegFromStackSlot(MachineBasicBlock &MBB,
                            MachineBasicBlock::iterator MBBI, Register DestReg,
                            int FrameIndex, const TargetRegisterClass *RC,
                            const TargetRegisterInfo *TRI,
                            Register VReg) const override;

  // This tells target independent code that it is okay to pass instructions
  // with subreg operands to foldMemoryOperandImpl.
  bool isSubregFoldable() const override { return true; }

  using TargetInstrInfo::foldMemoryOperandImpl;
  MachineInstr *
  foldMemoryOperandImpl(MachineFunction &MF, MachineInstr &MI,
                        ArrayRef<unsigned> Ops,
                        MachineBasicBlock::iterator InsertPt, int FrameIndex,
                        LiveIntervals *LIS = nullptr,
                        VirtRegMap *VRM = nullptr) const override;

  /// \returns true if a branch with opcode \p BranchOpc is capable of jumping
  /// to a position \p BrOffset bytes away.
  bool isBranchOffsetInRange(unsigned BranchOpc,
                             int64_t BrOffset) const override;

  MachineBasicBlock *getBranchDestBlock(const MachineInstr &MI) const override;

  bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
                     MachineBasicBlock *&FBB,
                     SmallVectorImpl<MachineOperand> &Cond,
                     bool AllowModify = false) const override;
  bool analyzeBranchPredicate(MachineBasicBlock &MBB,
                              MachineBranchPredicate &MBP,
                              bool AllowModify) const override;
  unsigned removeBranch(MachineBasicBlock &MBB,
                        int *BytesRemoved = nullptr) const override;
  unsigned insertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
                        MachineBasicBlock *FBB, ArrayRef<MachineOperand> Cond,
                        const DebugLoc &DL,
                        int *BytesAdded = nullptr) const override;
  bool
  reverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const override;
  bool canInsertSelect(const MachineBasicBlock &, ArrayRef<MachineOperand> Cond,
                       Register, Register, Register, int &, int &,
                       int &) const override;
  void insertSelect(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
                    const DebugLoc &DL, Register DstReg,
                    ArrayRef<MachineOperand> Cond, Register TrueReg,
                    Register FalseReg) const override;
  MCInst getNop() const override;

  bool isSchedulingBoundary(const MachineInstr &MI,
                            const MachineBasicBlock *MBB,
                            const MachineFunction &MF) const override;

  /// analyzeCompare - For a comparison instruction, return the source
  /// registers in SrcReg and SrcReg2, and the value it compares against in
  /// CmpValue. Return true if the comparison instruction can be analyzed.
  bool analyzeCompare(const MachineInstr &MI, Register &SrcReg,
                      Register &SrcReg2, int64_t &CmpMask,
                      int64_t &CmpValue) const override;
  /// optimizeCompareInstr - Convert the instruction supplying the argument to
  /// the comparison into one that sets the zero bit in the flags register.
  bool optimizeCompareInstr(MachineInstr &CmpInstr, Register SrcReg,
                            Register SrcReg2, int64_t CmpMask, int64_t CmpValue,
                            const MachineRegisterInfo *MRI) const override;
  bool optimizeCondBranch(MachineInstr &MI) const override;

  /// Return true when a code sequence can improve throughput. It
  /// should be called only for instructions in loops.
  /// \param Pattern - combiner pattern
  bool isThroughputPattern(MachineCombinerPattern Pattern) const override;
  /// Return true when there is potentially a faster code sequence
  /// for an instruction chain ending in ``Root``. All potential patterns are
  /// listed in the ``Patterns`` array.
  bool
  getMachineCombinerPatterns(MachineInstr &Root,
                             SmallVectorImpl<MachineCombinerPattern> &Patterns,
                             bool DoRegPressureReduce) const override;
  /// Return true when Inst is associative and commutative so that it can be
  /// reassociated. If Invert is true, then the inverse of Inst operation must
  /// be checked.
  bool isAssociativeAndCommutative(const MachineInstr &Inst,
                                   bool Invert) const override;
  /// When getMachineCombinerPatterns() finds patterns, this function generates
  /// the instructions that could replace the original code sequence.
  void genAlternativeCodeSequence(
      MachineInstr &Root, MachineCombinerPattern Pattern,
      SmallVectorImpl<MachineInstr *> &InsInstrs,
      SmallVectorImpl<MachineInstr *> &DelInstrs,
      DenseMap<unsigned, unsigned> &InstrIdxForVirtReg) const override;
  /// AArch64 supports MachineCombiner.
  bool useMachineCombiner() const override;

  bool expandPostRAPseudo(MachineInstr &MI) const override;

  std::pair<unsigned, unsigned>
  decomposeMachineOperandsTargetFlags(unsigned TF) const override;
  ArrayRef<std::pair<unsigned, const char *>>
  getSerializableDirectMachineOperandTargetFlags() const override;
  ArrayRef<std::pair<unsigned, const char *>>
  getSerializableBitmaskMachineOperandTargetFlags() const override;
  ArrayRef<std::pair<MachineMemOperand::Flags, const char *>>
  getSerializableMachineMemOperandTargetFlags() const override;

  bool isFunctionSafeToOutlineFrom(MachineFunction &MF,
                                   bool OutlineFromLinkOnceODRs) const override;
  outliner::OutlinedFunction getOutliningCandidateInfo(
      std::vector<outliner::Candidate> &RepeatedSequenceLocs) const override;
  outliner::InstrType
  getOutliningType(MachineBasicBlock::iterator &MIT,
                   unsigned Flags) const override;
  bool isMBBSafeToOutlineFrom(MachineBasicBlock &MBB,
                              unsigned &Flags) const override;
  void buildOutlinedFrame(MachineBasicBlock &MBB, MachineFunction &MF,
                          const outliner::OutlinedFunction &OF) const override;
  MachineBasicBlock::iterator
  insertOutlinedCall(Module &M, MachineBasicBlock &MBB,
                     MachineBasicBlock::iterator &It, MachineFunction &MF,
                     outliner::Candidate &C) const override;
  bool shouldOutlineFromFunctionByDefault(MachineFunction &MF) const override;
  /// Returns the vector element size (B, H, S or D) of an SVE opcode.
  uint64_t getElementSizeForOpcode(unsigned Opc) const;
  /// Returns true if the opcode is for an SVE instruction that sets the
  /// condition codes as if its results had been fed to a PTEST instruction
  /// along with the same general predicate.
  bool isPTestLikeOpcode(unsigned Opc) const;
  /// Returns true if the opcode is for an SVE WHILE## instruction.
  bool isWhileOpcode(unsigned Opc) const;
  /// Returns true if the instruction has a shift by immediate that can be
  /// executed in one cycle less.
  static bool isFalkorShiftExtFast(const MachineInstr &MI);
  /// Return true if the instruction is an SEH instruction used for unwinding
  /// on Windows.
  static bool isSEHInstruction(const MachineInstr &MI);

  std::optional<RegImmPair> isAddImmediate(const MachineInstr &MI,
                                           Register Reg) const override;

  std::optional<ParamLoadedValue>
  describeLoadedValue(const MachineInstr &MI, Register Reg) const override;

  unsigned int getTailDuplicateSize(CodeGenOpt::Level OptLevel) const override;

  bool isExtendLikelyToBeFolded(MachineInstr &ExtMI,
                                MachineRegisterInfo &MRI) const override;

  static void decomposeStackOffsetForFrameOffsets(const StackOffset &Offset,
                                                  int64_t &NumBytes,
                                                  int64_t &NumPredicateVectors,
                                                  int64_t &NumDataVectors);
  static void decomposeStackOffsetForDwarfOffsets(const StackOffset &Offset,
                                                  int64_t &ByteSized,
                                                  int64_t &VGSized);
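
  // A worked sketch (values assumed for illustration): 16 fixed bytes plus
  // 32 scalable bytes decompose into 16 plain bytes and two SVE data vectors,
  // since a data vector is 16 scalable bytes and a predicate vector is 2.
  //
  //   int64_t Bytes, PredVecs, DataVecs;
  //   AArch64InstrInfo::decomposeStackOffsetForFrameOffsets(
  //       StackOffset::get(/*Fixed=*/16, /*Scalable=*/32), Bytes, PredVecs,
  //       DataVecs);
  //   // Bytes == 16, PredVecs == 0, DataVecs == 2
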
#define GET_INSTRINFO_HELPER_DECLS
#include "AArch64GenInstrInfo.inc"

protected:
  /// If the specific machine instruction is an instruction that moves/copies
  /// a value from one register to another, return the destination and source
  /// registers as machine operands.
  std::optional<DestSourcePair>
  isCopyInstrImpl(const MachineInstr &MI) const override;

private:
  unsigned getInstBundleLength(const MachineInstr &MI) const;

  /// Sets the offsets on outlined instructions in \p MBB which use SP
  /// so that they will be valid post-outlining.
  ///
  /// \param MBB A \p MachineBasicBlock in an outlined function.
  void fixupPostOutline(MachineBasicBlock &MBB) const;

  void instantiateCondBranch(MachineBasicBlock &MBB, const DebugLoc &DL,
                             MachineBasicBlock *TBB,
                             ArrayRef<MachineOperand> Cond) const;
  bool substituteCmpToZero(MachineInstr &CmpInstr, unsigned SrcReg,
                           const MachineRegisterInfo &MRI) const;
  bool removeCmpToZeroOrOne(MachineInstr &CmpInstr, unsigned SrcReg,
                            int CmpValue, const MachineRegisterInfo &MRI) const;

  /// Returns an unused general-purpose register which can be used for
  /// constructing an outlined call if one exists. Returns 0 otherwise.
  Register findRegisterToSaveLRTo(outliner::Candidate &C) const;

  /// Remove a ptest of a predicate-generating operation that already sets, or
  /// can be made to set, the condition codes in an identical manner.
  bool optimizePTestInstr(MachineInstr *PTest, unsigned MaskReg,
                          unsigned PredReg,
                          const MachineRegisterInfo *MRI) const;
};

struct UsedNZCV {
  bool N = false;
  bool Z = false;
  bool C = false;
  bool V = false;

  UsedNZCV() = default;

  UsedNZCV &operator|=(const UsedNZCV &UsedFlags) {
    this->N |= UsedFlags.N;
    this->Z |= UsedFlags.Z;
    this->C |= UsedFlags.C;
    this->V |= UsedFlags.V;
    return *this;
  }
};
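
// A minimal accumulation sketch (illustrative; `computeUsedNZCV` is a
// hypothetical per-instruction query, not part of this header): OR together
// the flags read by every NZCV user, e.g. those collected in CCUseInstrs by
// examineCFlagsUse below, to decide whether C and V are dead.
//
//   UsedNZCV Used;
//   for (const MachineInstr *CCUser : CCUseInstrs)
//     Used |= computeUsedNZCV(*CCUser);
//   bool OnlyNZUsed = !Used.C && !Used.V;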

/// \returns Condition flags used after \p CmpInstr in its MachineBB if NZCV
/// flags are not alive in successors of the same \p CmpInstr and \p MI parent.
/// \returns std::nullopt otherwise.
///
/// Collects instructions using those flags in \p CCUseInstrs if provided.
std::optional<UsedNZCV>
examineCFlagsUse(MachineInstr &MI, MachineInstr &CmpInstr,
                 const TargetRegisterInfo &TRI,
                 SmallVectorImpl<MachineInstr *> *CCUseInstrs = nullptr);

/// Return true if there is an instruction /after/ \p DefMI and before \p UseMI
/// which either reads or clobbers NZCV.
bool isNZCVTouchedInInstructionRange(const MachineInstr &DefMI,
                                     const MachineInstr &UseMI,
                                     const TargetRegisterInfo *TRI);

MCCFIInstruction createDefCFA(const TargetRegisterInfo &TRI, unsigned FrameReg,
                              unsigned Reg, const StackOffset &Offset,
                              bool LastAdjustmentWasScalable = true);
MCCFIInstruction createCFAOffset(const TargetRegisterInfo &MRI, unsigned Reg,
                                 const StackOffset &OffsetFromDefCFA);

/// emitFrameOffset - Emit instructions as needed to set DestReg to SrcReg
/// plus Offset. This is intended to be used from within the prolog/epilog
/// insertion (PEI) pass, where a virtual scratch register may be allocated
/// if necessary, to be replaced by the scavenger at the end of PEI.
void emitFrameOffset(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
                     const DebugLoc &DL, unsigned DestReg, unsigned SrcReg,
                     StackOffset Offset, const TargetInstrInfo *TII,
                     MachineInstr::MIFlag = MachineInstr::NoFlags,
                     bool SetNZCV = false, bool NeedsWinCFI = false,
                     bool *HasWinCFI = nullptr, bool EmitCFAOffset = false,
                     StackOffset InitialOffset = {},
                     unsigned FrameReg = AArch64::SP);
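
// A minimal prologue-style sketch (insertion point and flags are assumed):
// drop SP by 16 bytes, tagging the instruction as frame setup so later
// passes can recognize it.
//
//   emitFrameOffset(MBB, MBBI, DL, AArch64::SP, AArch64::SP,
//                   StackOffset::getFixed(-16), TII,
//                   MachineInstr::FrameSetup);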

/// rewriteAArch64FrameIndex - Rewrite MI to access 'Offset' bytes from the
/// FP. Return false if the offset could not be handled directly in MI, and
/// return the left-over portion by reference.
bool rewriteAArch64FrameIndex(MachineInstr &MI, unsigned FrameRegIdx,
                              unsigned FrameReg, StackOffset &Offset,
                              const AArch64InstrInfo *TII);

/// Use to report the frame offset status in isAArch64FrameOffsetLegal.
enum AArch64FrameOffsetStatus {
  AArch64FrameOffsetCannotUpdate = 0x0, ///< Offset cannot apply.
  AArch64FrameOffsetIsLegal = 0x1,      ///< Offset is legal.
  AArch64FrameOffsetCanUpdate = 0x2     ///< Offset can apply, at least partly.
};

/// Check if the @p Offset is a valid frame offset for @p MI.
/// The returned value reports the validity of the frame offset for @p MI.
/// It uses the values defined by AArch64FrameOffsetStatus for that.
/// If result == AArch64FrameOffsetCannotUpdate, @p MI cannot be updated to
/// use an offset.
/// If result & AArch64FrameOffsetIsLegal, @p Offset can completely be
/// rewritten in @p MI.
/// If result & AArch64FrameOffsetCanUpdate, @p Offset contains the
/// amount that is off the limit of the legal offset.
/// If set, @p OutUseUnscaledOp will contain whether @p MI should be
/// turned into an unscaled operator, whose opcode is in @p OutUnscaledOp.
/// If set, @p EmittableOffset contains the amount that can be set in @p MI
/// (possibly with @p OutUnscaledOp if OutUseUnscaledOp is true) and that
/// is a legal offset.
int isAArch64FrameOffsetLegal(const MachineInstr &MI, StackOffset &Offset,
                              bool *OutUseUnscaledOp = nullptr,
                              unsigned *OutUnscaledOp = nullptr,
                              int64_t *EmittableOffset = nullptr);
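
// A decoding sketch (assumed caller-side code): the status is a bitmask, so
// test the bits rather than comparing for equality.
//
//   bool UseUnscaled = false;
//   unsigned UnscaledOp = 0;
//   int64_t Emittable = 0;
//   int Status = isAArch64FrameOffsetLegal(MI, Offset, &UseUnscaled,
//                                          &UnscaledOp, &Emittable);
//   if (Status & AArch64FrameOffsetCanUpdate) {
//     // Fold `Emittable` into MI (switching to `UnscaledOp` if
//     // `UseUnscaled`); `Offset` now holds any residue, which is zero when
//     // AArch64FrameOffsetIsLegal is also set.
//   }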

static inline bool isUncondBranchOpcode(int Opc) { return Opc == AArch64::B; }

static inline bool isCondBranchOpcode(int Opc) {
  switch (Opc) {
  case AArch64::Bcc:
  case AArch64::CBZW:
  case AArch64::CBZX:
  case AArch64::CBNZW:
  case AArch64::CBNZX:
  case AArch64::TBZW:
  case AArch64::TBZX:
  case AArch64::TBNZW:
  case AArch64::TBNZX:
    return true;
  default:
    return false;
  }
}

static inline bool isIndirectBranchOpcode(int Opc) {
  switch (Opc) {
  case AArch64::BR:
  case AArch64::BRAA:
  case AArch64::BRAB:
  case AArch64::BRAAZ:
  case AArch64::BRABZ:
    return true;
  }
  return false;
}

static inline bool isPTrueOpcode(unsigned Opc) {
  switch (Opc) {
  case AArch64::PTRUE_B:
  case AArch64::PTRUE_H:
  case AArch64::PTRUE_S:
  case AArch64::PTRUE_D:
    return true;
  default:
    return false;
  }
}

/// Return opcode to be used for indirect calls.
unsigned getBLRCallOpcode(const MachineFunction &MF);

/// Return XPAC opcode to be used for a ptrauth strip using the given key.
static inline unsigned getXPACOpcodeForKey(AArch64PACKey::ID K) {
  using namespace AArch64PACKey;
  switch (K) {
  case IA: case IB: return AArch64::XPACI;
  case DA: case DB: return AArch64::XPACD;
  }
  llvm_unreachable("Unhandled AArch64PACKey::ID enum");
}

/// Return AUT opcode to be used for a ptrauth auth using the given key, or its
/// AUT*Z variant that doesn't take a discriminator operand, using zero instead.
static inline unsigned getAUTOpcodeForKey(AArch64PACKey::ID K, bool Zero) {
  using namespace AArch64PACKey;
  switch (K) {
  case IA: return Zero ? AArch64::AUTIZA : AArch64::AUTIA;
  case IB: return Zero ? AArch64::AUTIZB : AArch64::AUTIB;
  case DA: return Zero ? AArch64::AUTDZA : AArch64::AUTDA;
  case DB: return Zero ? AArch64::AUTDZB : AArch64::AUTDB;
  }
  llvm_unreachable("Unhandled AArch64PACKey::ID enum");
}

/// Return PAC opcode to be used for a ptrauth sign using the given key, or its
/// PAC*Z variant that doesn't take a discriminator operand, using zero instead.
static inline unsigned getPACOpcodeForKey(AArch64PACKey::ID K, bool Zero) {
  using namespace AArch64PACKey;
  switch (K) {
  case IA: return Zero ? AArch64::PACIZA : AArch64::PACIA;
  case IB: return Zero ? AArch64::PACIZB : AArch64::PACIB;
  case DA: return Zero ? AArch64::PACDZA : AArch64::PACDA;
  case DB: return Zero ? AArch64::PACDZB : AArch64::PACDB;
  }
  llvm_unreachable("Unhandled AArch64PACKey::ID enum");
}
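
// A minimal emission sketch (BuildMI context is assumed; X16 is chosen
// arbitrarily): sign a pointer with the IA key and a zero discriminator.
// PACIZA reads and writes the same register, hence the repeated operand.
//
//   BuildMI(MBB, MBBI, DL,
//           TII->get(getPACOpcodeForKey(AArch64PACKey::IA, /*Zero=*/true)),
//           AArch64::X16)
//       .addReg(AArch64::X16);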

// struct TSFlags {
#define TSFLAG_ELEMENT_SIZE_TYPE(X)      (X)         // 3-bits
#define TSFLAG_DESTRUCTIVE_INST_TYPE(X)  ((X) << 3)  // 4-bits
#define TSFLAG_FALSE_LANE_TYPE(X)        ((X) << 7)  // 2-bits
#define TSFLAG_INSTR_FLAGS(X)            ((X) << 9)  // 2-bits
#define TSFLAG_SME_MATRIX_TYPE(X)        ((X) << 11) // 3-bits
// }

namespace AArch64 {

enum ElementSizeType {
  ElementSizeMask = TSFLAG_ELEMENT_SIZE_TYPE(0x7),
  ElementSizeNone = TSFLAG_ELEMENT_SIZE_TYPE(0x0),
  ElementSizeB = TSFLAG_ELEMENT_SIZE_TYPE(0x1),
  ElementSizeH = TSFLAG_ELEMENT_SIZE_TYPE(0x2),
  ElementSizeS = TSFLAG_ELEMENT_SIZE_TYPE(0x3),
  ElementSizeD = TSFLAG_ELEMENT_SIZE_TYPE(0x4),
};

enum DestructiveInstType {
  DestructiveInstTypeMask = TSFLAG_DESTRUCTIVE_INST_TYPE(0xf),
  NotDestructive = TSFLAG_DESTRUCTIVE_INST_TYPE(0x0),
  DestructiveOther = TSFLAG_DESTRUCTIVE_INST_TYPE(0x1),
  DestructiveUnary = TSFLAG_DESTRUCTIVE_INST_TYPE(0x2),
  DestructiveBinaryImm = TSFLAG_DESTRUCTIVE_INST_TYPE(0x3),
  DestructiveBinaryShImmUnpred = TSFLAG_DESTRUCTIVE_INST_TYPE(0x4),
  DestructiveBinary = TSFLAG_DESTRUCTIVE_INST_TYPE(0x5),
  DestructiveBinaryComm = TSFLAG_DESTRUCTIVE_INST_TYPE(0x6),
  DestructiveBinaryCommWithRev = TSFLAG_DESTRUCTIVE_INST_TYPE(0x7),
  DestructiveTernaryCommWithRev = TSFLAG_DESTRUCTIVE_INST_TYPE(0x8),
  DestructiveUnaryPassthru = TSFLAG_DESTRUCTIVE_INST_TYPE(0x9),
};

enum FalseLaneType {
  FalseLanesMask = TSFLAG_FALSE_LANE_TYPE(0x3),
  FalseLanesZero = TSFLAG_FALSE_LANE_TYPE(0x1),
  FalseLanesUndef = TSFLAG_FALSE_LANE_TYPE(0x2),
};

// NOTE: This is a bit field.
static const uint64_t InstrFlagIsWhile = TSFLAG_INSTR_FLAGS(0x1);
static const uint64_t InstrFlagIsPTestLike = TSFLAG_INSTR_FLAGS(0x2);

enum SMEMatrixType {
  SMEMatrixTypeMask = TSFLAG_SME_MATRIX_TYPE(0x7),
  SMEMatrixNone = TSFLAG_SME_MATRIX_TYPE(0x0),
  SMEMatrixTileB = TSFLAG_SME_MATRIX_TYPE(0x1),
  SMEMatrixTileH = TSFLAG_SME_MATRIX_TYPE(0x2),
  SMEMatrixTileS = TSFLAG_SME_MATRIX_TYPE(0x3),
  SMEMatrixTileD = TSFLAG_SME_MATRIX_TYPE(0x4),
  SMEMatrixTileQ = TSFLAG_SME_MATRIX_TYPE(0x5),
  SMEMatrixArray = TSFLAG_SME_MATRIX_TYPE(0x6),
};
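
// A decoding sketch (illustrative; `Desc` is an assumed MCInstrDesc): each
// field above occupies its own shifted slot of an instruction's TSFlags
// word, so queries mask and compare.
//
//   uint64_t Flags = Desc.TSFlags;
//   bool IsDWord = (Flags & ElementSizeMask) == ElementSizeD;
//   bool IsPTestLike = Flags & InstrFlagIsPTestLike;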

#undef TSFLAG_ELEMENT_SIZE_TYPE
#undef TSFLAG_DESTRUCTIVE_INST_TYPE
#undef TSFLAG_FALSE_LANE_TYPE
#undef TSFLAG_INSTR_FLAGS
#undef TSFLAG_SME_MATRIX_TYPE

int getSVEPseudoMap(uint16_t Opcode);
int getSVERevInstr(uint16_t Opcode);
int getSVENonRevInstr(uint16_t Opcode);

int getSMEPseudoMap(uint16_t Opcode);
} // end namespace AArch64

} // end namespace llvm

#endif