//===- AArch64InstrInfo.cpp - AArch64 Instruction Information -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the AArch64 implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#include "AArch64InstrInfo.h"
#include "AArch64MachineFunctionInfo.h"
#include "AArch64Subtarget.h"
#include "MCTargetDesc/AArch64AddressingModes.h"
#include "Utils/AArch64BaseInfo.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineCombinerPattern.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstBuilder.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/LEB128.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include <cassert>
#include <cstdint>
#include <iterator>
#include <utility>

using namespace llvm;

#define GET_INSTRINFO_CTOR_DTOR
#include "AArch64GenInstrInfo.inc"

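// These options artificially restrict the assumed displacement range of the
// corresponding branch instructions, primarily so that the branch relaxation
// pass can be exercised in tests without requiring very large functions.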
static cl::opt<unsigned> TBZDisplacementBits(
    "aarch64-tbz-offset-bits", cl::Hidden, cl::init(14),
    cl::desc("Restrict range of TB[N]Z instructions (DEBUG)"));

static cl::opt<unsigned> CBZDisplacementBits(
    "aarch64-cbz-offset-bits", cl::Hidden, cl::init(19),
    cl::desc("Restrict range of CB[N]Z instructions (DEBUG)"));

static cl::opt<unsigned>
    BCCDisplacementBits("aarch64-bcc-offset-bits", cl::Hidden, cl::init(19),
                        cl::desc("Restrict range of Bcc instructions (DEBUG)"));

AArch64InstrInfo::AArch64InstrInfo(const AArch64Subtarget &STI)
    : AArch64GenInstrInfo(AArch64::ADJCALLSTACKDOWN, AArch64::ADJCALLSTACKUP,
                          AArch64::CATCHRET),
      RI(STI.getTargetTriple()), Subtarget(STI) {}

/// GetInstSize - Return the number of bytes of code the specified
/// instruction may occupy.  This returns the maximum number of bytes.
unsigned AArch64InstrInfo::getInstSizeInBytes(const MachineInstr &MI) const {
  const MachineBasicBlock &MBB = *MI.getParent();
  const MachineFunction *MF = MBB.getParent();
  const MCAsmInfo *MAI = MF->getTarget().getMCAsmInfo();

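  // Inline assembly is measured separately: getInlineAsmLength returns a
  // conservative estimate based on the assembly string.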
  {
    auto Op = MI.getOpcode();
    if (Op == AArch64::INLINEASM || Op == AArch64::INLINEASM_BR)
      return getInlineAsmLength(MI.getOperand(0).getSymbolName(), *MAI);
  }

  // Meta-instructions emit no code.
  if (MI.isMetaInstruction())
    return 0;

  // FIXME: We currently only handle pseudoinstructions that don't get expanded
  //        before the assembly printer.
  unsigned NumBytes = 0;
  const MCInstrDesc &Desc = MI.getDesc();

  // The size should preferably be set in
  // llvm/lib/Target/AArch64/AArch64InstrInfo.td (default case).
  // Specific cases below handle instructions of variable size.
  switch (Desc.getOpcode()) {
  default:
    if (Desc.getSize())
      return Desc.getSize();

    // Anything not explicitly designated otherwise (i.e. pseudo-instructions
    // with fixed constant size but not specified in .td file) is a normal
    // 4-byte insn.
    NumBytes = 4;

    if (Desc.getSize() > 0)
      NumBytes = Desc.getSize();
    break;
  case TargetOpcode::STACKMAP:
    // The upper bound for a stackmap intrinsic is the full length of its shadow
    NumBytes = StackMapOpers(&MI).getNumPatchBytes();
    assert(NumBytes % 4 == 0 && "Invalid number of NOP bytes requested!");
    break;
  case TargetOpcode::PATCHPOINT:
    // The size of the patchpoint intrinsic is the number of bytes requested
    NumBytes = PatchPointOpers(&MI).getNumPatchBytes();
    assert(NumBytes % 4 == 0 && "Invalid number of NOP bytes requested!");
    break;
  case TargetOpcode::STATEPOINT:
    NumBytes = StatepointOpers(&MI).getNumPatchBytes();
    assert(NumBytes % 4 == 0 && "Invalid number of NOP bytes requested!");
    // No patch bytes means a normal call inst is emitted
    if (NumBytes == 0)
      NumBytes = 4;
    break;
  case AArch64::SPACE:
    NumBytes = MI.getOperand(1).getImm();
    break;
  case TargetOpcode::BUNDLE:
    NumBytes = getInstBundleLength(MI);
    break;
  }

  return NumBytes;
}

unsigned AArch64InstrInfo::getInstBundleLength(const MachineInstr &MI) const {
  unsigned Size = 0;
  MachineBasicBlock::const_instr_iterator I = MI.getIterator();
  MachineBasicBlock::const_instr_iterator E = MI.getParent()->instr_end();
  while (++I != E && I->isInsideBundle()) {
    assert(!I->isBundle() && "No nested bundle!");
    Size += getInstSizeInBytes(*I);
  }
  return Size;
}

static void parseCondBranch(MachineInstr *LastInst, MachineBasicBlock *&Target,
                            SmallVectorImpl<MachineOperand> &Cond) {
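  // The Cond vector encodes the branch condition for later use by
  // insertBranch/insertSelect: for Bcc it holds just the condition code; for
  // cb(n)z and tb(n)z it holds a -1 sentinel, the branch opcode, the tested
  // register and, for tb(n)z, the bit number.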
  // Block ends with fall-through condbranch.
  switch (LastInst->getOpcode()) {
  default:
    llvm_unreachable("Unknown branch instruction?");
  case AArch64::Bcc:
    Target = LastInst->getOperand(1).getMBB();
    Cond.push_back(LastInst->getOperand(0));
    break;
  case AArch64::CBZW:
  case AArch64::CBZX:
  case AArch64::CBNZW:
  case AArch64::CBNZX:
    Target = LastInst->getOperand(1).getMBB();
    Cond.push_back(MachineOperand::CreateImm(-1));
    Cond.push_back(MachineOperand::CreateImm(LastInst->getOpcode()));
    Cond.push_back(LastInst->getOperand(0));
    break;
  case AArch64::TBZW:
  case AArch64::TBZX:
  case AArch64::TBNZW:
  case AArch64::TBNZX:
    Target = LastInst->getOperand(2).getMBB();
    Cond.push_back(MachineOperand::CreateImm(-1));
    Cond.push_back(MachineOperand::CreateImm(LastInst->getOpcode()));
    Cond.push_back(LastInst->getOperand(0));
    Cond.push_back(LastInst->getOperand(1));
  }
}

static unsigned getBranchDisplacementBits(unsigned Opc) {
  switch (Opc) {
  default:
    llvm_unreachable("unexpected opcode!");
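  // An unconditional B has a 26-bit offset field; treat its range as
  // effectively unbounded for the purpose of branch relaxation.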
  case AArch64::B:
    return 64;
  case AArch64::TBNZW:
  case AArch64::TBZW:
  case AArch64::TBNZX:
  case AArch64::TBZX:
    return TBZDisplacementBits;
  case AArch64::CBNZW:
  case AArch64::CBZW:
  case AArch64::CBNZX:
  case AArch64::CBZX:
    return CBZDisplacementBits;
  case AArch64::Bcc:
    return BCCDisplacementBits;
  }
}

bool AArch64InstrInfo::isBranchOffsetInRange(unsigned BranchOp,
                                             int64_t BrOffset) const {
  unsigned Bits = getBranchDisplacementBits(BranchOp);
  assert(Bits >= 3 && "max branch displacement must be enough to jump "
                      "over conditional branch expansion");
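  // The displacement is a byte offset; instructions are 4 bytes, so the
  // encodable immediate covers BrOffset / 4.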
  return isIntN(Bits, BrOffset / 4);
}

MachineBasicBlock *
AArch64InstrInfo::getBranchDestBlock(const MachineInstr &MI) const {
  switch (MI.getOpcode()) {
  default:
    llvm_unreachable("unexpected opcode!");
  case AArch64::B:
    return MI.getOperand(0).getMBB();
  case AArch64::TBZW:
  case AArch64::TBNZW:
  case AArch64::TBZX:
  case AArch64::TBNZX:
    return MI.getOperand(2).getMBB();
  case AArch64::CBZW:
  case AArch64::CBNZW:
  case AArch64::CBZX:
  case AArch64::CBNZX:
  case AArch64::Bcc:
    return MI.getOperand(1).getMBB();
  }
}

// Branch analysis.
bool AArch64InstrInfo::analyzeBranch(MachineBasicBlock &MBB,
                                     MachineBasicBlock *&TBB,
                                     MachineBasicBlock *&FBB,
                                     SmallVectorImpl<MachineOperand> &Cond,
                                     bool AllowModify) const {
  // If the block has no terminators, it just falls into the block after it.
  MachineBasicBlock::iterator I = MBB.getLastNonDebugInstr();
  if (I == MBB.end())
    return false;

  // Skip over SpeculationBarrierEndBB terminators
  if (I->getOpcode() == AArch64::SpeculationBarrierISBDSBEndBB ||
      I->getOpcode() == AArch64::SpeculationBarrierSBEndBB) {
    --I;
  }

  if (!isUnpredicatedTerminator(*I))
    return false;

  // Get the last instruction in the block.
  MachineInstr *LastInst = &*I;

  // If there is only one terminator instruction, process it.
  unsigned LastOpc = LastInst->getOpcode();
  if (I == MBB.begin() || !isUnpredicatedTerminator(*--I)) {
    if (isUncondBranchOpcode(LastOpc)) {
      TBB = LastInst->getOperand(0).getMBB();
      return false;
    }
    if (isCondBranchOpcode(LastOpc)) {
      // Block ends with fall-through condbranch.
      parseCondBranch(LastInst, TBB, Cond);
      return false;
    }
    return true; // Can't handle indirect branch.
  }

  // Get the instruction before it if it is a terminator.
  MachineInstr *SecondLastInst = &*I;
  unsigned SecondLastOpc = SecondLastInst->getOpcode();

  // If AllowModify is true and the block ends with two or more unconditional
  // branches, delete all but the first unconditional branch.
  if (AllowModify && isUncondBranchOpcode(LastOpc)) {
    while (isUncondBranchOpcode(SecondLastOpc)) {
      LastInst->eraseFromParent();
      LastInst = SecondLastInst;
      LastOpc = LastInst->getOpcode();
      if (I == MBB.begin() || !isUnpredicatedTerminator(*--I)) {
        // Return now; the only remaining terminator is an unconditional branch.
        TBB = LastInst->getOperand(0).getMBB();
        return false;
      } else {
        SecondLastInst = &*I;
        SecondLastOpc = SecondLastInst->getOpcode();
      }
    }
  }

  // If we're allowed to modify and the block ends in an unconditional branch
  // which could simply fall through, remove the branch.  (Note: This case only
  // matters when we can't understand the whole sequence, otherwise it's also
  // handled by BranchFolding.cpp.)
  if (AllowModify && isUncondBranchOpcode(LastOpc) &&
      MBB.isLayoutSuccessor(getBranchDestBlock(*LastInst))) {
    LastInst->eraseFromParent();
    LastInst = SecondLastInst;
    LastOpc = LastInst->getOpcode();
    if (I == MBB.begin() || !isUnpredicatedTerminator(*--I)) {
      assert(!isUncondBranchOpcode(LastOpc) &&
             "unreachable unconditional branches removed above");

      if (isCondBranchOpcode(LastOpc)) {
        // Block ends with fall-through condbranch.
        parseCondBranch(LastInst, TBB, Cond);
        return false;
      }
      return true; // Can't handle indirect branch.
    } else {
      SecondLastInst = &*I;
      SecondLastOpc = SecondLastInst->getOpcode();
    }
  }

  // If there are three terminators, we don't know what sort of block this is.
  if (SecondLastInst && I != MBB.begin() && isUnpredicatedTerminator(*--I))
    return true;

  // If the block ends with a B and a Bcc, handle it.
  if (isCondBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) {
    parseCondBranch(SecondLastInst, TBB, Cond);
    FBB = LastInst->getOperand(0).getMBB();
    return false;
  }

  // If the block ends with two unconditional branches, handle it.  The second
  // one is not executed, so remove it.
  if (isUncondBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) {
    TBB = SecondLastInst->getOperand(0).getMBB();
    I = LastInst;
    if (AllowModify)
      I->eraseFromParent();
    return false;
  }

  // ...likewise if it ends with an indirect branch followed by an unconditional
  // branch.
  if (isIndirectBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) {
    I = LastInst;
    if (AllowModify)
      I->eraseFromParent();
    return true;
  }

  // Otherwise, can't handle this.
  return true;
}

bool AArch64InstrInfo::analyzeBranchPredicate(MachineBasicBlock &MBB,
                                              MachineBranchPredicate &MBP,
                                              bool AllowModify) const {
  // For the moment, handle only a block which ends with a cb(n)zx followed by
  // a fallthrough.  Why this?  Because it is a common form.
  // TODO: Should we handle b.cc?

  MachineBasicBlock::iterator I = MBB.getLastNonDebugInstr();
  if (I == MBB.end())
    return true;

  // Skip over SpeculationBarrierEndBB terminators
  if (I->getOpcode() == AArch64::SpeculationBarrierISBDSBEndBB ||
      I->getOpcode() == AArch64::SpeculationBarrierSBEndBB) {
    --I;
  }

  if (!isUnpredicatedTerminator(*I))
    return true;

  // Get the last instruction in the block.
  MachineInstr *LastInst = &*I;
  unsigned LastOpc = LastInst->getOpcode();
  if (!isCondBranchOpcode(LastOpc))
    return true;

  switch (LastOpc) {
  default:
    return true;
  case AArch64::CBZW:
  case AArch64::CBZX:
  case AArch64::CBNZW:
  case AArch64::CBNZX:
    break;
  }

  MBP.TrueDest = LastInst->getOperand(1).getMBB();
  assert(MBP.TrueDest && "expected!");
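  // With a fall-through cb(n)z the false destination is simply the next block
  // in layout order.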
  MBP.FalseDest = MBB.getNextNode();

  MBP.ConditionDef = nullptr;
  MBP.SingleUseCondition = false;

  MBP.LHS = LastInst->getOperand(0);
  MBP.RHS = MachineOperand::CreateImm(0);
  MBP.Predicate = LastOpc == AArch64::CBNZX ? MachineBranchPredicate::PRED_NE
                                            : MachineBranchPredicate::PRED_EQ;
  return false;
}

bool AArch64InstrInfo::reverseBranchCondition(
    SmallVectorImpl<MachineOperand> &Cond) const {
  if (Cond[0].getImm() != -1) {
    // Regular Bcc
    AArch64CC::CondCode CC = (AArch64CC::CondCode)(int)Cond[0].getImm();
    Cond[0].setImm(AArch64CC::getInvertedCondCode(CC));
  } else {
    // Folded compare-and-branch
    switch (Cond[1].getImm()) {
    default:
      llvm_unreachable("Unknown conditional branch!");
    case AArch64::CBZW:
      Cond[1].setImm(AArch64::CBNZW);
      break;
    case AArch64::CBNZW:
      Cond[1].setImm(AArch64::CBZW);
      break;
    case AArch64::CBZX:
      Cond[1].setImm(AArch64::CBNZX);
      break;
    case AArch64::CBNZX:
      Cond[1].setImm(AArch64::CBZX);
      break;
    case AArch64::TBZW:
      Cond[1].setImm(AArch64::TBNZW);
      break;
    case AArch64::TBNZW:
      Cond[1].setImm(AArch64::TBZW);
      break;
    case AArch64::TBZX:
      Cond[1].setImm(AArch64::TBNZX);
      break;
    case AArch64::TBNZX:
      Cond[1].setImm(AArch64::TBZX);
      break;
    }
  }

  return false;
}

unsigned AArch64InstrInfo::removeBranch(MachineBasicBlock &MBB,
                                        int *BytesRemoved) const {
  MachineBasicBlock::iterator I = MBB.getLastNonDebugInstr();
  if (I == MBB.end())
    return 0;

  if (!isUncondBranchOpcode(I->getOpcode()) &&
      !isCondBranchOpcode(I->getOpcode()))
    return 0;

  // Remove the branch.
  I->eraseFromParent();

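  // The erased branch may have been the unconditional half of a Bcc + B pair;
  // if a conditional branch now terminates the block, remove it as well and
  // report two instructions removed.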
  I = MBB.end();

  if (I == MBB.begin()) {
    if (BytesRemoved)
      *BytesRemoved = 4;
    return 1;
  }
  --I;
  if (!isCondBranchOpcode(I->getOpcode())) {
    if (BytesRemoved)
      *BytesRemoved = 4;
    return 1;
  }

  // Remove the branch.
  I->eraseFromParent();
  if (BytesRemoved)
    *BytesRemoved = 8;

  return 2;
}

void AArch64InstrInfo::instantiateCondBranch(
    MachineBasicBlock &MBB, const DebugLoc &DL, MachineBasicBlock *TBB,
    ArrayRef<MachineOperand> Cond) const {
  if (Cond[0].getImm() != -1) {
    // Regular Bcc
    BuildMI(&MBB, DL, get(AArch64::Bcc)).addImm(Cond[0].getImm()).addMBB(TBB);
  } else {
    // Folded compare-and-branch
    // Note that we use .add() instead of .addReg() to keep the register flags.
    const MachineInstrBuilder MIB =
        BuildMI(&MBB, DL, get(Cond[1].getImm())).add(Cond[2]);
    if (Cond.size() > 3)
      MIB.addImm(Cond[3].getImm());
    MIB.addMBB(TBB);
  }
}

unsigned AArch64InstrInfo::insertBranch(
    MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB,
    ArrayRef<MachineOperand> Cond, const DebugLoc &DL, int *BytesAdded) const {
  // Shouldn't be a fall through.
  assert(TBB && "insertBranch must not be told to insert a fallthrough");

  if (!FBB) {
    if (Cond.empty()) // Unconditional branch?
      BuildMI(&MBB, DL, get(AArch64::B)).addMBB(TBB);
    else
      instantiateCondBranch(MBB, DL, TBB, Cond);

    if (BytesAdded)
      *BytesAdded = 4;

    return 1;
  }

  // Two-way conditional branch.
  instantiateCondBranch(MBB, DL, TBB, Cond);
  BuildMI(&MBB, DL, get(AArch64::B)).addMBB(FBB);

  if (BytesAdded)
    *BytesAdded = 8;

  return 2;
}

// Find the original register that VReg is copied from.
static unsigned removeCopies(const MachineRegisterInfo &MRI, unsigned VReg) {
  while (Register::isVirtualRegister(VReg)) {
    const MachineInstr *DefMI = MRI.getVRegDef(VReg);
    if (!DefMI->isFullCopy())
      return VReg;
    VReg = DefMI->getOperand(1).getReg();
  }
  return VReg;
}

// Determine if VReg is defined by an instruction that can be folded into a
// csel instruction. If so, return the folded opcode, and the replacement
// register.
static unsigned canFoldIntoCSel(const MachineRegisterInfo &MRI, unsigned VReg,
                                unsigned *NewVReg = nullptr) {
  VReg = removeCopies(MRI, VReg);
  if (!Register::isVirtualRegister(VReg))
    return 0;

  bool Is64Bit = AArch64::GPR64allRegClass.hasSubClassEq(MRI.getRegClass(VReg));
  const MachineInstr *DefMI = MRI.getVRegDef(VReg);
  unsigned Opc = 0;
  unsigned SrcOpNum = 0;
  switch (DefMI->getOpcode()) {
  case AArch64::ADDSXri:
  case AArch64::ADDSWri:
    // if NZCV is used, do not fold.
    if (DefMI->findRegisterDefOperandIdx(AArch64::NZCV, true) == -1)
      return 0;
    // fall-through to ADDXri and ADDWri.
    [[fallthrough]];
  case AArch64::ADDXri:
  case AArch64::ADDWri:
    // add x, 1 -> csinc.
    if (!DefMI->getOperand(2).isImm() || DefMI->getOperand(2).getImm() != 1 ||
        DefMI->getOperand(3).getImm() != 0)
      return 0;
    SrcOpNum = 1;
    Opc = Is64Bit ? AArch64::CSINCXr : AArch64::CSINCWr;
    break;

  case AArch64::ORNXrr:
  case AArch64::ORNWrr: {
    // not x -> csinv, represented as orn dst, xzr, src.
    unsigned ZReg = removeCopies(MRI, DefMI->getOperand(1).getReg());
    if (ZReg != AArch64::XZR && ZReg != AArch64::WZR)
      return 0;
    SrcOpNum = 2;
    Opc = Is64Bit ? AArch64::CSINVXr : AArch64::CSINVWr;
    break;
  }

  case AArch64::SUBSXrr:
  case AArch64::SUBSWrr:
    // if NZCV is used, do not fold.
    if (DefMI->findRegisterDefOperandIdx(AArch64::NZCV, true) == -1)
      return 0;
    // fall-through to SUBXrr and SUBWrr.
    [[fallthrough]];
  case AArch64::SUBXrr:
  case AArch64::SUBWrr: {
    // neg x -> csneg, represented as sub dst, xzr, src.
    unsigned ZReg = removeCopies(MRI, DefMI->getOperand(1).getReg());
    if (ZReg != AArch64::XZR && ZReg != AArch64::WZR)
      return 0;
    SrcOpNum = 2;
    Opc = Is64Bit ? AArch64::CSNEGXr : AArch64::CSNEGWr;
    break;
  }
  default:
    return 0;
  }
  assert(Opc && SrcOpNum && "Missing parameters");

  if (NewVReg)
    *NewVReg = DefMI->getOperand(SrcOpNum).getReg();
  return Opc;
}

bool AArch64InstrInfo::canInsertSelect(const MachineBasicBlock &MBB,
                                       ArrayRef<MachineOperand> Cond,
                                       Register DstReg, Register TrueReg,
                                       Register FalseReg, int &CondCycles,
                                       int &TrueCycles,
                                       int &FalseCycles) const {
  // Check register classes.
  const MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
  const TargetRegisterClass *RC =
      RI.getCommonSubClass(MRI.getRegClass(TrueReg), MRI.getRegClass(FalseReg));
  if (!RC)
    return false;

  // Also need to check the dest regclass, in case we're trying to optimize
  // something like:
  // %1(gpr) = PHI %2(fpr), bb1, %(fpr), bb2
  if (!RI.getCommonSubClass(RC, MRI.getRegClass(DstReg)))
    return false;

  // Expanding cbz/tbz requires an extra cycle of latency on the condition.
  unsigned ExtraCondLat = Cond.size() != 1;

  // GPRs are handled by csel.
  // FIXME: Fold in x+1, -x, and ~x when applicable.
  if (AArch64::GPR64allRegClass.hasSubClassEq(RC) ||
      AArch64::GPR32allRegClass.hasSubClassEq(RC)) {
    // Single-cycle csel, csinc, csinv, and csneg.
    CondCycles = 1 + ExtraCondLat;
    TrueCycles = FalseCycles = 1;
    if (canFoldIntoCSel(MRI, TrueReg))
      TrueCycles = 0;
    else if (canFoldIntoCSel(MRI, FalseReg))
      FalseCycles = 0;
    return true;
  }

  // Scalar floating point is handled by fcsel.
  // FIXME: Form fabs, fmin, and fmax when applicable.
  if (AArch64::FPR64RegClass.hasSubClassEq(RC) ||
      AArch64::FPR32RegClass.hasSubClassEq(RC)) {
    CondCycles = 5 + ExtraCondLat;
    TrueCycles = FalseCycles = 2;
    return true;
  }

  // Can't do vectors.
  return false;
}

void AArch64InstrInfo::insertSelect(MachineBasicBlock &MBB,
                                    MachineBasicBlock::iterator I,
                                    const DebugLoc &DL, Register DstReg,
                                    ArrayRef<MachineOperand> Cond,
                                    Register TrueReg, Register FalseReg) const {
  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();

  // Parse the condition code, see parseCondBranch() above.
  AArch64CC::CondCode CC;
  switch (Cond.size()) {
  default:
    llvm_unreachable("Unknown condition opcode in Cond");
  case 1: // b.cc
    CC = AArch64CC::CondCode(Cond[0].getImm());
    break;
  case 3: { // cbz/cbnz
    // We must insert a compare against 0.
    bool Is64Bit;
    switch (Cond[1].getImm()) {
    default:
      llvm_unreachable("Unknown branch opcode in Cond");
    case AArch64::CBZW:
      Is64Bit = false;
      CC = AArch64CC::EQ;
      break;
    case AArch64::CBZX:
      Is64Bit = true;
      CC = AArch64CC::EQ;
      break;
    case AArch64::CBNZW:
      Is64Bit = false;
      CC = AArch64CC::NE;
      break;
    case AArch64::CBNZX:
      Is64Bit = true;
      CC = AArch64CC::NE;
      break;
    }
    Register SrcReg = Cond[2].getReg();
    if (Is64Bit) {
      // cmp reg, #0 is actually subs xzr, reg, #0.
      MRI.constrainRegClass(SrcReg, &AArch64::GPR64spRegClass);
      BuildMI(MBB, I, DL, get(AArch64::SUBSXri), AArch64::XZR)
          .addReg(SrcReg)
          .addImm(0)
          .addImm(0);
    } else {
      MRI.constrainRegClass(SrcReg, &AArch64::GPR32spRegClass);
      BuildMI(MBB, I, DL, get(AArch64::SUBSWri), AArch64::WZR)
          .addReg(SrcReg)
          .addImm(0)
          .addImm(0);
    }
    break;
  }
  case 4: { // tbz/tbnz
    // We must insert a tst instruction.
    switch (Cond[1].getImm()) {
    default:
      llvm_unreachable("Unknown branch opcode in Cond");
    case AArch64::TBZW:
    case AArch64::TBZX:
      CC = AArch64CC::EQ;
      break;
    case AArch64::TBNZW:
    case AArch64::TBNZX:
      CC = AArch64CC::NE;
      break;
    }
    // cmp reg, #foo is actually ands xzr, reg, #1<<foo.
    if (Cond[1].getImm() == AArch64::TBZW || Cond[1].getImm() == AArch64::TBNZW)
      BuildMI(MBB, I, DL, get(AArch64::ANDSWri), AArch64::WZR)
          .addReg(Cond[2].getReg())
          .addImm(
              AArch64_AM::encodeLogicalImmediate(1ull << Cond[3].getImm(), 32));
    else
      BuildMI(MBB, I, DL, get(AArch64::ANDSXri), AArch64::XZR)
          .addReg(Cond[2].getReg())
          .addImm(
              AArch64_AM::encodeLogicalImmediate(1ull << Cond[3].getImm(), 64));
    break;
  }
  }

  unsigned Opc = 0;
  const TargetRegisterClass *RC = nullptr;
  bool TryFold = false;
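  // Folding into csinc/csinv/csneg is only attempted for the GPR select
  // opcodes; FCSEL has no equivalent folded forms.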
  if (MRI.constrainRegClass(DstReg, &AArch64::GPR64RegClass)) {
    RC = &AArch64::GPR64RegClass;
    Opc = AArch64::CSELXr;
    TryFold = true;
  } else if (MRI.constrainRegClass(DstReg, &AArch64::GPR32RegClass)) {
    RC = &AArch64::GPR32RegClass;
    Opc = AArch64::CSELWr;
    TryFold = true;
  } else if (MRI.constrainRegClass(DstReg, &AArch64::FPR64RegClass)) {
    RC = &AArch64::FPR64RegClass;
    Opc = AArch64::FCSELDrrr;
  } else if (MRI.constrainRegClass(DstReg, &AArch64::FPR32RegClass)) {
    RC = &AArch64::FPR32RegClass;
    Opc = AArch64::FCSELSrrr;
  }
  assert(RC && "Unsupported regclass");

  // Try folding simple instructions into the csel.
  if (TryFold) {
    unsigned NewVReg = 0;
    unsigned FoldedOpc = canFoldIntoCSel(MRI, TrueReg, &NewVReg);
    if (FoldedOpc) {
      // The folded opcodes csinc, csinv and csneg apply the operation to
      // FalseReg, so we need to invert the condition.
      CC = AArch64CC::getInvertedCondCode(CC);
      TrueReg = FalseReg;
    } else
      FoldedOpc = canFoldIntoCSel(MRI, FalseReg, &NewVReg);

    // Fold the operation. Leave any dead instructions for DCE to clean up.
    if (FoldedOpc) {
      FalseReg = NewVReg;
      Opc = FoldedOpc;
      // This extends the live range of NewVReg.
      MRI.clearKillFlags(NewVReg);
    }
  }

  // Pull all virtual registers into the appropriate class.
  MRI.constrainRegClass(TrueReg, RC);
  MRI.constrainRegClass(FalseReg, RC);

  // Insert the csel.
  BuildMI(MBB, I, DL, get(Opc), DstReg)
      .addReg(TrueReg)
      .addReg(FalseReg)
      .addImm(CC);
}

/// Returns true if a MOVi32imm or MOVi64imm can be expanded to an ORRxx.
static bool canBeExpandedToORR(const MachineInstr &MI, unsigned BitSize) {
  uint64_t Imm = MI.getOperand(1).getImm();
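  // Truncate the immediate to the low BitSize bits before checking whether it
  // can be encoded as a logical immediate.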
  uint64_t UImm = Imm << (64 - BitSize) >> (64 - BitSize);
  uint64_t Encoding;
  return AArch64_AM::processLogicalImmediate(UImm, BitSize, Encoding);
}

// FIXME: this implementation should be micro-architecture dependent, so a
// micro-architecture target hook should be introduced here in future.
bool AArch64InstrInfo::isAsCheapAsAMove(const MachineInstr &MI) const {
  if (!Subtarget.hasCustomCheapAsMoveHandling())
    return MI.isAsCheapAsAMove();

  const unsigned Opcode = MI.getOpcode();

  // Firstly, check cases gated by features.

  if (Subtarget.hasZeroCycleZeroingFP()) {
    if (Opcode == AArch64::FMOVH0 ||
        Opcode == AArch64::FMOVS0 ||
        Opcode == AArch64::FMOVD0)
      return true;
  }

  if (Subtarget.hasZeroCycleZeroingGP()) {
    if (Opcode == TargetOpcode::COPY &&
        (MI.getOperand(1).getReg() == AArch64::WZR ||
         MI.getOperand(1).getReg() == AArch64::XZR))
      return true;
  }

  // Secondly, check cases specific to sub-targets.

  if (Subtarget.hasExynosCheapAsMoveHandling()) {
    if (isExynosCheapAsMove(MI))
      return true;

    return MI.isAsCheapAsAMove();
  }

  // Finally, check generic cases.

  switch (Opcode) {
  default:
    return false;

  // add/sub on register without shift
  case AArch64::ADDWri:
  case AArch64::ADDXri:
  case AArch64::SUBWri:
  case AArch64::SUBXri:
    return (MI.getOperand(3).getImm() == 0);

  // logical ops on immediate
  case AArch64::ANDWri:
  case AArch64::ANDXri:
  case AArch64::EORWri:
  case AArch64::EORXri:
  case AArch64::ORRWri:
  case AArch64::ORRXri:
    return true;

  // logical ops on register without shift
  case AArch64::ANDWrr:
  case AArch64::ANDXrr:
  case AArch64::BICWrr:
  case AArch64::BICXrr:
  case AArch64::EONWrr:
  case AArch64::EONXrr:
  case AArch64::EORWrr:
  case AArch64::EORXrr:
  case AArch64::ORNWrr:
  case AArch64::ORNXrr:
  case AArch64::ORRWrr:
  case AArch64::ORRXrr:
    return true;

  // If MOVi32imm or MOVi64imm can be expanded into ORRWri or
  // ORRXri, it is as cheap as MOV
  case AArch64::MOVi32imm:
    return canBeExpandedToORR(MI, 32);
  case AArch64::MOVi64imm:
    return canBeExpandedToORR(MI, 64);
  }

  llvm_unreachable("Unknown opcode to check as cheap as a move!");
}

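// Heuristic for Falkor: returns true when the shift/extend or addressing mode
// used by \p MI is expected to add no extra latency on that core.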
bool AArch64InstrInfo::isFalkorShiftExtFast(const MachineInstr &MI) {
  switch (MI.getOpcode()) {
  default:
    return false;

  case AArch64::ADDWrs:
  case AArch64::ADDXrs:
  case AArch64::ADDSWrs:
  case AArch64::ADDSXrs: {
    unsigned Imm = MI.getOperand(3).getImm();
    unsigned ShiftVal = AArch64_AM::getShiftValue(Imm);
    if (ShiftVal == 0)
      return true;
    return AArch64_AM::getShiftType(Imm) == AArch64_AM::LSL && ShiftVal <= 5;
  }

  case AArch64::ADDWrx:
  case AArch64::ADDXrx:
  case AArch64::ADDXrx64:
  case AArch64::ADDSWrx:
  case AArch64::ADDSXrx:
  case AArch64::ADDSXrx64: {
    unsigned Imm = MI.getOperand(3).getImm();
    switch (AArch64_AM::getArithExtendType(Imm)) {
    default:
      return false;
    case AArch64_AM::UXTB:
    case AArch64_AM::UXTH:
    case AArch64_AM::UXTW:
    case AArch64_AM::UXTX:
      return AArch64_AM::getArithShiftValue(Imm) <= 4;
    }
  }

  case AArch64::SUBWrs:
  case AArch64::SUBSWrs: {
    unsigned Imm = MI.getOperand(3).getImm();
    unsigned ShiftVal = AArch64_AM::getShiftValue(Imm);
    return ShiftVal == 0 ||
           (AArch64_AM::getShiftType(Imm) == AArch64_AM::ASR && ShiftVal == 31);
  }

  case AArch64::SUBXrs:
  case AArch64::SUBSXrs: {
    unsigned Imm = MI.getOperand(3).getImm();
    unsigned ShiftVal = AArch64_AM::getShiftValue(Imm);
    return ShiftVal == 0 ||
           (AArch64_AM::getShiftType(Imm) == AArch64_AM::ASR && ShiftVal == 63);
  }

  case AArch64::SUBWrx:
  case AArch64::SUBXrx:
  case AArch64::SUBXrx64:
  case AArch64::SUBSWrx:
  case AArch64::SUBSXrx:
  case AArch64::SUBSXrx64: {
    unsigned Imm = MI.getOperand(3).getImm();
    switch (AArch64_AM::getArithExtendType(Imm)) {
    default:
      return false;
    case AArch64_AM::UXTB:
    case AArch64_AM::UXTH:
    case AArch64_AM::UXTW:
    case AArch64_AM::UXTX:
      return AArch64_AM::getArithShiftValue(Imm) == 0;
    }
  }

  case AArch64::LDRBBroW:
  case AArch64::LDRBBroX:
  case AArch64::LDRBroW:
  case AArch64::LDRBroX:
  case AArch64::LDRDroW:
  case AArch64::LDRDroX:
  case AArch64::LDRHHroW:
  case AArch64::LDRHHroX:
  case AArch64::LDRHroW:
  case AArch64::LDRHroX:
  case AArch64::LDRQroW:
  case AArch64::LDRQroX:
  case AArch64::LDRSBWroW:
  case AArch64::LDRSBWroX:
  case AArch64::LDRSBXroW:
  case AArch64::LDRSBXroX:
  case AArch64::LDRSHWroW:
  case AArch64::LDRSHWroX:
  case AArch64::LDRSHXroW:
  case AArch64::LDRSHXroX:
  case AArch64::LDRSWroW:
  case AArch64::LDRSWroX:
  case AArch64::LDRSroW:
  case AArch64::LDRSroX:
  case AArch64::LDRWroW:
  case AArch64::LDRWroX:
  case AArch64::LDRXroW:
  case AArch64::LDRXroX:
  case AArch64::PRFMroW:
  case AArch64::PRFMroX:
  case AArch64::STRBBroW:
  case AArch64::STRBBroX:
  case AArch64::STRBroW:
  case AArch64::STRBroX:
  case AArch64::STRDroW:
  case AArch64::STRDroX:
  case AArch64::STRHHroW:
  case AArch64::STRHHroX:
  case AArch64::STRHroW:
  case AArch64::STRHroX:
  case AArch64::STRQroW:
  case AArch64::STRQroX:
  case AArch64::STRSroW:
  case AArch64::STRSroX:
  case AArch64::STRWroW:
  case AArch64::STRWroX:
  case AArch64::STRXroW:
  case AArch64::STRXroX: {
    unsigned IsSigned = MI.getOperand(3).getImm();
    return !IsSigned;
  }
  }
}

bool AArch64InstrInfo::isSEHInstruction(const MachineInstr &MI) {
  unsigned Opc = MI.getOpcode();
  switch (Opc) {
    default:
      return false;
    case AArch64::SEH_StackAlloc:
    case AArch64::SEH_SaveFPLR:
    case AArch64::SEH_SaveFPLR_X:
    case AArch64::SEH_SaveReg:
    case AArch64::SEH_SaveReg_X:
    case AArch64::SEH_SaveRegP:
    case AArch64::SEH_SaveRegP_X:
    case AArch64::SEH_SaveFReg:
    case AArch64::SEH_SaveFReg_X:
    case AArch64::SEH_SaveFRegP:
    case AArch64::SEH_SaveFRegP_X:
    case AArch64::SEH_SetFP:
    case AArch64::SEH_AddFP:
    case AArch64::SEH_Nop:
    case AArch64::SEH_PrologEnd:
    case AArch64::SEH_EpilogStart:
    case AArch64::SEH_EpilogEnd:
    case AArch64::SEH_PACSignLR:
      return true;
  }
}

bool AArch64InstrInfo::isCoalescableExtInstr(const MachineInstr &MI,
                                             Register &SrcReg, Register &DstReg,
                                             unsigned &SubIdx) const {
  switch (MI.getOpcode()) {
  default:
    return false;
  case AArch64::SBFMXri: // aka sxtw
  case AArch64::UBFMXri: // aka uxtw
    // Check for the 32 -> 64 bit extension case, these instructions can do
    // much more.
    if (MI.getOperand(2).getImm() != 0 || MI.getOperand(3).getImm() != 31)
      return false;
    // This is a signed or unsigned 32 -> 64 bit extension.
    SrcReg = MI.getOperand(1).getReg();
    DstReg = MI.getOperand(0).getReg();
    SubIdx = AArch64::sub_32;
    return true;
  }
}

bool AArch64InstrInfo::areMemAccessesTriviallyDisjoint(
    const MachineInstr &MIa, const MachineInstr &MIb) const {
  const TargetRegisterInfo *TRI = &getRegisterInfo();
  const MachineOperand *BaseOpA = nullptr, *BaseOpB = nullptr;
  int64_t OffsetA = 0, OffsetB = 0;
  unsigned WidthA = 0, WidthB = 0;
  bool OffsetAIsScalable = false, OffsetBIsScalable = false;

  assert(MIa.mayLoadOrStore() && "MIa must be a load or store.");
  assert(MIb.mayLoadOrStore() && "MIb must be a load or store.");

  if (MIa.hasUnmodeledSideEffects() || MIb.hasUnmodeledSideEffects() ||
      MIa.hasOrderedMemoryRef() || MIb.hasOrderedMemoryRef())
    return false;

  // Retrieve the base, the offset from the base, and the width. The width is
  // the size of memory that is being loaded/stored (e.g. 1, 2, 4, 8).  If the
  // bases are identical, and the offset of the lower memory access plus its
  // width does not overlap the offset of the higher memory access, then the
  // memory accesses are disjoint.
  // If OffsetAIsScalable and OffsetBIsScalable are both true, they
  // are assumed to have the same scale (vscale).
  if (getMemOperandWithOffsetWidth(MIa, BaseOpA, OffsetA, OffsetAIsScalable,
                                   WidthA, TRI) &&
      getMemOperandWithOffsetWidth(MIb, BaseOpB, OffsetB, OffsetBIsScalable,
                                   WidthB, TRI)) {
    if (BaseOpA->isIdenticalTo(*BaseOpB) &&
        OffsetAIsScalable == OffsetBIsScalable) {
      int LowOffset = OffsetA < OffsetB ? OffsetA : OffsetB;
      int HighOffset = OffsetA < OffsetB ? OffsetB : OffsetA;
      int LowWidth = (LowOffset == OffsetA) ? WidthA : WidthB;
      if (LowOffset + LowWidth <= HighOffset)
        return true;
    }
  }
  return false;
}

bool AArch64InstrInfo::isSchedulingBoundary(const MachineInstr &MI,
                                            const MachineBasicBlock *MBB,
                                            const MachineFunction &MF) const {
  if (TargetInstrInfo::isSchedulingBoundary(MI, MBB, MF))
    return true;
  switch (MI.getOpcode()) {
  case AArch64::HINT:
    // CSDB hints are scheduling barriers.
    if (MI.getOperand(0).getImm() == 0x14)
      return true;
    break;
  case AArch64::DSB:
  case AArch64::ISB:
    // DSB and ISB also are scheduling barriers.
    return true;
  case AArch64::MSRpstatesvcrImm1:
    // SMSTART and SMSTOP are also scheduling barriers.
    return true;
  default:;
  }
  if (isSEHInstruction(MI))
    return true;
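  // Treat any instruction immediately followed by a CFI instruction as a
  // boundary so the CFI directive is not separated from the instruction it
  // describes.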
  auto Next = std::next(MI.getIterator());
  return Next != MBB->end() && Next->isCFIInstruction();
}

/// analyzeCompare - For a comparison instruction, return the source registers
/// in SrcReg and SrcReg2, and the value it compares against in CmpValue.
/// Return true if the comparison instruction can be analyzed.
bool AArch64InstrInfo::analyzeCompare(const MachineInstr &MI, Register &SrcReg,
                                      Register &SrcReg2, int64_t &CmpMask,
                                      int64_t &CmpValue) const {
  // The first operand can be a frame index where we'd normally expect a
  // register.
  assert(MI.getNumOperands() >= 2 && "All AArch64 cmps should have 2 operands");
  if (!MI.getOperand(1).isReg())
    return false;

  switch (MI.getOpcode()) {
  default:
    break;
  case AArch64::PTEST_PP:
  case AArch64::PTEST_PP_ANY:
    SrcReg = MI.getOperand(0).getReg();
    SrcReg2 = MI.getOperand(1).getReg();
    // Not sure about the mask and value for now...
    CmpMask = ~0;
    CmpValue = 0;
    return true;
  case AArch64::SUBSWrr:
  case AArch64::SUBSWrs:
  case AArch64::SUBSWrx:
  case AArch64::SUBSXrr:
  case AArch64::SUBSXrs:
  case AArch64::SUBSXrx:
  case AArch64::ADDSWrr:
  case AArch64::ADDSWrs:
  case AArch64::ADDSWrx:
  case AArch64::ADDSXrr:
  case AArch64::ADDSXrs:
  case AArch64::ADDSXrx:
    // Replace SUBSWrr with SUBWrr if NZCV is not used.
    SrcReg = MI.getOperand(1).getReg();
    SrcReg2 = MI.getOperand(2).getReg();
    CmpMask = ~0;
    CmpValue = 0;
    return true;
  case AArch64::SUBSWri:
  case AArch64::ADDSWri:
  case AArch64::SUBSXri:
  case AArch64::ADDSXri:
    SrcReg = MI.getOperand(1).getReg();
    SrcReg2 = 0;
    CmpMask = ~0;
    CmpValue = MI.getOperand(2).getImm();
    return true;
  case AArch64::ANDSWri:
  case AArch64::ANDSXri:
    // ANDS does not use the same encoding scheme as the other xxxS
    // instructions.
    SrcReg = MI.getOperand(1).getReg();
    SrcReg2 = 0;
    CmpMask = ~0;
    CmpValue = AArch64_AM::decodeLogicalImmediate(
                   MI.getOperand(2).getImm(),
                   MI.getOpcode() == AArch64::ANDSWri ? 32 : 64);
    return true;
  }

  return false;
}

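// Re-constrain the register operands of \p Instr to the classes required by
// its (possibly new) opcode. Returns false if any operand cannot be
// constrained.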
static bool UpdateOperandRegClass(MachineInstr &Instr) {
  MachineBasicBlock *MBB = Instr.getParent();
  assert(MBB && "Can't get MachineBasicBlock here");
  MachineFunction *MF = MBB->getParent();
  assert(MF && "Can't get MachineFunction here");
  const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
  const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
  MachineRegisterInfo *MRI = &MF->getRegInfo();

  for (unsigned OpIdx = 0, EndIdx = Instr.getNumOperands(); OpIdx < EndIdx;
       ++OpIdx) {
    MachineOperand &MO = Instr.getOperand(OpIdx);
    const TargetRegisterClass *OpRegCstraints =
        Instr.getRegClassConstraint(OpIdx, TII, TRI);

    // If there's no constraint, there's nothing to do.
    if (!OpRegCstraints)
      continue;
    // If the operand is a frame index, there's nothing to do here.
    // A frame index operand will resolve correctly during PEI.
    if (MO.isFI())
      continue;

    assert(MO.isReg() &&
           "Operand has register constraints without being a register!");

    Register Reg = MO.getReg();
    if (Reg.isPhysical()) {
      if (!OpRegCstraints->contains(Reg))
        return false;
    } else if (!OpRegCstraints->hasSubClassEq(MRI->getRegClass(Reg)) &&
               !MRI->constrainRegClass(Reg, OpRegCstraints))
      return false;
  }

  return true;
}

/// Return the opcode that does not set flags when possible - otherwise
/// return the original opcode. The caller is responsible for doing the actual
/// substitution and legality checking.
static unsigned convertToNonFlagSettingOpc(const MachineInstr &MI) {
  // Don't convert all compare instructions, because for some the zero register
  // encoding becomes the sp register.
  bool MIDefinesZeroReg = false;
  if (MI.definesRegister(AArch64::WZR) || MI.definesRegister(AArch64::XZR))
    MIDefinesZeroReg = true;

  switch (MI.getOpcode()) {
  default:
    return MI.getOpcode();
  case AArch64::ADDSWrr:
    return AArch64::ADDWrr;
  case AArch64::ADDSWri:
    return MIDefinesZeroReg ? AArch64::ADDSWri : AArch64::ADDWri;
  case AArch64::ADDSWrs:
    return MIDefinesZeroReg ? AArch64::ADDSWrs : AArch64::ADDWrs;
  case AArch64::ADDSWrx:
    return AArch64::ADDWrx;
  case AArch64::ADDSXrr:
    return AArch64::ADDXrr;
  case AArch64::ADDSXri:
    return MIDefinesZeroReg ? AArch64::ADDSXri : AArch64::ADDXri;
  case AArch64::ADDSXrs:
    return MIDefinesZeroReg ? AArch64::ADDSXrs : AArch64::ADDXrs;
  case AArch64::ADDSXrx:
    return AArch64::ADDXrx;
  case AArch64::SUBSWrr:
    return AArch64::SUBWrr;
  case AArch64::SUBSWri:
    return MIDefinesZeroReg ? AArch64::SUBSWri : AArch64::SUBWri;
  case AArch64::SUBSWrs:
    return MIDefinesZeroReg ? AArch64::SUBSWrs : AArch64::SUBWrs;
  case AArch64::SUBSWrx:
    return AArch64::SUBWrx;
  case AArch64::SUBSXrr:
    return AArch64::SUBXrr;
  case AArch64::SUBSXri:
    return MIDefinesZeroReg ? AArch64::SUBSXri : AArch64::SUBXri;
  case AArch64::SUBSXrs:
    return MIDefinesZeroReg ? AArch64::SUBSXrs : AArch64::SUBXrs;
  case AArch64::SUBSXrx:
    return AArch64::SUBXrx;
  }
}

enum AccessKind { AK_Write = 0x01, AK_Read = 0x10, AK_All = 0x11 };

/// True when condition flags are accessed (either by writing or reading)
/// on the instruction trace starting at From and ending at To.
///
/// Note: If From and To are from different blocks it's assumed the condition
///       flags are accessed on the path.
static bool areCFlagsAccessedBetweenInstrs(
    MachineBasicBlock::iterator From, MachineBasicBlock::iterator To,
    const TargetRegisterInfo *TRI, const AccessKind AccessToCheck = AK_All) {
  // Early exit if To is at the beginning of the BB.
  if (To == To->getParent()->begin())
    return true;

  // Check whether the instructions are in the same basic block
  // If not, assume the condition flags might get modified somewhere.
  if (To->getParent() != From->getParent())
    return true;

  // From must be above To.
  assert(std::any_of(
      ++To.getReverse(), To->getParent()->rend(),
      [From](MachineInstr &MI) { return MI.getIterator() == From; }));

  // We iterate backward starting at \p To until we hit \p From.
  for (const MachineInstr &Instr :
       instructionsWithoutDebug(++To.getReverse(), From.getReverse())) {
    if (((AccessToCheck & AK_Write) &&
         Instr.modifiesRegister(AArch64::NZCV, TRI)) ||
        ((AccessToCheck & AK_Read) && Instr.readsRegister(AArch64::NZCV, TRI)))
      return true;
  }
  return false;
}

/// optimizePTestInstr - Attempt to remove a ptest of a predicate-generating
/// operation which could set the flags in an identical manner
bool AArch64InstrInfo::optimizePTestInstr(
    MachineInstr *PTest, unsigned MaskReg, unsigned PredReg,
    const MachineRegisterInfo *MRI) const {
  auto *Mask = MRI->getUniqueVRegDef(MaskReg);
  auto *Pred = MRI->getUniqueVRegDef(PredReg);
  auto NewOp = Pred->getOpcode();
  bool OpChanged = false;

  unsigned MaskOpcode = Mask->getOpcode();
  unsigned PredOpcode = Pred->getOpcode();
  bool PredIsPTestLike = isPTestLikeOpcode(PredOpcode);
  bool PredIsWhileLike = isWhileOpcode(PredOpcode);

  if (isPTrueOpcode(MaskOpcode) && (PredIsPTestLike || PredIsWhileLike) &&
      getElementSizeForOpcode(MaskOpcode) ==
          getElementSizeForOpcode(PredOpcode) &&
      Mask->getOperand(1).getImm() == 31) {
    // For PTEST(PTRUE_ALL, WHILE), if the element size matches, the PTEST is
    // redundant since WHILE performs an implicit PTEST with an all active
    // mask. Must be an all active predicate of matching element size.

    // For PTEST(PTRUE_ALL, PTEST_LIKE), the PTEST is redundant if the
    // PTEST_LIKE instruction uses the same all active mask and the element
    // size matches. If the PTEST has a condition of any then it is always
    // redundant.
    if (PredIsPTestLike) {
      auto PTestLikeMask = MRI->getUniqueVRegDef(Pred->getOperand(1).getReg());
      if (Mask != PTestLikeMask && PTest->getOpcode() != AArch64::PTEST_PP_ANY)
        return false;
    }

    // Fallthrough to simply remove the PTEST.
  } else if ((Mask == Pred) && (PredIsPTestLike || PredIsWhileLike) &&
             PTest->getOpcode() == AArch64::PTEST_PP_ANY) {
    // For PTEST(PG, PG), PTEST is redundant when PG is the result of an
    // instruction that sets the flags as PTEST would. This is only valid when
    // the condition is any.

    // Fallthrough to simply remove the PTEST.
  } else if (PredIsPTestLike) {
    // For PTEST(PG, PTEST_LIKE(PG, ...)), the PTEST is redundant since the
    // flags are set based on the same mask 'PG', but PTEST_LIKE must operate
    // on 8-bit predicates like the PTEST.  Otherwise, for instructions like
    // compare that also support 16/32/64-bit predicates, the implicit PTEST
    // performed by the compare could consider fewer lanes for these element
    // sizes.
    //
    // For example, consider
    //
    //   ptrue p0.b                    ; P0=1111-1111-1111-1111
    //   index z0.s, #0, #1            ; Z0=<0,1,2,3>
    //   index z1.s, #1, #1            ; Z1=<1,2,3,4>
    //   cmphi p1.s, p0/z, z1.s, z0.s  ; P1=0001-0001-0001-0001
    //                                 ;       ^ last active
    //   ptest p0, p1.b                ; P1=0001-0001-0001-0001
    //                                 ;     ^ last active
    //
    // where the compare generates a canonical all active 32-bit predicate
    // (equivalent to 'ptrue p1.s, all'). The implicit PTEST sets the last
    // active flag, whereas the PTEST instruction with the same mask doesn't.
    // For PTEST_ANY this doesn't apply as the flags in this case would be
    // identical regardless of element size.
    auto PTestLikeMask = MRI->getUniqueVRegDef(Pred->getOperand(1).getReg());
    uint64_t PredElementSize = getElementSizeForOpcode(PredOpcode);
    if ((Mask != PTestLikeMask) ||
        (PredElementSize != AArch64::ElementSizeB &&
         PTest->getOpcode() != AArch64::PTEST_PP_ANY))
      return false;

    // Fallthrough to simply remove the PTEST.
  } else {
    // If OP in PTEST(PG, OP(PG, ...)) has a flag-setting variant change the
    // opcode so the PTEST becomes redundant.
    switch (PredOpcode) {
    case AArch64::AND_PPzPP:
    case AArch64::BIC_PPzPP:
    case AArch64::EOR_PPzPP:
    case AArch64::NAND_PPzPP:
    case AArch64::NOR_PPzPP:
    case AArch64::ORN_PPzPP:
    case AArch64::ORR_PPzPP:
    case AArch64::BRKA_PPzP:
    case AArch64::BRKPA_PPzPP:
    case AArch64::BRKB_PPzP:
    case AArch64::BRKPB_PPzPP:
    case AArch64::RDFFR_PPz: {
      // Check to see if our mask is the same. If not the resulting flag bits
      // may be different and we can't remove the ptest.
      auto *PredMask = MRI->getUniqueVRegDef(Pred->getOperand(1).getReg());
      if (Mask != PredMask)
        return false;
      break;
    }
    case AArch64::BRKN_PPzP: {
      // BRKN uses an all active implicit mask to set flags unlike the other
      // flag-setting instructions.
      // PTEST(PTRUE_B(31), BRKN(PG, A, B)) -> BRKNS(PG, A, B).
      if ((MaskOpcode != AArch64::PTRUE_B) ||
          (Mask->getOperand(1).getImm() != 31))
        return false;
      break;
    }
    case AArch64::PTRUE_B:
      // PTEST(OP=PTRUE_B(A), OP) -> PTRUES_B(A)
      break;
    default:
      // Bail out if we don't recognize the input
      return false;
    }

    NewOp = convertToFlagSettingOpc(PredOpcode);
    OpChanged = true;
  }

  const TargetRegisterInfo *TRI = &getRegisterInfo();

  // If another instruction between Pred and PTest accesses flags, don't remove
  // the ptest or update the earlier instruction to modify them.
  if (areCFlagsAccessedBetweenInstrs(Pred, PTest, TRI))
    return false;

  // If we pass all the checks, it's safe to remove the PTEST and use the flags
  // as they are prior to PTEST. Sometimes this requires the tested PTEST
  // operand to be replaced with an equivalent instruction that also sets the
  // flags.
  Pred->setDesc(get(NewOp));
  PTest->eraseFromParent();
  if (OpChanged) {
    bool succeeded = UpdateOperandRegClass(*Pred);
    (void)succeeded;
    assert(succeeded && "Operands have incompatible register classes!");
    Pred->addRegisterDefined(AArch64::NZCV, TRI);
  }

  // Ensure that the flags def is live.
  if (Pred->registerDefIsDead(AArch64::NZCV, TRI)) {
    unsigned i = 0, e = Pred->getNumOperands();
    for (; i != e; ++i) {
      MachineOperand &MO = Pred->getOperand(i);
      if (MO.isReg() && MO.isDef() && MO.getReg() == AArch64::NZCV) {
        MO.setIsDead(false);
        break;
      }
    }
  }
  return true;
}
1448 
1449 /// Try to optimize a compare instruction. A compare instruction is an
1450 /// instruction which produces AArch64::NZCV. It can be truly compare
1451 /// instruction
1452 /// when there are no uses of its destination register.
1453 ///
1454 /// The following steps are tried in order:
1455 /// 1. Convert CmpInstr into an unconditional version.
1456 /// 2. Remove CmpInstr if an earlier instruction produces a needed condition
1457 ///    code, or if there is an instruction which can be converted into such
1458 ///    an instruction.
1459 ///    Only comparisons with zero are supported.
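///
/// For example (an illustrative sketch; register numbers are arbitrary),
/// step 1 rewrites a flag-setting subtract whose NZCV result is never read:
/// \code
///   subs w0, w1, w2    ; NZCV has no users
/// \endcode
/// to
/// \code
///   sub  w0, w1, w2
/// \endcode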
1460 bool AArch64InstrInfo::optimizeCompareInstr(
1461     MachineInstr &CmpInstr, Register SrcReg, Register SrcReg2, int64_t CmpMask,
1462     int64_t CmpValue, const MachineRegisterInfo *MRI) const {
1463   assert(CmpInstr.getParent());
1464   assert(MRI);
1465 
1466   // Replace SUBSWrr with SUBWrr if NZCV is not used.
1467   int DeadNZCVIdx = CmpInstr.findRegisterDefOperandIdx(AArch64::NZCV, true);
1468   if (DeadNZCVIdx != -1) {
1469     if (CmpInstr.definesRegister(AArch64::WZR) ||
1470         CmpInstr.definesRegister(AArch64::XZR)) {
1471       CmpInstr.eraseFromParent();
1472       return true;
1473     }
1474     unsigned Opc = CmpInstr.getOpcode();
1475     unsigned NewOpc = convertToNonFlagSettingOpc(CmpInstr);
1476     if (NewOpc == Opc)
1477       return false;
1478     const MCInstrDesc &MCID = get(NewOpc);
1479     CmpInstr.setDesc(MCID);
1480     CmpInstr.removeOperand(DeadNZCVIdx);
1481     bool succeeded = UpdateOperandRegClass(CmpInstr);
1482     (void)succeeded;
1483     assert(succeeded && "Some operands reg class are incompatible!");
1484     return true;
1485   }
1486 
1487   if (CmpInstr.getOpcode() == AArch64::PTEST_PP ||
1488       CmpInstr.getOpcode() == AArch64::PTEST_PP_ANY)
1489     return optimizePTestInstr(&CmpInstr, SrcReg, SrcReg2, MRI);
1490 
1491   if (SrcReg2 != 0)
1492     return false;
1493 
1494   // CmpInstr is a compare instruction if its destination register is not used.
1495   if (!MRI->use_nodbg_empty(CmpInstr.getOperand(0).getReg()))
1496     return false;
1497 
1498   if (CmpValue == 0 && substituteCmpToZero(CmpInstr, SrcReg, *MRI))
1499     return true;
1500   return (CmpValue == 0 || CmpValue == 1) &&
1501          removeCmpToZeroOrOne(CmpInstr, SrcReg, CmpValue, *MRI);
1502 }
1503 
1504 /// Get the opcode of the S (flag-setting) version of Instr.
1505 /// If Instr is already the S version, its opcode is returned.
1506 /// AArch64::INSTRUCTION_LIST_END is returned if Instr does not have an S
1507 /// version or we are not interested in it.
1508 static unsigned sForm(MachineInstr &Instr) {
1509   switch (Instr.getOpcode()) {
1510   default:
1511     return AArch64::INSTRUCTION_LIST_END;
1512 
1513   case AArch64::ADDSWrr:
1514   case AArch64::ADDSWri:
1515   case AArch64::ADDSXrr:
1516   case AArch64::ADDSXri:
1517   case AArch64::SUBSWrr:
1518   case AArch64::SUBSWri:
1519   case AArch64::SUBSXrr:
1520   case AArch64::SUBSXri:
1521     return Instr.getOpcode();
1522 
1523   case AArch64::ADDWrr:
1524     return AArch64::ADDSWrr;
1525   case AArch64::ADDWri:
1526     return AArch64::ADDSWri;
1527   case AArch64::ADDXrr:
1528     return AArch64::ADDSXrr;
1529   case AArch64::ADDXri:
1530     return AArch64::ADDSXri;
1531   case AArch64::ADCWr:
1532     return AArch64::ADCSWr;
1533   case AArch64::ADCXr:
1534     return AArch64::ADCSXr;
1535   case AArch64::SUBWrr:
1536     return AArch64::SUBSWrr;
1537   case AArch64::SUBWri:
1538     return AArch64::SUBSWri;
1539   case AArch64::SUBXrr:
1540     return AArch64::SUBSXrr;
1541   case AArch64::SUBXri:
1542     return AArch64::SUBSXri;
1543   case AArch64::SBCWr:
1544     return AArch64::SBCSWr;
1545   case AArch64::SBCXr:
1546     return AArch64::SBCSXr;
1547   case AArch64::ANDWri:
1548     return AArch64::ANDSWri;
1549   case AArch64::ANDXri:
1550     return AArch64::ANDSXri;
1551   }
1552 }
1553 
1554 /// Check if AArch64::NZCV should be alive in successors of MBB.
1555 static bool areCFlagsAliveInSuccessors(const MachineBasicBlock *MBB) {
1556   for (auto *BB : MBB->successors())
1557     if (BB->isLiveIn(AArch64::NZCV))
1558       return true;
1559   return false;
1560 }
1561 
1562 /// \returns The condition code operand index for \p Instr if it is a branch
1563 /// or select and -1 otherwise.
1564 static int
1565 findCondCodeUseOperandIdxForBranchOrSelect(const MachineInstr &Instr) {
1566   switch (Instr.getOpcode()) {
1567   default:
1568     return -1;
1569 
1570   case AArch64::Bcc: {
1571     int Idx = Instr.findRegisterUseOperandIdx(AArch64::NZCV);
1572     assert(Idx >= 2);
1573     return Idx - 2;
1574   }
1575 
1576   case AArch64::CSINVWr:
1577   case AArch64::CSINVXr:
1578   case AArch64::CSINCWr:
1579   case AArch64::CSINCXr:
1580   case AArch64::CSELWr:
1581   case AArch64::CSELXr:
1582   case AArch64::CSNEGWr:
1583   case AArch64::CSNEGXr:
1584   case AArch64::FCSELSrrr:
1585   case AArch64::FCSELDrrr: {
1586     int Idx = Instr.findRegisterUseOperandIdx(AArch64::NZCV);
1587     assert(Idx >= 1);
1588     return Idx - 1;
1589   }
1590   }
1591 }
1592 
1593 /// Find a condition code used by the instruction.
1594 /// Returns AArch64CC::Invalid if either the instruction does not use condition
1595 /// codes or we don't optimize CmpInstr in the presence of such instructions.
1596 static AArch64CC::CondCode findCondCodeUsedByInstr(const MachineInstr &Instr) {
1597   int CCIdx = findCondCodeUseOperandIdxForBranchOrSelect(Instr);
1598   return CCIdx >= 0 ? static_cast<AArch64CC::CondCode>(
1599                           Instr.getOperand(CCIdx).getImm())
1600                     : AArch64CC::Invalid;
1601 }
1602 
1603 static UsedNZCV getUsedNZCV(AArch64CC::CondCode CC) {
1604   assert(CC != AArch64CC::Invalid);
1605   UsedNZCV UsedFlags;
1606   switch (CC) {
1607   default:
1608     break;
1609 
1610   case AArch64CC::EQ: // Z set
1611   case AArch64CC::NE: // Z clear
1612     UsedFlags.Z = true;
1613     break;
1614 
1615   case AArch64CC::HI: // Z clear and C set
1616   case AArch64CC::LS: // Z set   or  C clear
1617     UsedFlags.Z = true;
1618     [[fallthrough]];
1619   case AArch64CC::HS: // C set
1620   case AArch64CC::LO: // C clear
1621     UsedFlags.C = true;
1622     break;
1623 
1624   case AArch64CC::MI: // N set
1625   case AArch64CC::PL: // N clear
1626     UsedFlags.N = true;
1627     break;
1628 
1629   case AArch64CC::VS: // V set
1630   case AArch64CC::VC: // V clear
1631     UsedFlags.V = true;
1632     break;
1633 
1634   case AArch64CC::GT: // Z clear, N and V the same
1635   case AArch64CC::LE: // Z set,   N and V differ
1636     UsedFlags.Z = true;
1637     [[fallthrough]];
1638   case AArch64CC::GE: // N and V the same
1639   case AArch64CC::LT: // N and V differ
1640     UsedFlags.N = true;
1641     UsedFlags.V = true;
1642     break;
1643   }
1644   return UsedFlags;
1645 }
1646 
1647 /// \returns Condition flags used after \p CmpInstr in its MachineBB if NZCV
1648 /// flags are not alive in successors of the same \p CmpInstr and \p MI parent.
1649 /// \returns std::nullopt otherwise.
1650 ///
1651 /// Collect instructions using those flags in \p CCUseInstrs if provided.
1652 std::optional<UsedNZCV>
1653 llvm::examineCFlagsUse(MachineInstr &MI, MachineInstr &CmpInstr,
1654                        const TargetRegisterInfo &TRI,
1655                        SmallVectorImpl<MachineInstr *> *CCUseInstrs) {
1656   MachineBasicBlock *CmpParent = CmpInstr.getParent();
1657   if (MI.getParent() != CmpParent)
1658     return std::nullopt;
1659 
1660   if (areCFlagsAliveInSuccessors(CmpParent))
1661     return std::nullopt;
1662 
1663   UsedNZCV NZCVUsedAfterCmp;
1664   for (MachineInstr &Instr : instructionsWithoutDebug(
1665            std::next(CmpInstr.getIterator()), CmpParent->instr_end())) {
1666     if (Instr.readsRegister(AArch64::NZCV, &TRI)) {
1667       AArch64CC::CondCode CC = findCondCodeUsedByInstr(Instr);
1668       if (CC == AArch64CC::Invalid) // Unsupported conditional instruction
1669         return std::nullopt;
1670       NZCVUsedAfterCmp |= getUsedNZCV(CC);
1671       if (CCUseInstrs)
1672         CCUseInstrs->push_back(&Instr);
1673     }
1674     if (Instr.modifiesRegister(AArch64::NZCV, &TRI))
1675       break;
1676   }
1677   return NZCVUsedAfterCmp;
1678 }
1679 
1680 static bool isADDSRegImm(unsigned Opcode) {
1681   return Opcode == AArch64::ADDSWri || Opcode == AArch64::ADDSXri;
1682 }
1683 
1684 static bool isSUBSRegImm(unsigned Opcode) {
1685   return Opcode == AArch64::SUBSWri || Opcode == AArch64::SUBSXri;
1686 }
1687 
1688 /// Check if CmpInstr can be substituted by MI.
1689 ///
1690 /// CmpInstr can be substituted:
1691 /// - CmpInstr is either 'ADDS %vreg, 0' or 'SUBS %vreg, 0'
1692 /// - and, MI and CmpInstr are from the same MachineBB
1693 /// - and, condition flags are not alive in successors of the CmpInstr parent
1694 /// - and, if MI opcode is the S form there must be no defs of flags between
1695 ///        MI and CmpInstr
1696 ///        or if MI opcode is not the S form there must be neither defs of flags
1697 ///        nor uses of flags between MI and CmpInstr.
1698 /// - and, C/V flags are not used after CmpInstr
1699 static bool canInstrSubstituteCmpInstr(MachineInstr &MI, MachineInstr &CmpInstr,
1700                                        const TargetRegisterInfo &TRI) {
1701   assert(sForm(MI) != AArch64::INSTRUCTION_LIST_END);
1702 
1703   const unsigned CmpOpcode = CmpInstr.getOpcode();
1704   if (!isADDSRegImm(CmpOpcode) && !isSUBSRegImm(CmpOpcode))
1705     return false;
1706 
1707   std::optional<UsedNZCV> NZVCUsed = examineCFlagsUse(MI, CmpInstr, TRI);
1708   if (!NZVCUsed || NZVCUsed->C || NZVCUsed->V)
1709     return false;
1710 
1711   AccessKind AccessToCheck = AK_Write;
1712   if (sForm(MI) != MI.getOpcode())
1713     AccessToCheck = AK_All;
1714   return !areCFlagsAccessedBetweenInstrs(&MI, &CmpInstr, &TRI, AccessToCheck);
1715 }
1716 
1717 /// Substitute an instruction comparing to zero with another instruction
1718 /// which produces needed condition flags.
1719 ///
1720 /// Return true on success.
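///
/// For example (a sketch; register numbers are arbitrary), the explicit
/// compare below becomes redundant once the defining SUB is upgraded to its
/// flag-setting form:
/// \code
///   sub  w8, w9, w10
///   cmp  w8, #0
///   b.eq
/// \endcode
/// to
/// \code
///   subs w8, w9, w10
///   b.eq
/// \endcode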
1721 bool AArch64InstrInfo::substituteCmpToZero(
1722     MachineInstr &CmpInstr, unsigned SrcReg,
1723     const MachineRegisterInfo &MRI) const {
1724   // Get the unique definition of SrcReg.
1725   MachineInstr *MI = MRI.getUniqueVRegDef(SrcReg);
1726   if (!MI)
1727     return false;
1728 
1729   const TargetRegisterInfo &TRI = getRegisterInfo();
1730 
1731   unsigned NewOpc = sForm(*MI);
1732   if (NewOpc == AArch64::INSTRUCTION_LIST_END)
1733     return false;
1734 
1735   if (!canInstrSubstituteCmpInstr(*MI, CmpInstr, TRI))
1736     return false;
1737 
1738   // Update the instruction to set NZCV.
1739   MI->setDesc(get(NewOpc));
1740   CmpInstr.eraseFromParent();
1741   bool succeeded = UpdateOperandRegClass(*MI);
1742   (void)succeeded;
1743   assert(succeeded && "Some operands reg class are incompatible!");
1744   MI->addRegisterDefined(AArch64::NZCV, &TRI);
1745   return true;
1746 }
1747 
1748 /// \returns True if \p CmpInstr can be removed.
1749 ///
1750 /// \p IsInvertCC is true if, after removing \p CmpInstr, condition
1751 /// codes used in \p CCUseInstrs must be inverted.
1752 static bool canCmpInstrBeRemoved(MachineInstr &MI, MachineInstr &CmpInstr,
1753                                  int CmpValue, const TargetRegisterInfo &TRI,
1754                                  SmallVectorImpl<MachineInstr *> &CCUseInstrs,
1755                                  bool &IsInvertCC) {
1756   assert((CmpValue == 0 || CmpValue == 1) &&
1757          "Only comparisons to 0 or 1 considered for removal!");
1758 
1759   // MI is 'CSINCWr %vreg, wzr, wzr, <cc>' or 'CSINCXr %vreg, xzr, xzr, <cc>'
1760   unsigned MIOpc = MI.getOpcode();
1761   if (MIOpc == AArch64::CSINCWr) {
1762     if (MI.getOperand(1).getReg() != AArch64::WZR ||
1763         MI.getOperand(2).getReg() != AArch64::WZR)
1764       return false;
1765   } else if (MIOpc == AArch64::CSINCXr) {
1766     if (MI.getOperand(1).getReg() != AArch64::XZR ||
1767         MI.getOperand(2).getReg() != AArch64::XZR)
1768       return false;
1769   } else {
1770     return false;
1771   }
1772   AArch64CC::CondCode MICC = findCondCodeUsedByInstr(MI);
1773   if (MICC == AArch64CC::Invalid)
1774     return false;
1775 
1776   // NZCV needs to be defined
1777   if (MI.findRegisterDefOperandIdx(AArch64::NZCV, true) != -1)
1778     return false;
1779 
1780   // CmpInstr is 'ADDS %vreg, 0' or 'SUBS %vreg, 0' or 'SUBS %vreg, 1'
1781   const unsigned CmpOpcode = CmpInstr.getOpcode();
1782   bool IsSubsRegImm = isSUBSRegImm(CmpOpcode);
1783   if (CmpValue && !IsSubsRegImm)
1784     return false;
1785   if (!CmpValue && !IsSubsRegImm && !isADDSRegImm(CmpOpcode))
1786     return false;
1787 
1788   // MI conditions allowed: eq, ne, mi, pl
1789   UsedNZCV MIUsedNZCV = getUsedNZCV(MICC);
1790   if (MIUsedNZCV.C || MIUsedNZCV.V)
1791     return false;
1792 
1793   std::optional<UsedNZCV> NZCVUsedAfterCmp =
1794       examineCFlagsUse(MI, CmpInstr, TRI, &CCUseInstrs);
1795   // Condition flags are not used in CmpInstr basic block successors, and only
1796   // Z or N flags are allowed to be used after CmpInstr within its basic block.
1797   if (!NZCVUsedAfterCmp || NZCVUsedAfterCmp->C || NZCVUsedAfterCmp->V)
1798     return false;
1799   // Z or N flag used after CmpInstr must correspond to the flag used in MI
1800   if ((MIUsedNZCV.Z && NZCVUsedAfterCmp->N) ||
1801       (MIUsedNZCV.N && NZCVUsedAfterCmp->Z))
1802     return false;
1803   // If CmpInstr is a comparison to zero, MI conditions are limited to eq, ne
1804   if (MIUsedNZCV.N && !CmpValue)
1805     return false;
1806 
1807   // There must be no defs of flags between MI and CmpInstr
1808   if (areCFlagsAccessedBetweenInstrs(&MI, &CmpInstr, &TRI, AK_Write))
1809     return false;
1810 
1811   // Condition code is inverted in the following cases:
1812   // 1. MI condition is ne; CmpInstr is 'ADDS %vreg, 0' or 'SUBS %vreg, 0'
1813   // 2. MI condition is eq, pl; CmpInstr is 'SUBS %vreg, 1'
1814   IsInvertCC = (CmpValue && (MICC == AArch64CC::EQ || MICC == AArch64CC::PL)) ||
1815                (!CmpValue && MICC == AArch64CC::NE);
1816   return true;
1817 }
1818 
1819 /// Remove comparison in csinc-cmp sequence
1820 ///
1821 /// Examples:
1822 /// 1. \code
1823 ///   csinc w9, wzr, wzr, ne
1824 ///   cmp   w9, #0
1825 ///   b.eq
1826 ///    \endcode
1827 /// to
1828 ///    \code
1829 ///   csinc w9, wzr, wzr, ne
1830 ///   b.ne
1831 ///    \endcode
1832 ///
1833 /// 2. \code
1834 ///   csinc x2, xzr, xzr, mi
1835 ///   cmp   x2, #1
1836 ///   b.pl
1837 ///    \endcode
1838 /// to
1839 ///    \code
1840 ///   csinc x2, xzr, xzr, mi
1841 ///   b.pl
1842 ///    \endcode
1843 ///
1844 /// \param  CmpInstr comparison instruction
1845 /// \return True when comparison removed
1846 bool AArch64InstrInfo::removeCmpToZeroOrOne(
1847     MachineInstr &CmpInstr, unsigned SrcReg, int CmpValue,
1848     const MachineRegisterInfo &MRI) const {
1849   MachineInstr *MI = MRI.getUniqueVRegDef(SrcReg);
1850   if (!MI)
1851     return false;
1852   const TargetRegisterInfo &TRI = getRegisterInfo();
1853   SmallVector<MachineInstr *, 4> CCUseInstrs;
1854   bool IsInvertCC = false;
1855   if (!canCmpInstrBeRemoved(*MI, CmpInstr, CmpValue, TRI, CCUseInstrs,
1856                             IsInvertCC))
1857     return false;
1858   // Make transformation
1859   CmpInstr.eraseFromParent();
1860   if (IsInvertCC) {
1861     // Invert condition codes in CmpInstr CC users
1862     for (MachineInstr *CCUseInstr : CCUseInstrs) {
1863       int Idx = findCondCodeUseOperandIdxForBranchOrSelect(*CCUseInstr);
1864       assert(Idx >= 0 && "Unexpected instruction using CC.");
1865       MachineOperand &CCOperand = CCUseInstr->getOperand(Idx);
1866       AArch64CC::CondCode CCUse = AArch64CC::getInvertedCondCode(
1867           static_cast<AArch64CC::CondCode>(CCOperand.getImm()));
1868       CCOperand.setImm(CCUse);
1869     }
1870   }
1871   return true;
1872 }
1873 
1874 bool AArch64InstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
1875   if (MI.getOpcode() != TargetOpcode::LOAD_STACK_GUARD &&
1876       MI.getOpcode() != AArch64::CATCHRET)
1877     return false;
1878 
1879   MachineBasicBlock &MBB = *MI.getParent();
1880   auto &Subtarget = MBB.getParent()->getSubtarget<AArch64Subtarget>();
1881   auto TRI = Subtarget.getRegisterInfo();
1882   DebugLoc DL = MI.getDebugLoc();
1883 
1884   if (MI.getOpcode() == AArch64::CATCHRET) {
1885     // Skip to the first instruction before the epilog.
1886     const TargetInstrInfo *TII =
1887       MBB.getParent()->getSubtarget().getInstrInfo();
1888     MachineBasicBlock *TargetMBB = MI.getOperand(0).getMBB();
1889     auto MBBI = MachineBasicBlock::iterator(MI);
1890     MachineBasicBlock::iterator FirstEpilogSEH = std::prev(MBBI);
1891     while (FirstEpilogSEH->getFlag(MachineInstr::FrameDestroy) &&
1892            FirstEpilogSEH != MBB.begin())
1893       FirstEpilogSEH = std::prev(FirstEpilogSEH);
1894     if (FirstEpilogSEH != MBB.begin())
1895       FirstEpilogSEH = std::next(FirstEpilogSEH);
1896     BuildMI(MBB, FirstEpilogSEH, DL, TII->get(AArch64::ADRP))
1897         .addReg(AArch64::X0, RegState::Define)
1898         .addMBB(TargetMBB);
1899     BuildMI(MBB, FirstEpilogSEH, DL, TII->get(AArch64::ADDXri))
1900         .addReg(AArch64::X0, RegState::Define)
1901         .addReg(AArch64::X0)
1902         .addMBB(TargetMBB)
1903         .addImm(0);
1904     return true;
1905   }
1906 
1907   Register Reg = MI.getOperand(0).getReg();
1908   Module &M = *MBB.getParent()->getFunction().getParent();
1909   if (M.getStackProtectorGuard() == "sysreg") {
1910     const AArch64SysReg::SysReg *SrcReg =
1911         AArch64SysReg::lookupSysRegByName(M.getStackProtectorGuardReg());
1912     if (!SrcReg)
1913       report_fatal_error("Unknown SysReg for Stack Protector Guard Register");
1914 
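    // For example (a sketch), with guard register SP_EL0 and offset 8 this
    // expands to:
    //   mrs xN, SP_EL0
    //   ldr xN, [xN, #8]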
1915     // mrs xN, sysreg
1916     BuildMI(MBB, MI, DL, get(AArch64::MRS))
1917         .addDef(Reg, RegState::Renamable)
1918         .addImm(SrcReg->Encoding);
1919     int Offset = M.getStackProtectorGuardOffset();
1920     if (Offset >= 0 && Offset <= 32760 && Offset % 8 == 0) {
1921       // ldr xN, [xN, #offset]
1922       BuildMI(MBB, MI, DL, get(AArch64::LDRXui))
1923           .addDef(Reg)
1924           .addUse(Reg, RegState::Kill)
1925           .addImm(Offset / 8);
1926     } else if (Offset >= -256 && Offset <= 255) {
1927       // ldur xN, [xN, #offset]
1928       BuildMI(MBB, MI, DL, get(AArch64::LDURXi))
1929           .addDef(Reg)
1930           .addUse(Reg, RegState::Kill)
1931           .addImm(Offset);
1932     } else if (Offset >= -4095 && Offset <= 4095) {
1933       if (Offset > 0) {
1934         // add xN, xN, #offset
1935         BuildMI(MBB, MI, DL, get(AArch64::ADDXri))
1936             .addDef(Reg)
1937             .addUse(Reg, RegState::Kill)
1938             .addImm(Offset)
1939             .addImm(0);
1940       } else {
1941         // sub xN, xN, #offset
1942         BuildMI(MBB, MI, DL, get(AArch64::SUBXri))
1943             .addDef(Reg)
1944             .addUse(Reg, RegState::Kill)
1945             .addImm(-Offset)
1946             .addImm(0);
1947       }
1948       // ldr xN, [xN]
1949       BuildMI(MBB, MI, DL, get(AArch64::LDRXui))
1950           .addDef(Reg)
1951           .addUse(Reg, RegState::Kill)
1952           .addImm(0);
1953     } else {
1954       // Cases that are larger than +/- 4095 and not a multiple of 8, or larger
1955       // than 32760.
1956       // It might be nice to use AArch64::MOVi32imm here, which would get
1957       // expanded in PreSched2 after PostRA, but our lone scratch Reg already
1958       // contains the MRS result. findScratchNonCalleeSaveRegister() in
1959       // AArch64FrameLowering might help us find such a scratch register
1960       // though. If we failed to find a scratch register, we could emit a
1961       // stream of add instructions to build up the immediate. Or, we could try
1962       // to insert a AArch64::MOVi32imm before register allocation so that we
1963       // didn't need to scavenge for a scratch register.
1964       report_fatal_error("Unable to encode Stack Protector Guard Offset");
1965     }
1966     MBB.erase(MI);
1967     return true;
1968   }
1969 
1970   const GlobalValue *GV =
1971       cast<GlobalValue>((*MI.memoperands_begin())->getValue());
1972   const TargetMachine &TM = MBB.getParent()->getTarget();
1973   unsigned OpFlags = Subtarget.ClassifyGlobalReference(GV, TM);
1974   const unsigned char MO_NC = AArch64II::MO_NC;
1975 
1976   if ((OpFlags & AArch64II::MO_GOT) != 0) {
1977     BuildMI(MBB, MI, DL, get(AArch64::LOADgot), Reg)
1978         .addGlobalAddress(GV, 0, OpFlags);
1979     if (Subtarget.isTargetILP32()) {
1980       unsigned Reg32 = TRI->getSubReg(Reg, AArch64::sub_32);
1981       BuildMI(MBB, MI, DL, get(AArch64::LDRWui))
1982           .addDef(Reg32, RegState::Dead)
1983           .addUse(Reg, RegState::Kill)
1984           .addImm(0)
1985           .addMemOperand(*MI.memoperands_begin())
1986           .addDef(Reg, RegState::Implicit);
1987     } else {
1988       BuildMI(MBB, MI, DL, get(AArch64::LDRXui), Reg)
1989           .addReg(Reg, RegState::Kill)
1990           .addImm(0)
1991           .addMemOperand(*MI.memoperands_begin());
1992     }
1993   } else if (TM.getCodeModel() == CodeModel::Large) {
1994     assert(!Subtarget.isTargetILP32() && "how can large exist in ILP32?");
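    // Materialize the full 64-bit address 16 bits at a time (MOVZ plus three
    // MOVKs), then load the guard value through it.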
1995     BuildMI(MBB, MI, DL, get(AArch64::MOVZXi), Reg)
1996         .addGlobalAddress(GV, 0, AArch64II::MO_G0 | MO_NC)
1997         .addImm(0);
1998     BuildMI(MBB, MI, DL, get(AArch64::MOVKXi), Reg)
1999         .addReg(Reg, RegState::Kill)
2000         .addGlobalAddress(GV, 0, AArch64II::MO_G1 | MO_NC)
2001         .addImm(16);
2002     BuildMI(MBB, MI, DL, get(AArch64::MOVKXi), Reg)
2003         .addReg(Reg, RegState::Kill)
2004         .addGlobalAddress(GV, 0, AArch64II::MO_G2 | MO_NC)
2005         .addImm(32);
2006     BuildMI(MBB, MI, DL, get(AArch64::MOVKXi), Reg)
2007         .addReg(Reg, RegState::Kill)
2008         .addGlobalAddress(GV, 0, AArch64II::MO_G3)
2009         .addImm(48);
2010     BuildMI(MBB, MI, DL, get(AArch64::LDRXui), Reg)
2011         .addReg(Reg, RegState::Kill)
2012         .addImm(0)
2013         .addMemOperand(*MI.memoperands_begin());
2014   } else if (TM.getCodeModel() == CodeModel::Tiny) {
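    // Tiny code model: the symbol is assumed to be within the +/-1MiB range
    // of ADR, so a single ADR materializes the address.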
2015     BuildMI(MBB, MI, DL, get(AArch64::ADR), Reg)
2016         .addGlobalAddress(GV, 0, OpFlags);
2017   } else {
2018     BuildMI(MBB, MI, DL, get(AArch64::ADRP), Reg)
2019         .addGlobalAddress(GV, 0, OpFlags | AArch64II::MO_PAGE);
2020     unsigned char LoFlags = OpFlags | AArch64II::MO_PAGEOFF | MO_NC;
2021     if (Subtarget.isTargetILP32()) {
2022       unsigned Reg32 = TRI->getSubReg(Reg, AArch64::sub_32);
2023       BuildMI(MBB, MI, DL, get(AArch64::LDRWui))
2024           .addDef(Reg32, RegState::Dead)
2025           .addUse(Reg, RegState::Kill)
2026           .addGlobalAddress(GV, 0, LoFlags)
2027           .addMemOperand(*MI.memoperands_begin())
2028           .addDef(Reg, RegState::Implicit);
2029     } else {
2030       BuildMI(MBB, MI, DL, get(AArch64::LDRXui), Reg)
2031           .addReg(Reg, RegState::Kill)
2032           .addGlobalAddress(GV, 0, LoFlags)
2033           .addMemOperand(*MI.memoperands_begin());
2034     }
2035   }
2036 
2037   MBB.erase(MI);
2038 
2039   return true;
2040 }
2041 
2042 // Return true if this instruction simply sets its single destination register
2043 // to zero. This is equivalent to a register rename of the zero-register.
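// For example, "mov w0, #0" (MOVZWi) and "and x1, xzr, #0xff" (ANDXri with
// the zero register as source) both qualify.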
2044 bool AArch64InstrInfo::isGPRZero(const MachineInstr &MI) {
2045   switch (MI.getOpcode()) {
2046   default:
2047     break;
2048   case AArch64::MOVZWi:
2049   case AArch64::MOVZXi: // movz Rd, #0 (LSL #0)
2050     if (MI.getOperand(1).isImm() && MI.getOperand(1).getImm() == 0) {
2051       assert(MI.getDesc().getNumOperands() == 3 &&
2052              MI.getOperand(2).getImm() == 0 && "invalid MOVZi operands");
2053       return true;
2054     }
2055     break;
2056   case AArch64::ANDWri: // and Rd, Rzr, #imm
2057     return MI.getOperand(1).getReg() == AArch64::WZR;
2058   case AArch64::ANDXri:
2059     return MI.getOperand(1).getReg() == AArch64::XZR;
2060   case TargetOpcode::COPY:
2061     return MI.getOperand(1).getReg() == AArch64::WZR;
2062   }
2063   return false;
2064 }
2065 
2066 // Return true if this instruction simply renames a general register without
2067 // modifying bits.
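// For example, "orr x0, xzr, x1" (ORRXrs with LSL #0) and "add x0, x1, #0"
// (ADDXri with no shift) both act as plain register moves.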
2068 bool AArch64InstrInfo::isGPRCopy(const MachineInstr &MI) {
2069   switch (MI.getOpcode()) {
2070   default:
2071     break;
2072   case TargetOpcode::COPY: {
2073     // GPR32 copies will be lowered to ORRXrs
2074     Register DstReg = MI.getOperand(0).getReg();
2075     return (AArch64::GPR32RegClass.contains(DstReg) ||
2076             AArch64::GPR64RegClass.contains(DstReg));
2077   }
2078   case AArch64::ORRXrs: // orr Xd, Xzr, Xm (LSL #0)
2079     if (MI.getOperand(1).getReg() == AArch64::XZR) {
2080       assert(MI.getDesc().getNumOperands() == 4 &&
2081              MI.getOperand(3).getImm() == 0 && "invalid ORRrs operands");
2082       return true;
2083     }
2084     break;
2085   case AArch64::ADDXri: // add Xd, Xn, #0 (LSL #0)
2086     if (MI.getOperand(2).getImm() == 0) {
2087       assert(MI.getDesc().getNumOperands() == 4 &&
2088              MI.getOperand(3).getImm() == 0 && "invalid ADDXri operands");
2089       return true;
2090     }
2091     break;
2092   }
2093   return false;
2094 }
2095 
2096 // Return true if this instruction simply renames a general register without
2097 // modifying bits.
2098 bool AArch64InstrInfo::isFPRCopy(const MachineInstr &MI) {
2099   switch (MI.getOpcode()) {
2100   default:
2101     break;
2102   case TargetOpcode::COPY: {
2103     Register DstReg = MI.getOperand(0).getReg();
2104     return AArch64::FPR128RegClass.contains(DstReg);
2105   }
2106   case AArch64::ORRv16i8:
2107     if (MI.getOperand(1).getReg() == MI.getOperand(2).getReg()) {
2108       assert(MI.getDesc().getNumOperands() == 3 && MI.getOperand(0).isReg() &&
2109              "invalid ORRv16i8 operands");
2110       return true;
2111     }
2112     break;
2113   }
2114   return false;
2115 }
2116 
2117 unsigned AArch64InstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
2118                                                int &FrameIndex) const {
2119   switch (MI.getOpcode()) {
2120   default:
2121     break;
2122   case AArch64::LDRWui:
2123   case AArch64::LDRXui:
2124   case AArch64::LDRBui:
2125   case AArch64::LDRHui:
2126   case AArch64::LDRSui:
2127   case AArch64::LDRDui:
2128   case AArch64::LDRQui:
2129     if (MI.getOperand(0).getSubReg() == 0 && MI.getOperand(1).isFI() &&
2130         MI.getOperand(2).isImm() && MI.getOperand(2).getImm() == 0) {
2131       FrameIndex = MI.getOperand(1).getIndex();
2132       return MI.getOperand(0).getReg();
2133     }
2134     break;
2135   }
2136 
2137   return 0;
2138 }
2139 
2140 unsigned AArch64InstrInfo::isStoreToStackSlot(const MachineInstr &MI,
2141                                               int &FrameIndex) const {
2142   switch (MI.getOpcode()) {
2143   default:
2144     break;
2145   case AArch64::STRWui:
2146   case AArch64::STRXui:
2147   case AArch64::STRBui:
2148   case AArch64::STRHui:
2149   case AArch64::STRSui:
2150   case AArch64::STRDui:
2151   case AArch64::STRQui:
2152   case AArch64::LDR_PXI:
2153   case AArch64::STR_PXI:
2154     if (MI.getOperand(0).getSubReg() == 0 && MI.getOperand(1).isFI() &&
2155         MI.getOperand(2).isImm() && MI.getOperand(2).getImm() == 0) {
2156       FrameIndex = MI.getOperand(1).getIndex();
2157       return MI.getOperand(0).getReg();
2158     }
2159     break;
2160   }
2161   return 0;
2162 }
2163 
2164 /// Check all MachineMemOperands for a hint to suppress pairing.
2165 bool AArch64InstrInfo::isLdStPairSuppressed(const MachineInstr &MI) {
2166   return llvm::any_of(MI.memoperands(), [](MachineMemOperand *MMO) {
2167     return MMO->getFlags() & MOSuppressPair;
2168   });
2169 }
2170 
2171 /// Set a flag on the first MachineMemOperand to suppress pairing.
2172 void AArch64InstrInfo::suppressLdStPair(MachineInstr &MI) {
2173   if (MI.memoperands_empty())
2174     return;
2175   (*MI.memoperands_begin())->setFlags(MOSuppressPair);
2176 }
2177 
2178 /// Check all MachineMemOperands for a hint that the load/store is strided.
2179 bool AArch64InstrInfo::isStridedAccess(const MachineInstr &MI) {
2180   return llvm::any_of(MI.memoperands(), [](MachineMemOperand *MMO) {
2181     return MMO->getFlags() & MOStridedAccess;
2182   });
2183 }
2184 
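/// Return true if \p Opc is a load/store that takes an unscaled, signed
/// 9-bit immediate offset (the LDUR/STUR family and the pre-indexed forms
/// listed below).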
2185 bool AArch64InstrInfo::hasUnscaledLdStOffset(unsigned Opc) {
2186   switch (Opc) {
2187   default:
2188     return false;
2189   case AArch64::STURSi:
2190   case AArch64::STRSpre:
2191   case AArch64::STURDi:
2192   case AArch64::STRDpre:
2193   case AArch64::STURQi:
2194   case AArch64::STRQpre:
2195   case AArch64::STURBBi:
2196   case AArch64::STURHHi:
2197   case AArch64::STURWi:
2198   case AArch64::STRWpre:
2199   case AArch64::STURXi:
2200   case AArch64::STRXpre:
2201   case AArch64::LDURSi:
2202   case AArch64::LDRSpre:
2203   case AArch64::LDURDi:
2204   case AArch64::LDRDpre:
2205   case AArch64::LDURQi:
2206   case AArch64::LDRQpre:
2207   case AArch64::LDURWi:
2208   case AArch64::LDRWpre:
2209   case AArch64::LDURXi:
2210   case AArch64::LDRXpre:
2211   case AArch64::LDURSWi:
2212   case AArch64::LDURHHi:
2213   case AArch64::LDURBBi:
2214   case AArch64::LDURSBWi:
2215   case AArch64::LDURSHWi:
2216     return true;
2217   }
2218 }
2219 
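/// Map a scaled (unsigned, scaled immediate) load/store opcode to its
/// unscaled LDUR/STUR counterpart, or return std::nullopt if there is none.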
2220 std::optional<unsigned> AArch64InstrInfo::getUnscaledLdSt(unsigned Opc) {
2221   switch (Opc) {
2222   default: return {};
2223   case AArch64::PRFMui: return AArch64::PRFUMi;
2224   case AArch64::LDRXui: return AArch64::LDURXi;
2225   case AArch64::LDRWui: return AArch64::LDURWi;
2226   case AArch64::LDRBui: return AArch64::LDURBi;
2227   case AArch64::LDRHui: return AArch64::LDURHi;
2228   case AArch64::LDRSui: return AArch64::LDURSi;
2229   case AArch64::LDRDui: return AArch64::LDURDi;
2230   case AArch64::LDRQui: return AArch64::LDURQi;
2231   case AArch64::LDRBBui: return AArch64::LDURBBi;
2232   case AArch64::LDRHHui: return AArch64::LDURHHi;
2233   case AArch64::LDRSBXui: return AArch64::LDURSBXi;
2234   case AArch64::LDRSBWui: return AArch64::LDURSBWi;
2235   case AArch64::LDRSHXui: return AArch64::LDURSHXi;
2236   case AArch64::LDRSHWui: return AArch64::LDURSHWi;
2237   case AArch64::LDRSWui: return AArch64::LDURSWi;
2238   case AArch64::STRXui: return AArch64::STURXi;
2239   case AArch64::STRWui: return AArch64::STURWi;
2240   case AArch64::STRBui: return AArch64::STURBi;
2241   case AArch64::STRHui: return AArch64::STURHi;
2242   case AArch64::STRSui: return AArch64::STURSi;
2243   case AArch64::STRDui: return AArch64::STURDi;
2244   case AArch64::STRQui: return AArch64::STURQi;
2245   case AArch64::STRBBui: return AArch64::STURBBi;
2246   case AArch64::STRHHui: return AArch64::STURHHi;
2247   }
2248 }
2249 
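/// Return the operand index of the immediate offset for the given load/store
/// opcode.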
2250 unsigned AArch64InstrInfo::getLoadStoreImmIdx(unsigned Opc) {
2251   switch (Opc) {
2252   default:
2253     return 2;
2254   case AArch64::LDPXi:
2255   case AArch64::LDPDi:
2256   case AArch64::STPXi:
2257   case AArch64::STPDi:
2258   case AArch64::LDNPXi:
2259   case AArch64::LDNPDi:
2260   case AArch64::STNPXi:
2261   case AArch64::STNPDi:
2262   case AArch64::LDPQi:
2263   case AArch64::STPQi:
2264   case AArch64::LDNPQi:
2265   case AArch64::STNPQi:
2266   case AArch64::LDPWi:
2267   case AArch64::LDPSi:
2268   case AArch64::STPWi:
2269   case AArch64::STPSi:
2270   case AArch64::LDNPWi:
2271   case AArch64::LDNPSi:
2272   case AArch64::STNPWi:
2273   case AArch64::STNPSi:
2274   case AArch64::LDG:
2275   case AArch64::STGPi:
2276 
2277   case AArch64::LD1B_IMM:
2278   case AArch64::LD1B_H_IMM:
2279   case AArch64::LD1B_S_IMM:
2280   case AArch64::LD1B_D_IMM:
2281   case AArch64::LD1SB_H_IMM:
2282   case AArch64::LD1SB_S_IMM:
2283   case AArch64::LD1SB_D_IMM:
2284   case AArch64::LD1H_IMM:
2285   case AArch64::LD1H_S_IMM:
2286   case AArch64::LD1H_D_IMM:
2287   case AArch64::LD1SH_S_IMM:
2288   case AArch64::LD1SH_D_IMM:
2289   case AArch64::LD1W_IMM:
2290   case AArch64::LD1W_D_IMM:
2291   case AArch64::LD1SW_D_IMM:
2292   case AArch64::LD1D_IMM:
2293 
2294   case AArch64::LD2B_IMM:
2295   case AArch64::LD2H_IMM:
2296   case AArch64::LD2W_IMM:
2297   case AArch64::LD2D_IMM:
2298   case AArch64::LD3B_IMM:
2299   case AArch64::LD3H_IMM:
2300   case AArch64::LD3W_IMM:
2301   case AArch64::LD3D_IMM:
2302   case AArch64::LD4B_IMM:
2303   case AArch64::LD4H_IMM:
2304   case AArch64::LD4W_IMM:
2305   case AArch64::LD4D_IMM:
2306 
2307   case AArch64::ST1B_IMM:
2308   case AArch64::ST1B_H_IMM:
2309   case AArch64::ST1B_S_IMM:
2310   case AArch64::ST1B_D_IMM:
2311   case AArch64::ST1H_IMM:
2312   case AArch64::ST1H_S_IMM:
2313   case AArch64::ST1H_D_IMM:
2314   case AArch64::ST1W_IMM:
2315   case AArch64::ST1W_D_IMM:
2316   case AArch64::ST1D_IMM:
2317 
2318   case AArch64::ST2B_IMM:
2319   case AArch64::ST2H_IMM:
2320   case AArch64::ST2W_IMM:
2321   case AArch64::ST2D_IMM:
2322   case AArch64::ST3B_IMM:
2323   case AArch64::ST3H_IMM:
2324   case AArch64::ST3W_IMM:
2325   case AArch64::ST3D_IMM:
2326   case AArch64::ST4B_IMM:
2327   case AArch64::ST4H_IMM:
2328   case AArch64::ST4W_IMM:
2329   case AArch64::ST4D_IMM:
2330 
2331   case AArch64::LD1RB_IMM:
2332   case AArch64::LD1RB_H_IMM:
2333   case AArch64::LD1RB_S_IMM:
2334   case AArch64::LD1RB_D_IMM:
2335   case AArch64::LD1RSB_H_IMM:
2336   case AArch64::LD1RSB_S_IMM:
2337   case AArch64::LD1RSB_D_IMM:
2338   case AArch64::LD1RH_IMM:
2339   case AArch64::LD1RH_S_IMM:
2340   case AArch64::LD1RH_D_IMM:
2341   case AArch64::LD1RSH_S_IMM:
2342   case AArch64::LD1RSH_D_IMM:
2343   case AArch64::LD1RW_IMM:
2344   case AArch64::LD1RW_D_IMM:
2345   case AArch64::LD1RSW_IMM:
2346   case AArch64::LD1RD_IMM:
2347 
2348   case AArch64::LDNT1B_ZRI:
2349   case AArch64::LDNT1H_ZRI:
2350   case AArch64::LDNT1W_ZRI:
2351   case AArch64::LDNT1D_ZRI:
2352   case AArch64::STNT1B_ZRI:
2353   case AArch64::STNT1H_ZRI:
2354   case AArch64::STNT1W_ZRI:
2355   case AArch64::STNT1D_ZRI:
2356 
2357   case AArch64::LDNF1B_IMM:
2358   case AArch64::LDNF1B_H_IMM:
2359   case AArch64::LDNF1B_S_IMM:
2360   case AArch64::LDNF1B_D_IMM:
2361   case AArch64::LDNF1SB_H_IMM:
2362   case AArch64::LDNF1SB_S_IMM:
2363   case AArch64::LDNF1SB_D_IMM:
2364   case AArch64::LDNF1H_IMM:
2365   case AArch64::LDNF1H_S_IMM:
2366   case AArch64::LDNF1H_D_IMM:
2367   case AArch64::LDNF1SH_S_IMM:
2368   case AArch64::LDNF1SH_D_IMM:
2369   case AArch64::LDNF1W_IMM:
2370   case AArch64::LDNF1W_D_IMM:
2371   case AArch64::LDNF1SW_D_IMM:
2372   case AArch64::LDNF1D_IMM:
2373     return 3;
2374   case AArch64::ADDG:
2375   case AArch64::STGOffset:
2376   case AArch64::LDR_PXI:
2377   case AArch64::STR_PXI:
2378     return 2;
2379   }
2380 }
2381 
2382 bool AArch64InstrInfo::isPairableLdStInst(const MachineInstr &MI) {
2383   switch (MI.getOpcode()) {
2384   default:
2385     return false;
2386   // Scaled instructions.
2387   case AArch64::STRSui:
2388   case AArch64::STRDui:
2389   case AArch64::STRQui:
2390   case AArch64::STRXui:
2391   case AArch64::STRWui:
2392   case AArch64::LDRSui:
2393   case AArch64::LDRDui:
2394   case AArch64::LDRQui:
2395   case AArch64::LDRXui:
2396   case AArch64::LDRWui:
2397   case AArch64::LDRSWui:
2398   // Unscaled instructions.
2399   case AArch64::STURSi:
2400   case AArch64::STRSpre:
2401   case AArch64::STURDi:
2402   case AArch64::STRDpre:
2403   case AArch64::STURQi:
2404   case AArch64::STRQpre:
2405   case AArch64::STURWi:
2406   case AArch64::STRWpre:
2407   case AArch64::STURXi:
2408   case AArch64::STRXpre:
2409   case AArch64::LDURSi:
2410   case AArch64::LDRSpre:
2411   case AArch64::LDURDi:
2412   case AArch64::LDRDpre:
2413   case AArch64::LDURQi:
2414   case AArch64::LDRQpre:
2415   case AArch64::LDURWi:
2416   case AArch64::LDRWpre:
2417   case AArch64::LDURXi:
2418   case AArch64::LDRXpre:
2419   case AArch64::LDURSWi:
2420     return true;
2421   }
2422 }
2423 
2424 unsigned AArch64InstrInfo::convertToFlagSettingOpc(unsigned Opc) {
2425   switch (Opc) {
2426   default:
2427     llvm_unreachable("Opcode has no flag setting equivalent!");
2428   // 32-bit cases:
2429   case AArch64::ADDWri:
2430     return AArch64::ADDSWri;
2431   case AArch64::ADDWrr:
2432     return AArch64::ADDSWrr;
2433   case AArch64::ADDWrs:
2434     return AArch64::ADDSWrs;
2435   case AArch64::ADDWrx:
2436     return AArch64::ADDSWrx;
2437   case AArch64::ANDWri:
2438     return AArch64::ANDSWri;
2439   case AArch64::ANDWrr:
2440     return AArch64::ANDSWrr;
2441   case AArch64::ANDWrs:
2442     return AArch64::ANDSWrs;
2443   case AArch64::BICWrr:
2444     return AArch64::BICSWrr;
2445   case AArch64::BICWrs:
2446     return AArch64::BICSWrs;
2447   case AArch64::SUBWri:
2448     return AArch64::SUBSWri;
2449   case AArch64::SUBWrr:
2450     return AArch64::SUBSWrr;
2451   case AArch64::SUBWrs:
2452     return AArch64::SUBSWrs;
2453   case AArch64::SUBWrx:
2454     return AArch64::SUBSWrx;
2455   // 64-bit cases:
2456   case AArch64::ADDXri:
2457     return AArch64::ADDSXri;
2458   case AArch64::ADDXrr:
2459     return AArch64::ADDSXrr;
2460   case AArch64::ADDXrs:
2461     return AArch64::ADDSXrs;
2462   case AArch64::ADDXrx:
2463     return AArch64::ADDSXrx;
2464   case AArch64::ANDXri:
2465     return AArch64::ANDSXri;
2466   case AArch64::ANDXrr:
2467     return AArch64::ANDSXrr;
2468   case AArch64::ANDXrs:
2469     return AArch64::ANDSXrs;
2470   case AArch64::BICXrr:
2471     return AArch64::BICSXrr;
2472   case AArch64::BICXrs:
2473     return AArch64::BICSXrs;
2474   case AArch64::SUBXri:
2475     return AArch64::SUBSXri;
2476   case AArch64::SUBXrr:
2477     return AArch64::SUBSXrr;
2478   case AArch64::SUBXrs:
2479     return AArch64::SUBSXrs;
2480   case AArch64::SUBXrx:
2481     return AArch64::SUBSXrx;
2482   // SVE instructions:
2483   case AArch64::AND_PPzPP:
2484     return AArch64::ANDS_PPzPP;
2485   case AArch64::BIC_PPzPP:
2486     return AArch64::BICS_PPzPP;
2487   case AArch64::EOR_PPzPP:
2488     return AArch64::EORS_PPzPP;
2489   case AArch64::NAND_PPzPP:
2490     return AArch64::NANDS_PPzPP;
2491   case AArch64::NOR_PPzPP:
2492     return AArch64::NORS_PPzPP;
2493   case AArch64::ORN_PPzPP:
2494     return AArch64::ORNS_PPzPP;
2495   case AArch64::ORR_PPzPP:
2496     return AArch64::ORRS_PPzPP;
2497   case AArch64::BRKA_PPzP:
2498     return AArch64::BRKAS_PPzP;
2499   case AArch64::BRKPA_PPzPP:
2500     return AArch64::BRKPAS_PPzPP;
2501   case AArch64::BRKB_PPzP:
2502     return AArch64::BRKBS_PPzP;
2503   case AArch64::BRKPB_PPzPP:
2504     return AArch64::BRKPBS_PPzPP;
2505   case AArch64::BRKN_PPzP:
2506     return AArch64::BRKNS_PPzP;
2507   case AArch64::RDFFR_PPz:
2508     return AArch64::RDFFRS_PPz;
2509   case AArch64::PTRUE_B:
2510     return AArch64::PTRUES_B;
2511   }
2512 }
2513 
2514 // Is this a candidate for ld/st merging or pairing?  For example, we don't
2515 // touch volatiles or load/stores that have a hint to avoid pair formation.
2516 bool AArch64InstrInfo::isCandidateToMergeOrPair(const MachineInstr &MI) const {
2517 
2518   bool IsPreLdSt = isPreLdSt(MI);
2519 
2520   // If this is a volatile load/store, don't mess with it.
2521   if (MI.hasOrderedMemoryRef())
2522     return false;
2523 
2524   // Make sure this is a reg/fi+imm (as opposed to an address reloc).
2525   // For Pre-inc LD/ST, the operand is shifted by one.
2526   assert((MI.getOperand(IsPreLdSt ? 2 : 1).isReg() ||
2527           MI.getOperand(IsPreLdSt ? 2 : 1).isFI()) &&
2528          "Expected a reg or frame index operand.");
2529 
2530   // For Pre-indexed addressing quadword instructions, the third operand is the
2531   // immediate value.
2532   bool IsImmPreLdSt = IsPreLdSt && MI.getOperand(3).isImm();
2533 
2534   if (!MI.getOperand(2).isImm() && !IsImmPreLdSt)
2535     return false;
2536 
2537   // Can't merge/pair if the instruction modifies the base register.
2538   // e.g., ldr x0, [x0]
2539   // This case will never occur with an FI base.
2540   // However, if the instruction is an LDR/STR<S,D,Q,W,X>pre, it can be merged.
2541   // For example:
2542   //   ldr q0, [x11, #32]!
2543   //   ldr q1, [x11, #16]
2544   //   to
2545   //   ldp q0, q1, [x11, #32]!
2546   if (MI.getOperand(1).isReg() && !IsPreLdSt) {
2547     Register BaseReg = MI.getOperand(1).getReg();
2548     const TargetRegisterInfo *TRI = &getRegisterInfo();
2549     if (MI.modifiesRegister(BaseReg, TRI))
2550       return false;
2551   }
2552 
2553   // Check if this load/store has a hint to avoid pair formation.
2554   // MachineMemOperands hints are set by the AArch64StorePairSuppress pass.
2555   if (isLdStPairSuppressed(MI))
2556     return false;
2557 
2558   // Do not pair any callee-save store/reload instructions in the
2559   // prologue/epilogue if the CFI information encoded the operations as separate
2560   // instructions, as that will cause the size of the actual prologue to mismatch
2561   // with the prologue size recorded in the Windows CFI.
2562   const MCAsmInfo *MAI = MI.getMF()->getTarget().getMCAsmInfo();
2563   bool NeedsWinCFI = MAI->usesWindowsCFI() &&
2564                      MI.getMF()->getFunction().needsUnwindTableEntry();
2565   if (NeedsWinCFI && (MI.getFlag(MachineInstr::FrameSetup) ||
2566                       MI.getFlag(MachineInstr::FrameDestroy)))
2567     return false;
2568 
2569   // On some CPUs quad load/store pairs are slower than two single load/stores.
2570   if (Subtarget.isPaired128Slow()) {
2571     switch (MI.getOpcode()) {
2572     default:
2573       break;
2574     case AArch64::LDURQi:
2575     case AArch64::STURQi:
2576     case AArch64::LDRQui:
2577     case AArch64::STRQui:
2578       return false;
2579     }
2580   }
2581 
2582   return true;
2583 }
2584 
2585 bool AArch64InstrInfo::getMemOperandsWithOffsetWidth(
2586     const MachineInstr &LdSt, SmallVectorImpl<const MachineOperand *> &BaseOps,
2587     int64_t &Offset, bool &OffsetIsScalable, unsigned &Width,
2588     const TargetRegisterInfo *TRI) const {
2589   if (!LdSt.mayLoadOrStore())
2590     return false;
2591 
2592   const MachineOperand *BaseOp;
2593   if (!getMemOperandWithOffsetWidth(LdSt, BaseOp, Offset, OffsetIsScalable,
2594                                     Width, TRI))
2595     return false;
2596   BaseOps.push_back(BaseOp);
2597   return true;
2598 }
2599 
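// Describe a simple base-register + immediate-offset access as an ExtAddrMode,
// or return std::nullopt if MemI is not of that form.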
2600 std::optional<ExtAddrMode>
2601 AArch64InstrInfo::getAddrModeFromMemoryOp(const MachineInstr &MemI,
2602                                           const TargetRegisterInfo *TRI) const {
2603   const MachineOperand *Base; // Filled with the base operand of MI.
2604   int64_t Offset;             // Filled with the offset of MI.
2605   bool OffsetIsScalable;
2606   if (!getMemOperandWithOffset(MemI, Base, Offset, OffsetIsScalable, TRI))
2607     return std::nullopt;
2608 
2609   if (!Base->isReg())
2610     return std::nullopt;
2611   ExtAddrMode AM;
2612   AM.BaseReg = Base->getReg();
2613   AM.Displacement = Offset;
2614   AM.ScaledReg = 0;
2615   AM.Scale = 0;
2616   return AM;
2617 }
2618 
2619 bool AArch64InstrInfo::getMemOperandWithOffsetWidth(
2620     const MachineInstr &LdSt, const MachineOperand *&BaseOp, int64_t &Offset,
2621     bool &OffsetIsScalable, unsigned &Width,
2622     const TargetRegisterInfo *TRI) const {
2623   assert(LdSt.mayLoadOrStore() && "Expected a memory operation.");
2624   // Handle only loads/stores with base register followed by immediate offset.
2625   if (LdSt.getNumExplicitOperands() == 3) {
2626     // Non-paired instruction (e.g., ldr x1, [x0, #8]).
2627     if ((!LdSt.getOperand(1).isReg() && !LdSt.getOperand(1).isFI()) ||
2628         !LdSt.getOperand(2).isImm())
2629       return false;
2630   } else if (LdSt.getNumExplicitOperands() == 4) {
2631     // Paired instruction (e.g., ldp x1, x2, [x0, #8]).
2632     if (!LdSt.getOperand(1).isReg() ||
2633         (!LdSt.getOperand(2).isReg() && !LdSt.getOperand(2).isFI()) ||
2634         !LdSt.getOperand(3).isImm())
2635       return false;
2636   } else
2637     return false;
2638 
2639   // Get the scaling factor for the instruction and set the width for the
2640   // instruction.
2641   TypeSize Scale(0U, false);
2642   int64_t Dummy1, Dummy2;
2643 
2644   // If this returns false, then it's an instruction we don't want to handle.
2645   if (!getMemOpInfo(LdSt.getOpcode(), Scale, Width, Dummy1, Dummy2))
2646     return false;
2647 
2648   // Compute the offset. Offset is calculated as the immediate operand
2649   // multiplied by the scaling factor. Unscaled instructions have scaling factor
2650   // set to 1.
2651   if (LdSt.getNumExplicitOperands() == 3) {
2652     BaseOp = &LdSt.getOperand(1);
2653     Offset = LdSt.getOperand(2).getImm() * Scale.getKnownMinValue();
2654   } else {
2655     assert(LdSt.getNumExplicitOperands() == 4 && "invalid number of operands");
2656     BaseOp = &LdSt.getOperand(2);
2657     Offset = LdSt.getOperand(3).getImm() * Scale.getKnownMinValue();
2658   }
2659   OffsetIsScalable = Scale.isScalable();
2660 
2661   if (!BaseOp->isReg() && !BaseOp->isFI())
2662     return false;
2663 
2664   return true;
2665 }
2666 
2667 MachineOperand &
2668 AArch64InstrInfo::getMemOpBaseRegImmOfsOffsetOperand(MachineInstr &LdSt) const {
2669   assert(LdSt.mayLoadOrStore() && "Expected a memory operation.");
2670   MachineOperand &OfsOp = LdSt.getOperand(LdSt.getNumExplicitOperands() - 1);
2671   assert(OfsOp.isImm() && "Offset operand wasn't immediate.");
2672   return OfsOp;
2673 }
2674 
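// For the given memory opcode, report the scale applied to its immediate
// operand, the access width in bytes (0 for non-memory opcodes such as ADDG),
// and the legal immediate range [MinOffset, MaxOffset]. Returns false for
// opcodes we do not handle.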
2675 bool AArch64InstrInfo::getMemOpInfo(unsigned Opcode, TypeSize &Scale,
2676                                     unsigned &Width, int64_t &MinOffset,
2677                                     int64_t &MaxOffset) {
2678   const unsigned SVEMaxBytesPerVector = AArch64::SVEMaxBitsPerVector / 8;
2679   switch (Opcode) {
2680   // Not a memory operation, or not something we want to handle.
2681   default:
2682     Scale = TypeSize::Fixed(0);
2683     Width = 0;
2684     MinOffset = MaxOffset = 0;
2685     return false;
2686   case AArch64::STRWpost:
2687   case AArch64::LDRWpost:
2688     Width = 32;
2689     Scale = TypeSize::Fixed(4);
2690     MinOffset = -256;
2691     MaxOffset = 255;
2692     break;
2693   case AArch64::LDURQi:
2694   case AArch64::STURQi:
2695     Width = 16;
2696     Scale = TypeSize::Fixed(1);
2697     MinOffset = -256;
2698     MaxOffset = 255;
2699     break;
2700   case AArch64::PRFUMi:
2701   case AArch64::LDURXi:
2702   case AArch64::LDURDi:
2703   case AArch64::STURXi:
2704   case AArch64::STURDi:
2705     Width = 8;
2706     Scale = TypeSize::Fixed(1);
2707     MinOffset = -256;
2708     MaxOffset = 255;
2709     break;
2710   case AArch64::LDURWi:
2711   case AArch64::LDURSi:
2712   case AArch64::LDURSWi:
2713   case AArch64::STURWi:
2714   case AArch64::STURSi:
2715     Width = 4;
2716     Scale = TypeSize::Fixed(1);
2717     MinOffset = -256;
2718     MaxOffset = 255;
2719     break;
2720   case AArch64::LDURHi:
2721   case AArch64::LDURHHi:
2722   case AArch64::LDURSHXi:
2723   case AArch64::LDURSHWi:
2724   case AArch64::STURHi:
2725   case AArch64::STURHHi:
2726     Width = 2;
2727     Scale = TypeSize::Fixed(1);
2728     MinOffset = -256;
2729     MaxOffset = 255;
2730     break;
2731   case AArch64::LDURBi:
2732   case AArch64::LDURBBi:
2733   case AArch64::LDURSBXi:
2734   case AArch64::LDURSBWi:
2735   case AArch64::STURBi:
2736   case AArch64::STURBBi:
2737     Width = 1;
2738     Scale = TypeSize::Fixed(1);
2739     MinOffset = -256;
2740     MaxOffset = 255;
2741     break;
2742   case AArch64::LDPQi:
2743   case AArch64::LDNPQi:
2744   case AArch64::STPQi:
2745   case AArch64::STNPQi:
2746     Scale = TypeSize::Fixed(16);
2747     Width = 32;
2748     MinOffset = -64;
2749     MaxOffset = 63;
2750     break;
2751   case AArch64::LDRQui:
2752   case AArch64::STRQui:
2753     Scale = TypeSize::Fixed(16);
2754     Width = 16;
2755     MinOffset = 0;
2756     MaxOffset = 4095;
2757     break;
2758   case AArch64::LDPXi:
2759   case AArch64::LDPDi:
2760   case AArch64::LDNPXi:
2761   case AArch64::LDNPDi:
2762   case AArch64::STPXi:
2763   case AArch64::STPDi:
2764   case AArch64::STNPXi:
2765   case AArch64::STNPDi:
2766     Scale = TypeSize::Fixed(8);
2767     Width = 16;
2768     MinOffset = -64;
2769     MaxOffset = 63;
2770     break;
2771   case AArch64::PRFMui:
2772   case AArch64::LDRXui:
2773   case AArch64::LDRDui:
2774   case AArch64::STRXui:
2775   case AArch64::STRDui:
2776     Scale = TypeSize::Fixed(8);
2777     Width = 8;
2778     MinOffset = 0;
2779     MaxOffset = 4095;
2780     break;
2781   case AArch64::StoreSwiftAsyncContext:
2782     // Store is an STRXui, but there might be an ADDXri in the expansion too.
2783     Scale = TypeSize::Fixed(1);
2784     Width = 8;
2785     MinOffset = 0;
2786     MaxOffset = 4095;
2787     break;
2788   case AArch64::LDPWi:
2789   case AArch64::LDPSi:
2790   case AArch64::LDNPWi:
2791   case AArch64::LDNPSi:
2792   case AArch64::STPWi:
2793   case AArch64::STPSi:
2794   case AArch64::STNPWi:
2795   case AArch64::STNPSi:
2796     Scale = TypeSize::Fixed(4);
2797     Width = 8;
2798     MinOffset = -64;
2799     MaxOffset = 63;
2800     break;
2801   case AArch64::LDRWui:
2802   case AArch64::LDRSui:
2803   case AArch64::LDRSWui:
2804   case AArch64::STRWui:
2805   case AArch64::STRSui:
2806     Scale = TypeSize::Fixed(4);
2807     Width = 4;
2808     MinOffset = 0;
2809     MaxOffset = 4095;
2810     break;
2811   case AArch64::LDRHui:
2812   case AArch64::LDRHHui:
2813   case AArch64::LDRSHWui:
2814   case AArch64::LDRSHXui:
2815   case AArch64::STRHui:
2816   case AArch64::STRHHui:
2817     Scale = TypeSize::Fixed(2);
2818     Width = 2;
2819     MinOffset = 0;
2820     MaxOffset = 4095;
2821     break;
2822   case AArch64::LDRBui:
2823   case AArch64::LDRBBui:
2824   case AArch64::LDRSBWui:
2825   case AArch64::LDRSBXui:
2826   case AArch64::STRBui:
2827   case AArch64::STRBBui:
2828     Scale = TypeSize::Fixed(1);
2829     Width = 1;
2830     MinOffset = 0;
2831     MaxOffset = 4095;
2832     break;
2833   case AArch64::STPXpre:
2834   case AArch64::LDPXpost:
2835   case AArch64::STPDpre:
2836   case AArch64::LDPDpost:
2837     Scale = TypeSize::Fixed(8);
2838     Width = 8;
2839     MinOffset = -512;
2840     MaxOffset = 504;
2841     break;
2842   case AArch64::STPQpre:
2843   case AArch64::LDPQpost:
2844     Scale = TypeSize::Fixed(16);
2845     Width = 16;
2846     MinOffset = -1024;
2847     MaxOffset = 1008;
2848     break;
2849   case AArch64::STRXpre:
2850   case AArch64::STRDpre:
2851   case AArch64::LDRXpost:
2852   case AArch64::LDRDpost:
2853     Scale = TypeSize::Fixed(1);
2854     Width = 8;
2855     MinOffset = -256;
2856     MaxOffset = 255;
2857     break;
2858   case AArch64::STRQpre:
2859   case AArch64::LDRQpost:
2860     Scale = TypeSize::Fixed(1);
2861     Width = 16;
2862     MinOffset = -256;
2863     MaxOffset = 255;
2864     break;
2865   case AArch64::ADDG:
2866     Scale = TypeSize::Fixed(16);
2867     Width = 0;
2868     MinOffset = 0;
2869     MaxOffset = 63;
2870     break;
2871   case AArch64::TAGPstack:
2872     Scale = TypeSize::Fixed(16);
2873     Width = 0;
2874     // TAGP with a negative offset turns into SUBP, which has a maximum offset
2875     // of 63 (not 64!).
2876     MinOffset = -63;
2877     MaxOffset = 63;
2878     break;
2879   case AArch64::LDG:
2880   case AArch64::STGOffset:
2881   case AArch64::STZGOffset:
2882     Scale = TypeSize::Fixed(16);
2883     Width = 16;
2884     MinOffset = -256;
2885     MaxOffset = 255;
2886     break;
2887   case AArch64::STR_ZZZZXI:
2888   case AArch64::LDR_ZZZZXI:
2889     Scale = TypeSize::Scalable(16);
2890     Width = SVEMaxBytesPerVector * 4;
2891     MinOffset = -256;
2892     MaxOffset = 252;
2893     break;
2894   case AArch64::STR_ZZZXI:
2895   case AArch64::LDR_ZZZXI:
2896     Scale = TypeSize::Scalable(16);
2897     Width = SVEMaxBytesPerVector * 3;
2898     MinOffset = -256;
2899     MaxOffset = 253;
2900     break;
2901   case AArch64::STR_ZZXI:
2902   case AArch64::LDR_ZZXI:
2903     Scale = TypeSize::Scalable(16);
2904     Width = SVEMaxBytesPerVector * 2;
2905     MinOffset = -256;
2906     MaxOffset = 254;
2907     break;
2908   case AArch64::LDR_PXI:
2909   case AArch64::STR_PXI:
2910     Scale = TypeSize::Scalable(2);
2911     Width = SVEMaxBytesPerVector / 8;
2912     MinOffset = -256;
2913     MaxOffset = 255;
2914     break;
2915   case AArch64::LDR_ZXI:
2916   case AArch64::STR_ZXI:
2917     Scale = TypeSize::Scalable(16);
2918     Width = SVEMaxBytesPerVector;
2919     MinOffset = -256;
2920     MaxOffset = 255;
2921     break;
2922   case AArch64::LD1B_IMM:
2923   case AArch64::LD1H_IMM:
2924   case AArch64::LD1W_IMM:
2925   case AArch64::LD1D_IMM:
2926   case AArch64::LDNT1B_ZRI:
2927   case AArch64::LDNT1H_ZRI:
2928   case AArch64::LDNT1W_ZRI:
2929   case AArch64::LDNT1D_ZRI:
2930   case AArch64::ST1B_IMM:
2931   case AArch64::ST1H_IMM:
2932   case AArch64::ST1W_IMM:
2933   case AArch64::ST1D_IMM:
2934   case AArch64::STNT1B_ZRI:
2935   case AArch64::STNT1H_ZRI:
2936   case AArch64::STNT1W_ZRI:
2937   case AArch64::STNT1D_ZRI:
2938   case AArch64::LDNF1B_IMM:
2939   case AArch64::LDNF1H_IMM:
2940   case AArch64::LDNF1W_IMM:
2941   case AArch64::LDNF1D_IMM:
2942     // A full vector's worth of data
2943     // Width = mbytes * elements
2944     Scale = TypeSize::Scalable(16);
2945     Width = SVEMaxBytesPerVector;
2946     MinOffset = -8;
2947     MaxOffset = 7;
2948     break;
2949   case AArch64::LD2B_IMM:
2950   case AArch64::LD2H_IMM:
2951   case AArch64::LD2W_IMM:
2952   case AArch64::LD2D_IMM:
2953   case AArch64::ST2B_IMM:
2954   case AArch64::ST2H_IMM:
2955   case AArch64::ST2W_IMM:
2956   case AArch64::ST2D_IMM:
2957     Scale = TypeSize::Scalable(32);
2958     Width = SVEMaxBytesPerVector * 2;
2959     MinOffset = -8;
2960     MaxOffset = 7;
2961     break;
2962   case AArch64::LD3B_IMM:
2963   case AArch64::LD3H_IMM:
2964   case AArch64::LD3W_IMM:
2965   case AArch64::LD3D_IMM:
2966   case AArch64::ST3B_IMM:
2967   case AArch64::ST3H_IMM:
2968   case AArch64::ST3W_IMM:
2969   case AArch64::ST3D_IMM:
2970     Scale = TypeSize::Scalable(48);
2971     Width = SVEMaxBytesPerVector * 3;
2972     MinOffset = -8;
2973     MaxOffset = 7;
2974     break;
2975   case AArch64::LD4B_IMM:
2976   case AArch64::LD4H_IMM:
2977   case AArch64::LD4W_IMM:
2978   case AArch64::LD4D_IMM:
2979   case AArch64::ST4B_IMM:
2980   case AArch64::ST4H_IMM:
2981   case AArch64::ST4W_IMM:
2982   case AArch64::ST4D_IMM:
2983     Scale = TypeSize::Scalable(64);
2984     Width = SVEMaxBytesPerVector * 4;
2985     MinOffset = -8;
2986     MaxOffset = 7;
2987     break;
2988   case AArch64::LD1B_H_IMM:
2989   case AArch64::LD1SB_H_IMM:
2990   case AArch64::LD1H_S_IMM:
2991   case AArch64::LD1SH_S_IMM:
2992   case AArch64::LD1W_D_IMM:
2993   case AArch64::LD1SW_D_IMM:
2994   case AArch64::ST1B_H_IMM:
2995   case AArch64::ST1H_S_IMM:
2996   case AArch64::ST1W_D_IMM:
2997   case AArch64::LDNF1B_H_IMM:
2998   case AArch64::LDNF1SB_H_IMM:
2999   case AArch64::LDNF1H_S_IMM:
3000   case AArch64::LDNF1SH_S_IMM:
3001   case AArch64::LDNF1W_D_IMM:
3002   case AArch64::LDNF1SW_D_IMM:
3003     // A half vector's worth of data
3004     // Width = mbytes * elements
3005     Scale = TypeSize::Scalable(8);
3006     Width = SVEMaxBytesPerVector / 2;
3007     MinOffset = -8;
3008     MaxOffset = 7;
3009     break;
3010   case AArch64::LD1B_S_IMM:
3011   case AArch64::LD1SB_S_IMM:
3012   case AArch64::LD1H_D_IMM:
3013   case AArch64::LD1SH_D_IMM:
3014   case AArch64::ST1B_S_IMM:
3015   case AArch64::ST1H_D_IMM:
3016   case AArch64::LDNF1B_S_IMM:
3017   case AArch64::LDNF1SB_S_IMM:
3018   case AArch64::LDNF1H_D_IMM:
3019   case AArch64::LDNF1SH_D_IMM:
3020     // A quarter vector's worth of data
3021     // Width = mbytes * elements
3022     Scale = TypeSize::Scalable(4);
3023     Width = SVEMaxBytesPerVector / 4;
3024     MinOffset = -8;
3025     MaxOffset = 7;
3026     break;
3027   case AArch64::LD1B_D_IMM:
3028   case AArch64::LD1SB_D_IMM:
3029   case AArch64::ST1B_D_IMM:
3030   case AArch64::LDNF1B_D_IMM:
3031   case AArch64::LDNF1SB_D_IMM:
3032     // An eighth vector's worth of data
3033     // Width = mbytes * elements
3034     Scale = TypeSize::Scalable(2);
3035     Width = SVEMaxBytesPerVector / 8;
3036     MinOffset = -8;
3037     MaxOffset = 7;
3038     break;
3039   case AArch64::ST2GOffset:
3040   case AArch64::STZ2GOffset:
3041     Scale = TypeSize::Fixed(16);
3042     Width = 32;
3043     MinOffset = -256;
3044     MaxOffset = 255;
3045     break;
3046   case AArch64::STGPi:
3047     Scale = TypeSize::Fixed(16);
3048     Width = 16;
3049     MinOffset = -64;
3050     MaxOffset = 63;
3051     break;
3052   case AArch64::LD1RB_IMM:
3053   case AArch64::LD1RB_H_IMM:
3054   case AArch64::LD1RB_S_IMM:
3055   case AArch64::LD1RB_D_IMM:
3056   case AArch64::LD1RSB_H_IMM:
3057   case AArch64::LD1RSB_S_IMM:
3058   case AArch64::LD1RSB_D_IMM:
3059     Scale = TypeSize::Fixed(1);
3060     Width = 1;
3061     MinOffset = 0;
3062     MaxOffset = 63;
3063     break;
3064   case AArch64::LD1RH_IMM:
3065   case AArch64::LD1RH_S_IMM:
3066   case AArch64::LD1RH_D_IMM:
3067   case AArch64::LD1RSH_S_IMM:
3068   case AArch64::LD1RSH_D_IMM:
3069     Scale = TypeSize::Fixed(2);
3070     Width = 2;
3071     MinOffset = 0;
3072     MaxOffset = 63;
3073     break;
3074   case AArch64::LD1RW_IMM:
3075   case AArch64::LD1RW_D_IMM:
3076   case AArch64::LD1RSW_IMM:
3077     Scale = TypeSize::Fixed(4);
3078     Width = 4;
3079     MinOffset = 0;
3080     MaxOffset = 63;
3081     break;
3082   case AArch64::LD1RD_IMM:
3083     Scale = TypeSize::Fixed(8);
3084     Width = 8;
3085     MinOffset = 0;
3086     MaxOffset = 63;
3087     break;
3088   }
3089 
3090   return true;
3091 }
3092 
3093 // Scaling factor for unscaled load or store.
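// For example, 32-bit accesses such as LDURWi and STURWi report a scale of 4,
// so an unscaled byte offset of 12 corresponds to element offset 12 / 4 = 3
// once converted by scaleOffset() below.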
3094 int AArch64InstrInfo::getMemScale(unsigned Opc) {
3095   switch (Opc) {
3096   default:
3097     llvm_unreachable("Opcode has unknown scale!");
3098   case AArch64::LDRBBui:
3099   case AArch64::LDURBBi:
3100   case AArch64::LDRSBWui:
3101   case AArch64::LDURSBWi:
3102   case AArch64::STRBBui:
3103   case AArch64::STURBBi:
3104     return 1;
3105   case AArch64::LDRHHui:
3106   case AArch64::LDURHHi:
3107   case AArch64::LDRSHWui:
3108   case AArch64::LDURSHWi:
3109   case AArch64::STRHHui:
3110   case AArch64::STURHHi:
3111     return 2;
3112   case AArch64::LDRSui:
3113   case AArch64::LDURSi:
3114   case AArch64::LDRSpre:
3115   case AArch64::LDRSWui:
3116   case AArch64::LDURSWi:
3117   case AArch64::LDRWpre:
3118   case AArch64::LDRWui:
3119   case AArch64::LDURWi:
3120   case AArch64::STRSui:
3121   case AArch64::STURSi:
3122   case AArch64::STRSpre:
3123   case AArch64::STRWui:
3124   case AArch64::STURWi:
3125   case AArch64::STRWpre:
3126   case AArch64::LDPSi:
3127   case AArch64::LDPSWi:
3128   case AArch64::LDPWi:
3129   case AArch64::STPSi:
3130   case AArch64::STPWi:
3131     return 4;
3132   case AArch64::LDRDui:
3133   case AArch64::LDURDi:
3134   case AArch64::LDRDpre:
3135   case AArch64::LDRXui:
3136   case AArch64::LDURXi:
3137   case AArch64::LDRXpre:
3138   case AArch64::STRDui:
3139   case AArch64::STURDi:
3140   case AArch64::STRDpre:
3141   case AArch64::STRXui:
3142   case AArch64::STURXi:
3143   case AArch64::STRXpre:
3144   case AArch64::LDPDi:
3145   case AArch64::LDPXi:
3146   case AArch64::STPDi:
3147   case AArch64::STPXi:
3148     return 8;
3149   case AArch64::LDRQui:
3150   case AArch64::LDURQi:
3151   case AArch64::STRQui:
3152   case AArch64::STURQi:
3153   case AArch64::STRQpre:
3154   case AArch64::LDPQi:
3155   case AArch64::LDRQpre:
3156   case AArch64::STPQi:
3157   case AArch64::STGOffset:
3158   case AArch64::STZGOffset:
3159   case AArch64::ST2GOffset:
3160   case AArch64::STZ2GOffset:
3161   case AArch64::STGPi:
3162     return 16;
3163   }
3164 }
3165 
3166 bool AArch64InstrInfo::isPreLd(const MachineInstr &MI) {
3167   switch (MI.getOpcode()) {
3168   default:
3169     return false;
3170   case AArch64::LDRWpre:
3171   case AArch64::LDRXpre:
3172   case AArch64::LDRSpre:
3173   case AArch64::LDRDpre:
3174   case AArch64::LDRQpre:
3175     return true;
3176   }
3177 }
3178 
3179 bool AArch64InstrInfo::isPreSt(const MachineInstr &MI) {
3180   switch (MI.getOpcode()) {
3181   default:
3182     return false;
3183   case AArch64::STRWpre:
3184   case AArch64::STRXpre:
3185   case AArch64::STRSpre:
3186   case AArch64::STRDpre:
3187   case AArch64::STRQpre:
3188     return true;
3189   }
3190 }
3191 
3192 bool AArch64InstrInfo::isPreLdSt(const MachineInstr &MI) {
3193   return isPreLd(MI) || isPreSt(MI);
3194 }
3195 
3196 bool AArch64InstrInfo::isPairedLdSt(const MachineInstr &MI) {
3197   switch (MI.getOpcode()) {
3198   default:
3199     return false;
3200   case AArch64::LDPSi:
3201   case AArch64::LDPSWi:
3202   case AArch64::LDPDi:
3203   case AArch64::LDPQi:
3204   case AArch64::LDPWi:
3205   case AArch64::LDPXi:
3206   case AArch64::STPSi:
3207   case AArch64::STPDi:
3208   case AArch64::STPQi:
3209   case AArch64::STPWi:
3210   case AArch64::STPXi:
3211   case AArch64::STGPi:
3212     return true;
3213   }
3214 }
3215 
3216 const MachineOperand &AArch64InstrInfo::getLdStBaseOp(const MachineInstr &MI) {
3217   unsigned Idx =
3218       AArch64InstrInfo::isPairedLdSt(MI) || AArch64InstrInfo::isPreLdSt(MI) ? 2
3219                                                                             : 1;
3220   return MI.getOperand(Idx);
3221 }
3222 
3223 const MachineOperand &
3224 AArch64InstrInfo::getLdStOffsetOp(const MachineInstr &MI) {
3225   unsigned Idx =
3226       AArch64InstrInfo::isPairedLdSt(MI) || AArch64InstrInfo::isPreLdSt(MI) ? 3
3227                                                                             : 2;
3228   return MI.getOperand(Idx);
3229 }
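// Illustrative operand layouts for the two accessors above: a plain LDRXui
// has its base register at operand 1 and its immediate offset at operand 2,
// whereas paired forms (e.g. LDPXi) carry two data registers and pre-indexed
// forms (e.g. LDRXpre) define the updated base register, so in both of those
// cases the base moves to operand 2 and the offset to operand 3.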
3230 
3231 static const TargetRegisterClass *getRegClass(const MachineInstr &MI,
3232                                               Register Reg) {
3233   if (MI.getParent() == nullptr)
3234     return nullptr;
3235   const MachineFunction *MF = MI.getParent()->getParent();
3236   return MF ? MF->getRegInfo().getRegClassOrNull(Reg) : nullptr;
3237 }
3238 
3239 bool AArch64InstrInfo::isQForm(const MachineInstr &MI) {
3240   auto IsQFPR = [&](const MachineOperand &Op) {
3241     if (!Op.isReg())
3242       return false;
3243     auto Reg = Op.getReg();
3244     if (Reg.isPhysical())
3245       return AArch64::FPR128RegClass.contains(Reg);
3246     const TargetRegisterClass *TRC = ::getRegClass(MI, Reg);
3247     return TRC == &AArch64::FPR128RegClass ||
3248            TRC == &AArch64::FPR128_loRegClass;
3249   };
3250   return llvm::any_of(MI.operands(), IsQFPR);
3251 }
3252 
3253 bool AArch64InstrInfo::isFpOrNEON(const MachineInstr &MI) {
3254   auto IsFPR = [&](const MachineOperand &Op) {
3255     if (!Op.isReg())
3256       return false;
3257     auto Reg = Op.getReg();
3258     if (Reg.isPhysical())
3259       return AArch64::FPR128RegClass.contains(Reg) ||
3260              AArch64::FPR64RegClass.contains(Reg) ||
3261              AArch64::FPR32RegClass.contains(Reg) ||
3262              AArch64::FPR16RegClass.contains(Reg) ||
3263              AArch64::FPR8RegClass.contains(Reg);
3264 
3265     const TargetRegisterClass *TRC = ::getRegClass(MI, Reg);
3266     return TRC == &AArch64::FPR128RegClass ||
3267            TRC == &AArch64::FPR128_loRegClass ||
3268            TRC == &AArch64::FPR64RegClass ||
3269            TRC == &AArch64::FPR64_loRegClass ||
3270            TRC == &AArch64::FPR32RegClass || TRC == &AArch64::FPR16RegClass ||
3271            TRC == &AArch64::FPR8RegClass;
3272   };
3273   return llvm::any_of(MI.operands(), IsFPR);
3274 }
3275 
3276 // Scale the unscaled offset. Returns false if the unscaled offset can't be
3277 // scaled.
3278 static bool scaleOffset(unsigned Opc, int64_t &Offset) {
3279   int Scale = AArch64InstrInfo::getMemScale(Opc);
3280 
3281   // If the byte-offset isn't a multiple of the stride, we can't scale this
3282   // offset.
3283   if (Offset % Scale != 0)
3284     return false;
3285 
3286   // Convert the byte-offset used by unscaled into an "element" offset used
3287   // by the scaled pair load/store instructions.
3288   Offset /= Scale;
3289   return true;
3290 }
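// Worked example: STURXi has a scale of 8, so a byte offset of 24 scales to
// element offset 3, while a byte offset of 20 is rejected (20 % 8 != 0) and
// no pair can be formed from that access.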
3291 
3292 static bool canPairLdStOpc(unsigned FirstOpc, unsigned SecondOpc) {
3293   if (FirstOpc == SecondOpc)
3294     return true;
3295   // We can also pair sign-ext and zero-ext instructions.
3296   switch (FirstOpc) {
3297   default:
3298     return false;
3299   case AArch64::LDRWui:
3300   case AArch64::LDURWi:
3301     return SecondOpc == AArch64::LDRSWui || SecondOpc == AArch64::LDURSWi;
3302   case AArch64::LDRSWui:
3303   case AArch64::LDURSWi:
3304     return SecondOpc == AArch64::LDRWui || SecondOpc == AArch64::LDURWi;
3305   }
3306   // These instructions can't be paired based on their opcodes.
3307   return false;
3308 }
3309 
3310 static bool shouldClusterFI(const MachineFrameInfo &MFI, int FI1,
3311                             int64_t Offset1, unsigned Opcode1, int FI2,
3312                             int64_t Offset2, unsigned Opcode2) {
3313   // Accesses through fixed stack object frame indices may access a different
3314   // fixed stack slot. Check that the object offsets plus access offsets match.
3315   if (MFI.isFixedObjectIndex(FI1) && MFI.isFixedObjectIndex(FI2)) {
3316     int64_t ObjectOffset1 = MFI.getObjectOffset(FI1);
3317     int64_t ObjectOffset2 = MFI.getObjectOffset(FI2);
3318     assert(ObjectOffset1 <= ObjectOffset2 && "Object offsets are not ordered.");
3319     // Convert to scaled object offsets.
3320     int Scale1 = AArch64InstrInfo::getMemScale(Opcode1);
3321     if (ObjectOffset1 % Scale1 != 0)
3322       return false;
3323     ObjectOffset1 /= Scale1;
3324     int Scale2 = AArch64InstrInfo::getMemScale(Opcode2);
3325     if (ObjectOffset2 % Scale2 != 0)
3326       return false;
3327     ObjectOffset2 /= Scale2;
3328     ObjectOffset1 += Offset1;
3329     ObjectOffset2 += Offset2;
3330     return ObjectOffset1 + 1 == ObjectOffset2;
3331   }
3332 
3333   return FI1 == FI2;
3334 }
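// Worked example (illustrative frame layout): two 8-byte fixed objects at
// object offsets 0 and 8, each accessed by an STRXui with instruction offset
// 0, scale to object offsets 0 and 1; since 0 + 1 == 1, the accesses are
// adjacent and may be clustered.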
3335 
3336 /// Detect opportunities for ldp/stp formation.
3337 ///
3338 /// Only called for LdSt for which getMemOperandWithOffset returns true.
3339 bool AArch64InstrInfo::shouldClusterMemOps(
3340     ArrayRef<const MachineOperand *> BaseOps1,
3341     ArrayRef<const MachineOperand *> BaseOps2, unsigned NumLoads,
3342     unsigned NumBytes) const {
3343   assert(BaseOps1.size() == 1 && BaseOps2.size() == 1);
3344   const MachineOperand &BaseOp1 = *BaseOps1.front();
3345   const MachineOperand &BaseOp2 = *BaseOps2.front();
3346   const MachineInstr &FirstLdSt = *BaseOp1.getParent();
3347   const MachineInstr &SecondLdSt = *BaseOp2.getParent();
3348   if (BaseOp1.getType() != BaseOp2.getType())
3349     return false;
3350 
3351   assert((BaseOp1.isReg() || BaseOp1.isFI()) &&
3352          "Only base registers and frame indices are supported.");
3353 
3354   // Check for both base regs and base FI.
3355   if (BaseOp1.isReg() && BaseOp1.getReg() != BaseOp2.getReg())
3356     return false;
3357 
3358   // Only cluster up to a single pair.
3359   if (NumLoads > 2)
3360     return false;
3361 
3362   if (!isPairableLdStInst(FirstLdSt) || !isPairableLdStInst(SecondLdSt))
3363     return false;
3364 
3365   // Can we pair these instructions based on their opcodes?
3366   unsigned FirstOpc = FirstLdSt.getOpcode();
3367   unsigned SecondOpc = SecondLdSt.getOpcode();
3368   if (!canPairLdStOpc(FirstOpc, SecondOpc))
3369     return false;
3370 
3371   // Can't merge volatiles or load/stores that have a hint to avoid pair
3372   // formation, for example.
3373   if (!isCandidateToMergeOrPair(FirstLdSt) ||
3374       !isCandidateToMergeOrPair(SecondLdSt))
3375     return false;
3376 
3377   // isCandidateToMergeOrPair guarantees that operand 2 is an immediate.
3378   int64_t Offset1 = FirstLdSt.getOperand(2).getImm();
3379   if (hasUnscaledLdStOffset(FirstOpc) && !scaleOffset(FirstOpc, Offset1))
3380     return false;
3381 
3382   int64_t Offset2 = SecondLdSt.getOperand(2).getImm();
3383   if (hasUnscaledLdStOffset(SecondOpc) && !scaleOffset(SecondOpc, Offset2))
3384     return false;
3385 
3386   // Pairwise instructions have a 7-bit signed offset field.
3387   if (Offset1 > 63 || Offset1 < -64)
3388     return false;
3389 
3390   // The caller should already have ordered FirstLdSt/SecondLdSt by offset;
3391   // the only exception is accesses with non-equal frame-index bases.
3392   if (BaseOp1.isFI()) {
3393     assert((!BaseOp1.isIdenticalTo(BaseOp2) || Offset1 <= Offset2) &&
3394            "Caller should have ordered offsets.");
3395 
3396     const MachineFrameInfo &MFI =
3397         FirstLdSt.getParent()->getParent()->getFrameInfo();
3398     return shouldClusterFI(MFI, BaseOp1.getIndex(), Offset1, FirstOpc,
3399                            BaseOp2.getIndex(), Offset2, SecondOpc);
3400   }
3401 
3402   assert(Offset1 <= Offset2 && "Caller should have ordered offsets.");
3403 
3404   return Offset1 + 1 == Offset2;
3405 }
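// Illustrative case: two LDRXui loads from [x0, #0] and [x0, #8] share a base
// register, already carry the scaled element offsets 0 and 1, and fall well
// inside the signed 7-bit LDP/STP immediate range, so they are reported as a
// clustering candidate for ldp formation.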
3406 
3407 static const MachineInstrBuilder &AddSubReg(const MachineInstrBuilder &MIB,
3408                                             unsigned Reg, unsigned SubIdx,
3409                                             unsigned State,
3410                                             const TargetRegisterInfo *TRI) {
3411   if (!SubIdx)
3412     return MIB.addReg(Reg, State);
3413 
3414   if (Register::isPhysicalRegister(Reg))
3415     return MIB.addReg(TRI->getSubReg(Reg, SubIdx), State);
3416   return MIB.addReg(Reg, State, SubIdx);
3417 }
3418 
3419 static bool forwardCopyWillClobberTuple(unsigned DestReg, unsigned SrcReg,
3420                                         unsigned NumRegs) {
3421   // We really want the positive remainder mod 32 here; that happens to be
3422   // easily obtainable with a mask.
3423   return ((DestReg - SrcReg) & 0x1f) < NumRegs;
3424 }
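// Worked example: copying the tuple starting at encoding 1 (d1-d3) into the
// tuple starting at encoding 2 (d2-d4) gives (2 - 1) & 0x1f = 1 < 3, so a
// forward sub-register copy would overwrite d2/d3 before reading them;
// copyPhysRegTuple therefore walks the sub-registers in reverse.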
3425 
3426 void AArch64InstrInfo::copyPhysRegTuple(MachineBasicBlock &MBB,
3427                                         MachineBasicBlock::iterator I,
3428                                         const DebugLoc &DL, MCRegister DestReg,
3429                                         MCRegister SrcReg, bool KillSrc,
3430                                         unsigned Opcode,
3431                                         ArrayRef<unsigned> Indices) const {
3432   assert(Subtarget.hasNEON() && "Unexpected register copy without NEON");
3433   const TargetRegisterInfo *TRI = &getRegisterInfo();
3434   uint16_t DestEncoding = TRI->getEncodingValue(DestReg);
3435   uint16_t SrcEncoding = TRI->getEncodingValue(SrcReg);
3436   unsigned NumRegs = Indices.size();
3437 
3438   int SubReg = 0, End = NumRegs, Incr = 1;
3439   if (forwardCopyWillClobberTuple(DestEncoding, SrcEncoding, NumRegs)) {
3440     SubReg = NumRegs - 1;
3441     End = -1;
3442     Incr = -1;
3443   }
3444 
3445   for (; SubReg != End; SubReg += Incr) {
3446     const MachineInstrBuilder MIB = BuildMI(MBB, I, DL, get(Opcode));
3447     AddSubReg(MIB, DestReg, Indices[SubReg], RegState::Define, TRI);
3448     AddSubReg(MIB, SrcReg, Indices[SubReg], 0, TRI);
3449     AddSubReg(MIB, SrcReg, Indices[SubReg], getKillRegState(KillSrc), TRI);
3450   }
3451 }
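// For a non-overlapping DD pair copy such as D0_D1 <- D2_D3 this simply emits
// "orr v0.8b, v2.8b, v2.8b" and "orr v1.8b, v3.8b, v3.8b" in forward order.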
3452 
3453 void AArch64InstrInfo::copyGPRRegTuple(MachineBasicBlock &MBB,
3454                                        MachineBasicBlock::iterator I,
3455                                        DebugLoc DL, unsigned DestReg,
3456                                        unsigned SrcReg, bool KillSrc,
3457                                        unsigned Opcode, unsigned ZeroReg,
3458                                        llvm::ArrayRef<unsigned> Indices) const {
3459   const TargetRegisterInfo *TRI = &getRegisterInfo();
3460   unsigned NumRegs = Indices.size();
3461 
3462 #ifndef NDEBUG
3463   uint16_t DestEncoding = TRI->getEncodingValue(DestReg);
3464   uint16_t SrcEncoding = TRI->getEncodingValue(SrcReg);
3465   assert(DestEncoding % NumRegs == 0 && SrcEncoding % NumRegs == 0 &&
3466          "GPR reg sequences should not be able to overlap");
3467 #endif
3468 
3469   for (unsigned SubReg = 0; SubReg != NumRegs; ++SubReg) {
3470     const MachineInstrBuilder MIB = BuildMI(MBB, I, DL, get(Opcode));
3471     AddSubReg(MIB, DestReg, Indices[SubReg], RegState::Define, TRI);
3472     MIB.addReg(ZeroReg);
3473     AddSubReg(MIB, SrcReg, Indices[SubReg], getKillRegState(KillSrc), TRI);
3474     MIB.addImm(0);
3475   }
3476 }
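// For example, an XSeqPairs copy X4_X5 <- X2_X3 expands to ORRXrs x4, xzr, x2
// and ORRXrs x5, xzr, x3 with a zero shift; because sequential GPR pairs are
// always even-aligned, the source and destination cannot partially overlap.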
3477 
3478 void AArch64InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
3479                                    MachineBasicBlock::iterator I,
3480                                    const DebugLoc &DL, MCRegister DestReg,
3481                                    MCRegister SrcReg, bool KillSrc) const {
3482   if (AArch64::GPR32spRegClass.contains(DestReg) &&
3483       (AArch64::GPR32spRegClass.contains(SrcReg) || SrcReg == AArch64::WZR)) {
3484     const TargetRegisterInfo *TRI = &getRegisterInfo();
3485 
3486     if (DestReg == AArch64::WSP || SrcReg == AArch64::WSP) {
3487       // If either operand is WSP, expand to ADD #0.
3488       if (Subtarget.hasZeroCycleRegMove()) {
3489         // Cyclone recognizes "ADD Xd, Xn, #0" as a zero-cycle register move.
3490         MCRegister DestRegX = TRI->getMatchingSuperReg(
3491             DestReg, AArch64::sub_32, &AArch64::GPR64spRegClass);
3492         MCRegister SrcRegX = TRI->getMatchingSuperReg(
3493             SrcReg, AArch64::sub_32, &AArch64::GPR64spRegClass);
3494         // This instruction is reading and writing X registers.  This may upset
3495         // the register scavenger and machine verifier, so we need to indicate
3496         // that we are reading an undefined value from SrcRegX, but a proper
3497         // value from SrcReg.
3498         BuildMI(MBB, I, DL, get(AArch64::ADDXri), DestRegX)
3499             .addReg(SrcRegX, RegState::Undef)
3500             .addImm(0)
3501             .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0))
3502             .addReg(SrcReg, RegState::Implicit | getKillRegState(KillSrc));
3503       } else {
3504         BuildMI(MBB, I, DL, get(AArch64::ADDWri), DestReg)
3505             .addReg(SrcReg, getKillRegState(KillSrc))
3506             .addImm(0)
3507             .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0));
3508       }
3509     } else if (SrcReg == AArch64::WZR && Subtarget.hasZeroCycleZeroingGP()) {
3510       BuildMI(MBB, I, DL, get(AArch64::MOVZWi), DestReg)
3511           .addImm(0)
3512           .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0));
3513     } else {
3514       if (Subtarget.hasZeroCycleRegMove()) {
3515         // Cyclone recognizes "ORR Xd, XZR, Xm" as a zero-cycle register move.
3516         MCRegister DestRegX = TRI->getMatchingSuperReg(
3517             DestReg, AArch64::sub_32, &AArch64::GPR64spRegClass);
3518         MCRegister SrcRegX = TRI->getMatchingSuperReg(
3519             SrcReg, AArch64::sub_32, &AArch64::GPR64spRegClass);
3520         // This instruction is reading and writing X registers.  This may upset
3521         // the register scavenger and machine verifier, so we need to indicate
3522         // that we are reading an undefined value from SrcRegX, but a proper
3523         // value from SrcReg.
3524         BuildMI(MBB, I, DL, get(AArch64::ORRXrr), DestRegX)
3525             .addReg(AArch64::XZR)
3526             .addReg(SrcRegX, RegState::Undef)
3527             .addReg(SrcReg, RegState::Implicit | getKillRegState(KillSrc));
3528       } else {
3529         // Otherwise, expand to ORR WZR.
3530         BuildMI(MBB, I, DL, get(AArch64::ORRWrr), DestReg)
3531             .addReg(AArch64::WZR)
3532             .addReg(SrcReg, getKillRegState(KillSrc));
3533       }
3534     }
3535     return;
3536   }
3537 
3538   // Copy a Predicate register by ORRing with itself.
3539   if (AArch64::PPRRegClass.contains(DestReg) &&
3540       AArch64::PPRRegClass.contains(SrcReg)) {
3541     assert(Subtarget.hasSVEorSME() && "Unexpected SVE register.");
3542     BuildMI(MBB, I, DL, get(AArch64::ORR_PPzPP), DestReg)
3543       .addReg(SrcReg) // Pg
3544       .addReg(SrcReg)
3545       .addReg(SrcReg, getKillRegState(KillSrc));
3546     return;
3547   }
3548 
3549   // Copy a Z register by ORRing with itself.
3550   if (AArch64::ZPRRegClass.contains(DestReg) &&
3551       AArch64::ZPRRegClass.contains(SrcReg)) {
3552     assert(Subtarget.hasSVEorSME() && "Unexpected SVE register.");
3553     BuildMI(MBB, I, DL, get(AArch64::ORR_ZZZ), DestReg)
3554       .addReg(SrcReg)
3555       .addReg(SrcReg, getKillRegState(KillSrc));
3556     return;
3557   }
3558 
3559   // Copy a Z register pair by copying the individual sub-registers.
3560   if (AArch64::ZPR2RegClass.contains(DestReg) &&
3561       AArch64::ZPR2RegClass.contains(SrcReg)) {
3562     assert(Subtarget.hasSVEorSME() && "Unexpected SVE register.");
3563     static const unsigned Indices[] = {AArch64::zsub0, AArch64::zsub1};
3564     copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORR_ZZZ,
3565                      Indices);
3566     return;
3567   }
3568 
3569   // Copy a Z register triple by copying the individual sub-registers.
3570   if (AArch64::ZPR3RegClass.contains(DestReg) &&
3571       AArch64::ZPR3RegClass.contains(SrcReg)) {
3572     assert(Subtarget.hasSVEorSME() && "Unexpected SVE register.");
3573     static const unsigned Indices[] = {AArch64::zsub0, AArch64::zsub1,
3574                                        AArch64::zsub2};
3575     copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORR_ZZZ,
3576                      Indices);
3577     return;
3578   }
3579 
3580   // Copy a Z register quad by copying the individual sub-registers.
3581   if (AArch64::ZPR4RegClass.contains(DestReg) &&
3582       AArch64::ZPR4RegClass.contains(SrcReg)) {
3583     assert(Subtarget.hasSVEorSME() && "Unexpected SVE register.");
3584     static const unsigned Indices[] = {AArch64::zsub0, AArch64::zsub1,
3585                                        AArch64::zsub2, AArch64::zsub3};
3586     copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORR_ZZZ,
3587                      Indices);
3588     return;
3589   }
3590 
3591   if (AArch64::GPR64spRegClass.contains(DestReg) &&
3592       (AArch64::GPR64spRegClass.contains(SrcReg) || SrcReg == AArch64::XZR)) {
3593     if (DestReg == AArch64::SP || SrcReg == AArch64::SP) {
3594       // If either operand is SP, expand to ADD #0.
3595       BuildMI(MBB, I, DL, get(AArch64::ADDXri), DestReg)
3596           .addReg(SrcReg, getKillRegState(KillSrc))
3597           .addImm(0)
3598           .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0));
3599     } else if (SrcReg == AArch64::XZR && Subtarget.hasZeroCycleZeroingGP()) {
3600       BuildMI(MBB, I, DL, get(AArch64::MOVZXi), DestReg)
3601           .addImm(0)
3602           .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0));
3603     } else {
3604       // Otherwise, expand to ORR XZR.
3605       BuildMI(MBB, I, DL, get(AArch64::ORRXrr), DestReg)
3606           .addReg(AArch64::XZR)
3607           .addReg(SrcReg, getKillRegState(KillSrc));
3608     }
3609     return;
3610   }
3611 
3612   // Copy a DDDD register quad by copying the individual sub-registers.
3613   if (AArch64::DDDDRegClass.contains(DestReg) &&
3614       AArch64::DDDDRegClass.contains(SrcReg)) {
3615     static const unsigned Indices[] = {AArch64::dsub0, AArch64::dsub1,
3616                                        AArch64::dsub2, AArch64::dsub3};
3617     copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRv8i8,
3618                      Indices);
3619     return;
3620   }
3621 
3622   // Copy a DDD register triple by copying the individual sub-registers.
3623   if (AArch64::DDDRegClass.contains(DestReg) &&
3624       AArch64::DDDRegClass.contains(SrcReg)) {
3625     static const unsigned Indices[] = {AArch64::dsub0, AArch64::dsub1,
3626                                        AArch64::dsub2};
3627     copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRv8i8,
3628                      Indices);
3629     return;
3630   }
3631 
3632   // Copy a DD register pair by copying the individual sub-registers.
3633   if (AArch64::DDRegClass.contains(DestReg) &&
3634       AArch64::DDRegClass.contains(SrcReg)) {
3635     static const unsigned Indices[] = {AArch64::dsub0, AArch64::dsub1};
3636     copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRv8i8,
3637                      Indices);
3638     return;
3639   }
3640 
3641   // Copy a QQQQ register quad by copying the individual sub-registers.
3642   if (AArch64::QQQQRegClass.contains(DestReg) &&
3643       AArch64::QQQQRegClass.contains(SrcReg)) {
3644     static const unsigned Indices[] = {AArch64::qsub0, AArch64::qsub1,
3645                                        AArch64::qsub2, AArch64::qsub3};
3646     copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRv16i8,
3647                      Indices);
3648     return;
3649   }
3650 
3651   // Copy a QQQ register triple by copying the individual sub-registers.
3652   if (AArch64::QQQRegClass.contains(DestReg) &&
3653       AArch64::QQQRegClass.contains(SrcReg)) {
3654     static const unsigned Indices[] = {AArch64::qsub0, AArch64::qsub1,
3655                                        AArch64::qsub2};
3656     copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRv16i8,
3657                      Indices);
3658     return;
3659   }
3660 
3661   // Copy a QQ register pair by copying the individual sub-registers.
3662   if (AArch64::QQRegClass.contains(DestReg) &&
3663       AArch64::QQRegClass.contains(SrcReg)) {
3664     static const unsigned Indices[] = {AArch64::qsub0, AArch64::qsub1};
3665     copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRv16i8,
3666                      Indices);
3667     return;
3668   }
3669 
3670   if (AArch64::XSeqPairsClassRegClass.contains(DestReg) &&
3671       AArch64::XSeqPairsClassRegClass.contains(SrcReg)) {
3672     static const unsigned Indices[] = {AArch64::sube64, AArch64::subo64};
3673     copyGPRRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRXrs,
3674                     AArch64::XZR, Indices);
3675     return;
3676   }
3677 
3678   if (AArch64::WSeqPairsClassRegClass.contains(DestReg) &&
3679       AArch64::WSeqPairsClassRegClass.contains(SrcReg)) {
3680     static const unsigned Indices[] = {AArch64::sube32, AArch64::subo32};
3681     copyGPRRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRWrs,
3682                     AArch64::WZR, Indices);
3683     return;
3684   }
3685 
3686   if (AArch64::FPR128RegClass.contains(DestReg) &&
3687       AArch64::FPR128RegClass.contains(SrcReg)) {
3688     if (Subtarget.forceStreamingCompatibleSVE()) {
3689       BuildMI(MBB, I, DL, get(AArch64::ORR_ZZZ))
3690           .addReg(AArch64::Z0 + (DestReg - AArch64::Q0), RegState::Define)
3691           .addReg(AArch64::Z0 + (SrcReg - AArch64::Q0))
3692           .addReg(AArch64::Z0 + (SrcReg - AArch64::Q0));
3693     } else if (Subtarget.hasNEON()) {
3694       BuildMI(MBB, I, DL, get(AArch64::ORRv16i8), DestReg)
3695           .addReg(SrcReg)
3696           .addReg(SrcReg, getKillRegState(KillSrc));
3697     } else {
3698       BuildMI(MBB, I, DL, get(AArch64::STRQpre))
3699           .addReg(AArch64::SP, RegState::Define)
3700           .addReg(SrcReg, getKillRegState(KillSrc))
3701           .addReg(AArch64::SP)
3702           .addImm(-16);
3703       BuildMI(MBB, I, DL, get(AArch64::LDRQpre))
3704           .addReg(AArch64::SP, RegState::Define)
3705           .addReg(DestReg, RegState::Define)
3706           .addReg(AArch64::SP)
3707           .addImm(16);
3708     }
3709     return;
3710   }
3711 
3712   if (AArch64::FPR64RegClass.contains(DestReg) &&
3713       AArch64::FPR64RegClass.contains(SrcReg)) {
3714     BuildMI(MBB, I, DL, get(AArch64::FMOVDr), DestReg)
3715         .addReg(SrcReg, getKillRegState(KillSrc));
3716     return;
3717   }
3718 
3719   if (AArch64::FPR32RegClass.contains(DestReg) &&
3720       AArch64::FPR32RegClass.contains(SrcReg)) {
3721     BuildMI(MBB, I, DL, get(AArch64::FMOVSr), DestReg)
3722         .addReg(SrcReg, getKillRegState(KillSrc));
3723     return;
3724   }
3725 
3726   if (AArch64::FPR16RegClass.contains(DestReg) &&
3727       AArch64::FPR16RegClass.contains(SrcReg)) {
3728     DestReg =
3729         RI.getMatchingSuperReg(DestReg, AArch64::hsub, &AArch64::FPR32RegClass);
3730     SrcReg =
3731         RI.getMatchingSuperReg(SrcReg, AArch64::hsub, &AArch64::FPR32RegClass);
3732     BuildMI(MBB, I, DL, get(AArch64::FMOVSr), DestReg)
3733         .addReg(SrcReg, getKillRegState(KillSrc));
3734     return;
3735   }
3736 
3737   if (AArch64::FPR8RegClass.contains(DestReg) &&
3738       AArch64::FPR8RegClass.contains(SrcReg)) {
3739     DestReg =
3740         RI.getMatchingSuperReg(DestReg, AArch64::bsub, &AArch64::FPR32RegClass);
3741     SrcReg =
3742         RI.getMatchingSuperReg(SrcReg, AArch64::bsub, &AArch64::FPR32RegClass);
3743     BuildMI(MBB, I, DL, get(AArch64::FMOVSr), DestReg)
3744         .addReg(SrcReg, getKillRegState(KillSrc));
3745     return;
3746   }
3747 
3748   // Copies between GPR64 and FPR64.
3749   if (AArch64::FPR64RegClass.contains(DestReg) &&
3750       AArch64::GPR64RegClass.contains(SrcReg)) {
3751     BuildMI(MBB, I, DL, get(AArch64::FMOVXDr), DestReg)
3752         .addReg(SrcReg, getKillRegState(KillSrc));
3753     return;
3754   }
3755   if (AArch64::GPR64RegClass.contains(DestReg) &&
3756       AArch64::FPR64RegClass.contains(SrcReg)) {
3757     BuildMI(MBB, I, DL, get(AArch64::FMOVDXr), DestReg)
3758         .addReg(SrcReg, getKillRegState(KillSrc));
3759     return;
3760   }
3761   // Copies between GPR32 and FPR32.
3762   if (AArch64::FPR32RegClass.contains(DestReg) &&
3763       AArch64::GPR32RegClass.contains(SrcReg)) {
3764     BuildMI(MBB, I, DL, get(AArch64::FMOVWSr), DestReg)
3765         .addReg(SrcReg, getKillRegState(KillSrc));
3766     return;
3767   }
3768   if (AArch64::GPR32RegClass.contains(DestReg) &&
3769       AArch64::FPR32RegClass.contains(SrcReg)) {
3770     BuildMI(MBB, I, DL, get(AArch64::FMOVSWr), DestReg)
3771         .addReg(SrcReg, getKillRegState(KillSrc));
3772     return;
3773   }
3774 
3775   if (DestReg == AArch64::NZCV) {
3776     assert(AArch64::GPR64RegClass.contains(SrcReg) && "Invalid NZCV copy");
3777     BuildMI(MBB, I, DL, get(AArch64::MSR))
3778         .addImm(AArch64SysReg::NZCV)
3779         .addReg(SrcReg, getKillRegState(KillSrc))
3780         .addReg(AArch64::NZCV, RegState::Implicit | RegState::Define);
3781     return;
3782   }
3783 
3784   if (SrcReg == AArch64::NZCV) {
3785     assert(AArch64::GPR64RegClass.contains(DestReg) && "Invalid NZCV copy");
3786     BuildMI(MBB, I, DL, get(AArch64::MRS), DestReg)
3787         .addImm(AArch64SysReg::NZCV)
3788         .addReg(AArch64::NZCV, RegState::Implicit | getKillRegState(KillSrc));
3789     return;
3790   }
3791 
3792 #ifndef NDEBUG
3793   const TargetRegisterInfo &TRI = getRegisterInfo();
3794   errs() << TRI.getRegAsmName(DestReg) << " = COPY "
3795          << TRI.getRegAsmName(SrcReg) << "\n";
3796 #endif
3797   llvm_unreachable("unimplemented reg-to-reg copy");
3798 }
3799 
3800 static void storeRegPairToStackSlot(const TargetRegisterInfo &TRI,
3801                                     MachineBasicBlock &MBB,
3802                                     MachineBasicBlock::iterator InsertBefore,
3803                                     const MCInstrDesc &MCID,
3804                                     Register SrcReg, bool IsKill,
3805                                     unsigned SubIdx0, unsigned SubIdx1, int FI,
3806                                     MachineMemOperand *MMO) {
3807   Register SrcReg0 = SrcReg;
3808   Register SrcReg1 = SrcReg;
3809   if (SrcReg.isPhysical()) {
3810     SrcReg0 = TRI.getSubReg(SrcReg, SubIdx0);
3811     SubIdx0 = 0;
3812     SrcReg1 = TRI.getSubReg(SrcReg, SubIdx1);
3813     SubIdx1 = 0;
3814   }
3815   BuildMI(MBB, InsertBefore, DebugLoc(), MCID)
3816       .addReg(SrcReg0, getKillRegState(IsKill), SubIdx0)
3817       .addReg(SrcReg1, getKillRegState(IsKill), SubIdx1)
3818       .addFrameIndex(FI)
3819       .addImm(0)
3820       .addMemOperand(MMO);
3821 }
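// Illustrative expansion: spilling the physical sequential pair X0_X1 resolves
// the sub-registers up front and emits STPXi $x0, $x1, <frame-index>, 0; for a
// virtual register the sub-register indices are kept on the operands instead.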
3822 
3823 void AArch64InstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
3824                                            MachineBasicBlock::iterator MBBI,
3825                                            Register SrcReg, bool isKill, int FI,
3826                                            const TargetRegisterClass *RC,
3827                                            const TargetRegisterInfo *TRI,
3828                                            Register VReg) const {
3829   MachineFunction &MF = *MBB.getParent();
3830   MachineFrameInfo &MFI = MF.getFrameInfo();
3831 
3832   MachinePointerInfo PtrInfo = MachinePointerInfo::getFixedStack(MF, FI);
3833   MachineMemOperand *MMO =
3834       MF.getMachineMemOperand(PtrInfo, MachineMemOperand::MOStore,
3835                               MFI.getObjectSize(FI), MFI.getObjectAlign(FI));
3836   unsigned Opc = 0;
3837   bool Offset = true;
3838   unsigned StackID = TargetStackID::Default;
3839   switch (TRI->getSpillSize(*RC)) {
3840   case 1:
3841     if (AArch64::FPR8RegClass.hasSubClassEq(RC))
3842       Opc = AArch64::STRBui;
3843     break;
3844   case 2:
3845     if (AArch64::FPR16RegClass.hasSubClassEq(RC))
3846       Opc = AArch64::STRHui;
3847     else if (AArch64::PPRRegClass.hasSubClassEq(RC)) {
3848       assert(Subtarget.hasSVE() && "Unexpected register store without SVE");
3849       Opc = AArch64::STR_PXI;
3850       StackID = TargetStackID::ScalableVector;
3851     }
3852     break;
3853   case 4:
3854     if (AArch64::GPR32allRegClass.hasSubClassEq(RC)) {
3855       Opc = AArch64::STRWui;
3856       if (SrcReg.isVirtual())
3857         MF.getRegInfo().constrainRegClass(SrcReg, &AArch64::GPR32RegClass);
3858       else
3859         assert(SrcReg != AArch64::WSP);
3860     } else if (AArch64::FPR32RegClass.hasSubClassEq(RC))
3861       Opc = AArch64::STRSui;
3862     break;
3863   case 8:
3864     if (AArch64::GPR64allRegClass.hasSubClassEq(RC)) {
3865       Opc = AArch64::STRXui;
3866       if (SrcReg.isVirtual())
3867         MF.getRegInfo().constrainRegClass(SrcReg, &AArch64::GPR64RegClass);
3868       else
3869         assert(SrcReg != AArch64::SP);
3870     } else if (AArch64::FPR64RegClass.hasSubClassEq(RC)) {
3871       Opc = AArch64::STRDui;
3872     } else if (AArch64::WSeqPairsClassRegClass.hasSubClassEq(RC)) {
3873       storeRegPairToStackSlot(getRegisterInfo(), MBB, MBBI,
3874                               get(AArch64::STPWi), SrcReg, isKill,
3875                               AArch64::sube32, AArch64::subo32, FI, MMO);
3876       return;
3877     }
3878     break;
3879   case 16:
3880     if (AArch64::FPR128RegClass.hasSubClassEq(RC))
3881       Opc = AArch64::STRQui;
3882     else if (AArch64::DDRegClass.hasSubClassEq(RC)) {
3883       assert(Subtarget.hasNEON() && "Unexpected register store without NEON");
3884       Opc = AArch64::ST1Twov1d;
3885       Offset = false;
3886     } else if (AArch64::XSeqPairsClassRegClass.hasSubClassEq(RC)) {
3887       storeRegPairToStackSlot(getRegisterInfo(), MBB, MBBI,
3888                               get(AArch64::STPXi), SrcReg, isKill,
3889                               AArch64::sube64, AArch64::subo64, FI, MMO);
3890       return;
3891     } else if (AArch64::ZPRRegClass.hasSubClassEq(RC)) {
3892       assert(Subtarget.hasSVE() && "Unexpected register store without SVE");
3893       Opc = AArch64::STR_ZXI;
3894       StackID = TargetStackID::ScalableVector;
3895     }
3896     break;
3897   case 24:
3898     if (AArch64::DDDRegClass.hasSubClassEq(RC)) {
3899       assert(Subtarget.hasNEON() && "Unexpected register store without NEON");
3900       Opc = AArch64::ST1Threev1d;
3901       Offset = false;
3902     }
3903     break;
3904   case 32:
3905     if (AArch64::DDDDRegClass.hasSubClassEq(RC)) {
3906       assert(Subtarget.hasNEON() && "Unexpected register store without NEON");
3907       Opc = AArch64::ST1Fourv1d;
3908       Offset = false;
3909     } else if (AArch64::QQRegClass.hasSubClassEq(RC)) {
3910       assert(Subtarget.hasNEON() && "Unexpected register store without NEON");
3911       Opc = AArch64::ST1Twov2d;
3912       Offset = false;
3913     } else if (AArch64::ZPR2RegClass.hasSubClassEq(RC)) {
3914       assert(Subtarget.hasSVE() && "Unexpected register store without SVE");
3915       Opc = AArch64::STR_ZZXI;
3916       StackID = TargetStackID::ScalableVector;
3917     }
3918     break;
3919   case 48:
3920     if (AArch64::QQQRegClass.hasSubClassEq(RC)) {
3921       assert(Subtarget.hasNEON() && "Unexpected register store without NEON");
3922       Opc = AArch64::ST1Threev2d;
3923       Offset = false;
3924     } else if (AArch64::ZPR3RegClass.hasSubClassEq(RC)) {
3925       assert(Subtarget.hasSVE() && "Unexpected register store without SVE");
3926       Opc = AArch64::STR_ZZZXI;
3927       StackID = TargetStackID::ScalableVector;
3928     }
3929     break;
3930   case 64:
3931     if (AArch64::QQQQRegClass.hasSubClassEq(RC)) {
3932       assert(Subtarget.hasNEON() && "Unexpected register store without NEON");
3933       Opc = AArch64::ST1Fourv2d;
3934       Offset = false;
3935     } else if (AArch64::ZPR4RegClass.hasSubClassEq(RC)) {
3936       assert(Subtarget.hasSVE() && "Unexpected register store without SVE");
3937       Opc = AArch64::STR_ZZZZXI;
3938       StackID = TargetStackID::ScalableVector;
3939     }
3940     break;
3941   }
3942   assert(Opc && "Unknown register class");
3943   MFI.setStackID(FI, StackID);
3944 
3945   const MachineInstrBuilder MI = BuildMI(MBB, MBBI, DebugLoc(), get(Opc))
3946                                      .addReg(SrcReg, getKillRegState(isKill))
3947                                      .addFrameIndex(FI);
3948 
3949   if (Offset)
3950     MI.addImm(0);
3951   MI.addMemOperand(MMO);
3952 }
3953 
3954 static void loadRegPairFromStackSlot(const TargetRegisterInfo &TRI,
3955                                      MachineBasicBlock &MBB,
3956                                      MachineBasicBlock::iterator InsertBefore,
3957                                      const MCInstrDesc &MCID,
3958                                      Register DestReg, unsigned SubIdx0,
3959                                      unsigned SubIdx1, int FI,
3960                                      MachineMemOperand *MMO) {
3961   Register DestReg0 = DestReg;
3962   Register DestReg1 = DestReg;
3963   bool IsUndef = true;
3964   if (DestReg.isPhysical()) {
3965     DestReg0 = TRI.getSubReg(DestReg, SubIdx0);
3966     SubIdx0 = 0;
3967     DestReg1 = TRI.getSubReg(DestReg, SubIdx1);
3968     SubIdx1 = 0;
3969     IsUndef = false;
3970   }
3971   BuildMI(MBB, InsertBefore, DebugLoc(), MCID)
3972       .addReg(DestReg0, RegState::Define | getUndefRegState(IsUndef), SubIdx0)
3973       .addReg(DestReg1, RegState::Define | getUndefRegState(IsUndef), SubIdx1)
3974       .addFrameIndex(FI)
3975       .addImm(0)
3976       .addMemOperand(MMO);
3977 }
3978 
3979 void AArch64InstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
3980                                             MachineBasicBlock::iterator MBBI,
3981                                             Register DestReg, int FI,
3982                                             const TargetRegisterClass *RC,
3983                                             const TargetRegisterInfo *TRI,
3984                                             Register VReg) const {
3985   MachineFunction &MF = *MBB.getParent();
3986   MachineFrameInfo &MFI = MF.getFrameInfo();
3987   MachinePointerInfo PtrInfo = MachinePointerInfo::getFixedStack(MF, FI);
3988   MachineMemOperand *MMO =
3989       MF.getMachineMemOperand(PtrInfo, MachineMemOperand::MOLoad,
3990                               MFI.getObjectSize(FI), MFI.getObjectAlign(FI));
3991 
3992   unsigned Opc = 0;
3993   bool Offset = true;
3994   unsigned StackID = TargetStackID::Default;
3995   switch (TRI->getSpillSize(*RC)) {
3996   case 1:
3997     if (AArch64::FPR8RegClass.hasSubClassEq(RC))
3998       Opc = AArch64::LDRBui;
3999     break;
4000   case 2:
4001     if (AArch64::FPR16RegClass.hasSubClassEq(RC))
4002       Opc = AArch64::LDRHui;
4003     else if (AArch64::PPRRegClass.hasSubClassEq(RC)) {
4004       assert(Subtarget.hasSVE() && "Unexpected register load without SVE");
4005       Opc = AArch64::LDR_PXI;
4006       StackID = TargetStackID::ScalableVector;
4007     }
4008     break;
4009   case 4:
4010     if (AArch64::GPR32allRegClass.hasSubClassEq(RC)) {
4011       Opc = AArch64::LDRWui;
4012       if (DestReg.isVirtual())
4013         MF.getRegInfo().constrainRegClass(DestReg, &AArch64::GPR32RegClass);
4014       else
4015         assert(DestReg != AArch64::WSP);
4016     } else if (AArch64::FPR32RegClass.hasSubClassEq(RC))
4017       Opc = AArch64::LDRSui;
4018     break;
4019   case 8:
4020     if (AArch64::GPR64allRegClass.hasSubClassEq(RC)) {
4021       Opc = AArch64::LDRXui;
4022       if (DestReg.isVirtual())
4023         MF.getRegInfo().constrainRegClass(DestReg, &AArch64::GPR64RegClass);
4024       else
4025         assert(DestReg != AArch64::SP);
4026     } else if (AArch64::FPR64RegClass.hasSubClassEq(RC)) {
4027       Opc = AArch64::LDRDui;
4028     } else if (AArch64::WSeqPairsClassRegClass.hasSubClassEq(RC)) {
4029       loadRegPairFromStackSlot(getRegisterInfo(), MBB, MBBI,
4030                                get(AArch64::LDPWi), DestReg, AArch64::sube32,
4031                                AArch64::subo32, FI, MMO);
4032       return;
4033     }
4034     break;
4035   case 16:
4036     if (AArch64::FPR128RegClass.hasSubClassEq(RC))
4037       Opc = AArch64::LDRQui;
4038     else if (AArch64::DDRegClass.hasSubClassEq(RC)) {
4039       assert(Subtarget.hasNEON() && "Unexpected register load without NEON");
4040       Opc = AArch64::LD1Twov1d;
4041       Offset = false;
4042     } else if (AArch64::XSeqPairsClassRegClass.hasSubClassEq(RC)) {
4043       loadRegPairFromStackSlot(getRegisterInfo(), MBB, MBBI,
4044                                get(AArch64::LDPXi), DestReg, AArch64::sube64,
4045                                AArch64::subo64, FI, MMO);
4046       return;
4047     } else if (AArch64::ZPRRegClass.hasSubClassEq(RC)) {
4048       assert(Subtarget.hasSVE() && "Unexpected register load without SVE");
4049       Opc = AArch64::LDR_ZXI;
4050       StackID = TargetStackID::ScalableVector;
4051     }
4052     break;
4053   case 24:
4054     if (AArch64::DDDRegClass.hasSubClassEq(RC)) {
4055       assert(Subtarget.hasNEON() && "Unexpected register load without NEON");
4056       Opc = AArch64::LD1Threev1d;
4057       Offset = false;
4058     }
4059     break;
4060   case 32:
4061     if (AArch64::DDDDRegClass.hasSubClassEq(RC)) {
4062       assert(Subtarget.hasNEON() && "Unexpected register load without NEON");
4063       Opc = AArch64::LD1Fourv1d;
4064       Offset = false;
4065     } else if (AArch64::QQRegClass.hasSubClassEq(RC)) {
4066       assert(Subtarget.hasNEON() && "Unexpected register load without NEON");
4067       Opc = AArch64::LD1Twov2d;
4068       Offset = false;
4069     } else if (AArch64::ZPR2RegClass.hasSubClassEq(RC)) {
4070       assert(Subtarget.hasSVE() && "Unexpected register load without SVE");
4071       Opc = AArch64::LDR_ZZXI;
4072       StackID = TargetStackID::ScalableVector;
4073     }
4074     break;
4075   case 48:
4076     if (AArch64::QQQRegClass.hasSubClassEq(RC)) {
4077       assert(Subtarget.hasNEON() && "Unexpected register load without NEON");
4078       Opc = AArch64::LD1Threev2d;
4079       Offset = false;
4080     } else if (AArch64::ZPR3RegClass.hasSubClassEq(RC)) {
4081       assert(Subtarget.hasSVE() && "Unexpected register load without SVE");
4082       Opc = AArch64::LDR_ZZZXI;
4083       StackID = TargetStackID::ScalableVector;
4084     }
4085     break;
4086   case 64:
4087     if (AArch64::QQQQRegClass.hasSubClassEq(RC)) {
4088       assert(Subtarget.hasNEON() && "Unexpected register load without NEON");
4089       Opc = AArch64::LD1Fourv2d;
4090       Offset = false;
4091     } else if (AArch64::ZPR4RegClass.hasSubClassEq(RC)) {
4092       assert(Subtarget.hasSVE() && "Unexpected register load without SVE");
4093       Opc = AArch64::LDR_ZZZZXI;
4094       StackID = TargetStackID::ScalableVector;
4095     }
4096     break;
4097   }
4098 
4099   assert(Opc && "Unknown register class");
4100   MFI.setStackID(FI, StackID);
4101 
4102   const MachineInstrBuilder MI = BuildMI(MBB, MBBI, DebugLoc(), get(Opc))
4103                                      .addReg(DestReg, getDefRegState(true))
4104                                      .addFrameIndex(FI);
4105   if (Offset)
4106     MI.addImm(0);
4107   MI.addMemOperand(MMO);
4108 }
4109 
4110 bool llvm::isNZCVTouchedInInstructionRange(const MachineInstr &DefMI,
4111                                            const MachineInstr &UseMI,
4112                                            const TargetRegisterInfo *TRI) {
4113   return any_of(instructionsWithoutDebug(std::next(DefMI.getIterator()),
4114                                          UseMI.getIterator()),
4115                 [TRI](const MachineInstr &I) {
4116                   return I.modifiesRegister(AArch64::NZCV, TRI) ||
4117                          I.readsRegister(AArch64::NZCV, TRI);
4118                 });
4119 }
4120 
4121 void AArch64InstrInfo::decomposeStackOffsetForDwarfOffsets(
4122     const StackOffset &Offset, int64_t &ByteSized, int64_t &VGSized) {
4123   // The smallest scalable elements supported by scaled SVE addressing
4124   // modes are predicates, which are 2 scalable bytes in size. So the scalable
4125   // byte offset must always be a multiple of 2.
4126   assert(Offset.getScalable() % 2 == 0 && "Invalid frame offset");
4127 
4128   // VGSized offsets are divided by '2', because the VG register is the
4129   // number of 64-bit granules as opposed to 128-bit vector chunks,
4130   // which is how the 'n' in e.g. MVT::nxv1i8 is modelled.
4131   // So, for a stack offset of 16 MVT::nxv1i8's, the size is n x 16 bytes.
4132   // VG = n * 2 and the dwarf offset must be VG * 8 bytes.
4133   ByteSized = Offset.getFixed();
4134   VGSized = Offset.getScalable() / 2;
4135 }
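// Worked example: StackOffset::get(16, 32), i.e. 16 fixed bytes plus two
// scalable vectors (2 * 16 scalable bytes), decomposes into ByteSized = 16 and
// VGSized = 16, matching a DWARF expression of "16 + 16 * VG" bytes.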
4136 
4137 /// Returns the parts into which this frame offset can be decomposed for
4138 /// the purpose of describing a frame offset.
4139 /// For non-scalable offsets this is simply the byte size.
4140 void AArch64InstrInfo::decomposeStackOffsetForFrameOffsets(
4141     const StackOffset &Offset, int64_t &NumBytes, int64_t &NumPredicateVectors,
4142     int64_t &NumDataVectors) {
4143   // The smallest scalable elements supported by scaled SVE addressing
4144   // modes are predicates, which are 2 scalable bytes in size. So the scalable
4145   // byte offset must always be a multiple of 2.
4146   assert(Offset.getScalable() % 2 == 0 && "Invalid frame offset");
4147 
4148   NumBytes = Offset.getFixed();
4149   NumDataVectors = 0;
4150   NumPredicateVectors = Offset.getScalable() / 2;
4151   // This method is used to get the offsets to adjust the frame offset.
4152   // If the function requires ADDPL to be used and needs more than two ADDPL
4153   // instructions, part of the offset is folded into NumDataVectors so that it
4154   // uses ADDVL for part of it, reducing the number of ADDPL instructions.
4155   if (NumPredicateVectors % 8 == 0 || NumPredicateVectors < -64 ||
4156       NumPredicateVectors > 62) {
4157     NumDataVectors = NumPredicateVectors / 8;
4158     NumPredicateVectors -= NumDataVectors * 8;
4159   }
4160 }
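// Worked examples: a scalable offset of 34 bytes is 17 predicate-vector (PL)
// units, which is not a multiple of 8 and fits in [-64, 62], so it stays as
// NumPredicateVectors = 17. A scalable offset of 146 bytes is 73 PL units,
// which exceeds 62, so 9 data vectors (handled with ADDVL) are split off and
// NumPredicateVectors = 1 remains for ADDPL.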
4161 
4162 // Convenience function to create a DWARF expression for
4163 //   Expr + NumBytes + NumVGScaledBytes * AArch64::VG
4164 static void appendVGScaledOffsetExpr(SmallVectorImpl<char> &Expr, int NumBytes,
4165                                      int NumVGScaledBytes, unsigned VG,
4166                                      llvm::raw_string_ostream &Comment) {
4167   uint8_t buffer[16];
4168 
4169   if (NumBytes) {
4170     Expr.push_back(dwarf::DW_OP_consts);
4171     Expr.append(buffer, buffer + encodeSLEB128(NumBytes, buffer));
4172     Expr.push_back((uint8_t)dwarf::DW_OP_plus);
4173     Comment << (NumBytes < 0 ? " - " : " + ") << std::abs(NumBytes);
4174   }
4175 
4176   if (NumVGScaledBytes) {
4177     Expr.push_back((uint8_t)dwarf::DW_OP_consts);
4178     Expr.append(buffer, buffer + encodeSLEB128(NumVGScaledBytes, buffer));
4179 
4180     Expr.push_back((uint8_t)dwarf::DW_OP_bregx);
4181     Expr.append(buffer, buffer + encodeULEB128(VG, buffer));
4182     Expr.push_back(0);
4183 
4184     Expr.push_back((uint8_t)dwarf::DW_OP_mul);
4185     Expr.push_back((uint8_t)dwarf::DW_OP_plus);
4186 
4187     Comment << (NumVGScaledBytes < 0 ? " - " : " + ")
4188             << std::abs(NumVGScaledBytes) << " * VG";
4189   }
4190 }
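// For NumBytes = 16 and NumVGScaledBytes = 8 this appends
//   DW_OP_consts 16, DW_OP_plus, DW_OP_consts 8, DW_OP_bregx VG 0,
//   DW_OP_mul, DW_OP_plus
// and the comment text " + 16 + 8 * VG".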
4191 
4192 // Creates an MCCFIInstruction:
4193 //    { DW_CFA_def_cfa_expression, ULEB128 (sizeof expr), expr }
4194 static MCCFIInstruction createDefCFAExpression(const TargetRegisterInfo &TRI,
4195                                                unsigned Reg,
4196                                                const StackOffset &Offset) {
4197   int64_t NumBytes, NumVGScaledBytes;
4198   AArch64InstrInfo::decomposeStackOffsetForDwarfOffsets(Offset, NumBytes,
4199                                                         NumVGScaledBytes);
4200   std::string CommentBuffer;
4201   llvm::raw_string_ostream Comment(CommentBuffer);
4202 
4203   if (Reg == AArch64::SP)
4204     Comment << "sp";
4205   else if (Reg == AArch64::FP)
4206     Comment << "fp";
4207   else
4208     Comment << printReg(Reg, &TRI);
4209 
4210   // Build up the expression (Reg + NumBytes + NumVGScaledBytes * AArch64::VG)
4211   SmallString<64> Expr;
4212   unsigned DwarfReg = TRI.getDwarfRegNum(Reg, true);
4213   Expr.push_back((uint8_t)(dwarf::DW_OP_breg0 + DwarfReg));
4214   Expr.push_back(0);
4215   appendVGScaledOffsetExpr(Expr, NumBytes, NumVGScaledBytes,
4216                            TRI.getDwarfRegNum(AArch64::VG, true), Comment);
4217 
4218   // Wrap this into DW_CFA_def_cfa.
4219   SmallString<64> DefCfaExpr;
4220   DefCfaExpr.push_back(dwarf::DW_CFA_def_cfa_expression);
4221   uint8_t buffer[16];
4222   DefCfaExpr.append(buffer, buffer + encodeULEB128(Expr.size(), buffer));
4223   DefCfaExpr.append(Expr.str());
4224   return MCCFIInstruction::createEscape(nullptr, DefCfaExpr.str(),
4225                                         Comment.str());
4226 }
4227 
4228 MCCFIInstruction llvm::createDefCFA(const TargetRegisterInfo &TRI,
4229                                     unsigned FrameReg, unsigned Reg,
4230                                     const StackOffset &Offset,
4231                                     bool LastAdjustmentWasScalable) {
4232   if (Offset.getScalable())
4233     return createDefCFAExpression(TRI, Reg, Offset);
4234 
4235   if (FrameReg == Reg && !LastAdjustmentWasScalable)
4236     return MCCFIInstruction::cfiDefCfaOffset(nullptr, int(Offset.getFixed()));
4237 
4238   unsigned DwarfReg = TRI.getDwarfRegNum(Reg, true);
4239   return MCCFIInstruction::cfiDefCfa(nullptr, DwarfReg, (int)Offset.getFixed());
4240 }
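// E.g. a purely fixed stack adjustment where the CFA register is unchanged
// (FrameReg == Reg) collapses to a plain DW_CFA_def_cfa_offset with the fixed
// byte offset; only scalable offsets need the expression form above.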
4241 
4242 MCCFIInstruction llvm::createCFAOffset(const TargetRegisterInfo &TRI,
4243                                        unsigned Reg,
4244                                        const StackOffset &OffsetFromDefCFA) {
4245   int64_t NumBytes, NumVGScaledBytes;
4246   AArch64InstrInfo::decomposeStackOffsetForDwarfOffsets(
4247       OffsetFromDefCFA, NumBytes, NumVGScaledBytes);
4248 
4249   unsigned DwarfReg = TRI.getDwarfRegNum(Reg, true);
4250 
4251   // Non-scalable offsets can use DW_CFA_offset directly.
4252   if (!NumVGScaledBytes)
4253     return MCCFIInstruction::createOffset(nullptr, DwarfReg, NumBytes);
4254 
4255   std::string CommentBuffer;
4256   llvm::raw_string_ostream Comment(CommentBuffer);
4257   Comment << printReg(Reg, &TRI) << "  @ cfa";
4258 
4259   // Build up expression (NumBytes + NumVGScaledBytes * AArch64::VG)
4260   SmallString<64> OffsetExpr;
4261   appendVGScaledOffsetExpr(OffsetExpr, NumBytes, NumVGScaledBytes,
4262                            TRI.getDwarfRegNum(AArch64::VG, true), Comment);
4263 
4264   // Wrap this into DW_CFA_expression
4265   SmallString<64> CfaExpr;
4266   CfaExpr.push_back(dwarf::DW_CFA_expression);
4267   uint8_t buffer[16];
4268   CfaExpr.append(buffer, buffer + encodeULEB128(DwarfReg, buffer));
4269   CfaExpr.append(buffer, buffer + encodeULEB128(OffsetExpr.size(), buffer));
4270   CfaExpr.append(OffsetExpr.str());
4271 
4272   return MCCFIInstruction::createEscape(nullptr, CfaExpr.str(), Comment.str());
4273 }
4274 
4275 // Helper function to emit a frame offset adjustment from a given
4276 // pointer (SrcReg), storing the result in DestReg. The function is
4277 // "explicit" in that the caller must supply the add/sub opcode to use.
4278 static void emitFrameOffsetAdj(MachineBasicBlock &MBB,
4279                                MachineBasicBlock::iterator MBBI,
4280                                const DebugLoc &DL, unsigned DestReg,
4281                                unsigned SrcReg, int64_t Offset, unsigned Opc,
4282                                const TargetInstrInfo *TII,
4283                                MachineInstr::MIFlag Flag, bool NeedsWinCFI,
4284                                bool *HasWinCFI, bool EmitCFAOffset,
4285                                StackOffset CFAOffset, unsigned FrameReg) {
4286   int Sign = 1;
4287   unsigned MaxEncoding, ShiftSize;
4288   switch (Opc) {
4289   case AArch64::ADDXri:
4290   case AArch64::ADDSXri:
4291   case AArch64::SUBXri:
4292   case AArch64::SUBSXri:
4293     MaxEncoding = 0xfff;
4294     ShiftSize = 12;
4295     break;
4296   case AArch64::ADDVL_XXI:
4297   case AArch64::ADDPL_XXI:
4298   case AArch64::ADDSVL_XXI:
4299   case AArch64::ADDSPL_XXI:
4300     MaxEncoding = 31;
4301     ShiftSize = 0;
4302     if (Offset < 0) {
4303       MaxEncoding = 32;
4304       Sign = -1;
4305       Offset = -Offset;
4306     }
4307     break;
4308   default:
4309     llvm_unreachable("Unsupported opcode");
4310   }
4311 
4312   // `Offset` can be in bytes or in "scalable bytes".
4313   int VScale = 1;
4314   if (Opc == AArch64::ADDVL_XXI || Opc == AArch64::ADDSVL_XXI)
4315     VScale = 16;
4316   else if (Opc == AArch64::ADDPL_XXI || Opc == AArch64::ADDSPL_XXI)
4317     VScale = 2;
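  // For example, ADDVL ..., #1 advances by one SVE data vector (16 scalable
  // bytes), while ADDPL ..., #1 advances by one predicate (2 scalable bytes).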
4318 
4319   // FIXME: If the offset won't fit in 24-bits, compute the offset into a
4320   // scratch register.  If DestReg is a virtual register, use it as the
4321   // scratch register; otherwise, create a new virtual register (to be
4322   // replaced by the scavenger at the end of PEI).  That case can be optimized
4323   // slightly if DestReg is SP which is always 16-byte aligned, so the scratch
4324   // register can be loaded with offset%8 and the add/sub can use an extending
4325   // instruction with LSL#3.
4326   // Currently the function handles any offsets but generates a poor sequence
4327   // of code.
4328   //  assert(Offset < (1 << 24) && "unimplemented reg plus immediate");
4329 
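  // For example, with Opc == ADDXri and Offset == 4100 the loop below emits
  //   add dst, src, #1, lsl #12
  //   add dst, dst, #4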
4330   const unsigned MaxEncodableValue = MaxEncoding << ShiftSize;
4331   Register TmpReg = DestReg;
4332   if (TmpReg == AArch64::XZR)
4333     TmpReg = MBB.getParent()->getRegInfo().createVirtualRegister(
4334         &AArch64::GPR64RegClass);
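  // XZR cannot hold intermediate results, so when the destination is XZR
  // (e.g. a flag-setting compare against a stack offset) the partial sums are
  // built in a scratch virtual register; only the final instruction in the
  // loop below writes DestReg.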
4335   do {
4336     uint64_t ThisVal = std::min<uint64_t>(Offset, MaxEncodableValue);
4337     unsigned LocalShiftSize = 0;
4338     if (ThisVal > MaxEncoding) {
4339       ThisVal = ThisVal >> ShiftSize;
4340       LocalShiftSize = ShiftSize;
4341     }
4342     assert((ThisVal >> ShiftSize) <= MaxEncoding &&
4343            "Encoding cannot handle value that big");
4344 
4345     Offset -= ThisVal << LocalShiftSize;
4346     if (Offset == 0)
4347       TmpReg = DestReg;
4348     auto MBI = BuildMI(MBB, MBBI, DL, TII->get(Opc), TmpReg)
4349                    .addReg(SrcReg)
4350                    .addImm(Sign * (int)ThisVal);
4351     if (ShiftSize)
4352       MBI = MBI.addImm(
4353           AArch64_AM::getShifterImm(AArch64_AM::LSL, LocalShiftSize));
4354     MBI = MBI.setMIFlag(Flag);
4355 
4356     auto Change =
4357         VScale == 1
4358             ? StackOffset::getFixed(ThisVal << LocalShiftSize)
4359             : StackOffset::getScalable(VScale * (ThisVal << LocalShiftSize));
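    // CFAOffset is maintained as "CFA = DestReg + CFAOffset": subtracting from
    // the register grows that offset, adding to it shrinks it.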
4360     if (Sign == -1 || Opc == AArch64::SUBXri || Opc == AArch64::SUBSXri)
4361       CFAOffset += Change;
4362     else
4363       CFAOffset -= Change;
4364     if (EmitCFAOffset && DestReg == TmpReg) {
4365       MachineFunction &MF = *MBB.getParent();
4366       const TargetSubtargetInfo &STI = MF.getSubtarget();
4367       const TargetRegisterInfo &TRI = *STI.getRegisterInfo();
4368 
4369       unsigned CFIIndex = MF.addFrameInst(
4370           createDefCFA(TRI, FrameReg, DestReg, CFAOffset, VScale != 1));
4371       BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION))
4372           .addCFIIndex(CFIIndex)
4373           .setMIFlags(Flag);
4374     }
4375 
4376     if (NeedsWinCFI) {
4377       assert(Sign == 1 && "SEH directives should always have a positive sign");
4378       int Imm = (int)(ThisVal << LocalShiftSize);
4379       if ((DestReg == AArch64::FP && SrcReg == AArch64::SP) ||
4380           (SrcReg == AArch64::FP && DestReg == AArch64::SP)) {
4381         if (HasWinCFI)
4382           *HasWinCFI = true;
4383         if (Imm == 0)
4384           BuildMI(MBB, MBBI, DL, TII->get(AArch64::SEH_SetFP)).setMIFlag(Flag);
4385         else
4386           BuildMI(MBB, MBBI, DL, TII->get(AArch64::SEH_AddFP))
4387               .addImm(Imm)
4388               .setMIFlag(Flag);
4389         assert(Offset == 0 && "Expected remaining offset to be zero to "
4390                               "emit a single SEH directive");
4391       } else if (DestReg == AArch64::SP) {
4392         if (HasWinCFI)
4393           *HasWinCFI = true;
4394         assert(SrcReg == AArch64::SP && "Unexpected SrcReg for SEH_StackAlloc");
4395         BuildMI(MBB, MBBI, DL, TII->get(AArch64::SEH_StackAlloc))
4396             .addImm(Imm)
4397             .setMIFlag(Flag);
4398       }
4399     }
4400 
4401     SrcReg = TmpReg;
4402   } while (Offset);
4403 }
4404 
4405 void llvm::emitFrameOffset(MachineBasicBlock &MBB,
4406                            MachineBasicBlock::iterator MBBI, const DebugLoc &DL,
4407                            unsigned DestReg, unsigned SrcReg,
4408                            StackOffset Offset, const TargetInstrInfo *TII,
4409                            MachineInstr::MIFlag Flag, bool SetNZCV,
4410                            bool NeedsWinCFI, bool *HasWinCFI,
4411                            bool EmitCFAOffset, StackOffset CFAOffset,
4412                            unsigned FrameReg) {
4413   // If a function is marked as arm_locally_streaming, then the runtime value of
4414   // vscale in the prologue/epilogue is different from the runtime value of vscale
4415   // in the function's body. To avoid having to consider multiple vscales,
4416   // we can use `addsvl` to allocate any scalable stack-slots, which under
4417   // most circumstances will be only locals, not callee-save slots.
4418   const Function &F = MBB.getParent()->getFunction();
4419   bool UseSVL = F.hasFnAttribute("aarch64_pstate_sm_body");
4420 
4421   int64_t Bytes, NumPredicateVectors, NumDataVectors;
4422   AArch64InstrInfo::decomposeStackOffsetForFrameOffsets(
4423       Offset, Bytes, NumPredicateVectors, NumDataVectors);
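  // For example, an offset of 16 fixed bytes plus 32 scalable bytes decomposes
  // into Bytes = 16 and NumDataVectors = 2, and (in the non-streaming-body
  // case) is emitted as "add dst, src, #16" followed by "addvl dst, dst, #2".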
4424 
4425   // First emit non-scalable frame offsets, or a simple 'mov'.
4426   if (Bytes || (!Offset && SrcReg != DestReg)) {
4427     assert((DestReg != AArch64::SP || Bytes % 8 == 0) &&
4428            "SP increment/decrement not 8-byte aligned");
4429     unsigned Opc = SetNZCV ? AArch64::ADDSXri : AArch64::ADDXri;
4430     if (Bytes < 0) {
4431       Bytes = -Bytes;
4432       Opc = SetNZCV ? AArch64::SUBSXri : AArch64::SUBXri;
4433     }
4434     emitFrameOffsetAdj(MBB, MBBI, DL, DestReg, SrcReg, Bytes, Opc, TII, Flag,
4435                        NeedsWinCFI, HasWinCFI, EmitCFAOffset, CFAOffset,
4436                        FrameReg);
4437     CFAOffset += (Opc == AArch64::ADDXri || Opc == AArch64::ADDSXri)
4438                      ? StackOffset::getFixed(-Bytes)
4439                      : StackOffset::getFixed(Bytes);
4440     SrcReg = DestReg;
4441     FrameReg = DestReg;
4442   }
4443 
4444   assert(!(SetNZCV && (NumPredicateVectors || NumDataVectors)) &&
4445          "SetNZCV not supported with SVE vectors");
4446   assert(!(NeedsWinCFI && (NumPredicateVectors || NumDataVectors)) &&
4447          "WinCFI not supported with SVE vectors");
4448 
4449   if (NumDataVectors) {
4450     emitFrameOffsetAdj(MBB, MBBI, DL, DestReg, SrcReg, NumDataVectors,
4451                        UseSVL ? AArch64::ADDSVL_XXI : AArch64::ADDVL_XXI,
4452                        TII, Flag, NeedsWinCFI, nullptr, EmitCFAOffset,
4453                        CFAOffset, FrameReg);
4454     CFAOffset += StackOffset::getScalable(-NumDataVectors * 16);
4455     SrcReg = DestReg;
4456   }
4457 
4458   if (NumPredicateVectors) {
4459     assert(DestReg != AArch64::SP && "Unaligned access to SP");
4460     emitFrameOffsetAdj(MBB, MBBI, DL, DestReg, SrcReg, NumPredicateVectors,
4461                        UseSVL ? AArch64::ADDSPL_XXI : AArch64::ADDPL_XXI,
4462                        TII, Flag, NeedsWinCFI, nullptr, EmitCFAOffset,
4463                        CFAOffset, FrameReg);
4464   }
4465 }
4466 
4467 MachineInstr *AArch64InstrInfo::foldMemoryOperandImpl(
4468     MachineFunction &MF, MachineInstr &MI, ArrayRef<unsigned> Ops,
4469     MachineBasicBlock::iterator InsertPt, int FrameIndex,
4470     LiveIntervals *LIS, VirtRegMap *VRM) const {
4471   // This is a bit of a hack. Consider this instruction:
4472   //
4473   //   %0 = COPY %sp; GPR64all:%0
4474   //
4475   // We explicitly chose GPR64all for the virtual register so such a copy might
4476   // be eliminated by RegisterCoalescer. However, that may not be possible, and
4477   // %0 may even spill. We can't spill %sp, and since it is in the GPR64all
4478   // register class, TargetInstrInfo::foldMemoryOperand() is going to try.
4479   //
4480   // To prevent that, we are going to constrain the %0 register class here.
4481   //
4482   // <rdar://problem/11522048>
4483   //
4484   if (MI.isFullCopy()) {
4485     Register DstReg = MI.getOperand(0).getReg();
4486     Register SrcReg = MI.getOperand(1).getReg();
4487     if (SrcReg == AArch64::SP && DstReg.isVirtual()) {
4488       MF.getRegInfo().constrainRegClass(DstReg, &AArch64::GPR64RegClass);
4489       return nullptr;
4490     }
4491     if (DstReg == AArch64::SP && SrcReg.isVirtual()) {
4492       MF.getRegInfo().constrainRegClass(SrcReg, &AArch64::GPR64RegClass);
4493       return nullptr;
4494     }
4495     // Nothing can be folded with a copy from/to NZCV.
4496     if (SrcReg == AArch64::NZCV || DstReg == AArch64::NZCV)
4497       return nullptr;
4498   }
4499 
4500   // Handle the case where a copy is being spilled or filled but the source
4501   // and destination register class don't match.  For example:
4502   //
4503   //   %0 = COPY %xzr; GPR64common:%0
4504   //
4505   // In this case we can still safely fold away the COPY and generate the
4506   // following spill code:
4507   //
4508   //   STRXui %xzr, %stack.0
4509   //
4510   // This also eliminates spilled cross register class COPYs (e.g. between x and
4511   // d regs) of the same size.  For example:
4512   //
4513   //   %0 = COPY %1; GPR64:%0, FPR64:%1
4514   //
4515   // will be filled as
4516   //
4517   //   LDRDui %0, fi<#0>
4518   //
4519   // instead of
4520   //
4521   //   LDRXui %Temp, fi<#0>
4522   //   %0 = FMOV %Temp
4523   //
4524   if (MI.isCopy() && Ops.size() == 1 &&
4525       // Make sure we're only folding the explicit COPY defs/uses.
4526       (Ops[0] == 0 || Ops[0] == 1)) {
4527     bool IsSpill = Ops[0] == 0;
4528     bool IsFill = !IsSpill;
4529     const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
4530     const MachineRegisterInfo &MRI = MF.getRegInfo();
4531     MachineBasicBlock &MBB = *MI.getParent();
4532     const MachineOperand &DstMO = MI.getOperand(0);
4533     const MachineOperand &SrcMO = MI.getOperand(1);
4534     Register DstReg = DstMO.getReg();
4535     Register SrcReg = SrcMO.getReg();
4536     // This is slightly expensive to compute for physical regs since
4537     // getMinimalPhysRegClass is slow.
4538     auto getRegClass = [&](unsigned Reg) {
4539       return Register::isVirtualRegister(Reg) ? MRI.getRegClass(Reg)
4540                                               : TRI.getMinimalPhysRegClass(Reg);
4541     };
4542 
4543     if (DstMO.getSubReg() == 0 && SrcMO.getSubReg() == 0) {
4544       assert(TRI.getRegSizeInBits(*getRegClass(DstReg)) ==
4545                  TRI.getRegSizeInBits(*getRegClass(SrcReg)) &&
4546              "Mismatched register size in non subreg COPY");
4547       if (IsSpill)
4548         storeRegToStackSlot(MBB, InsertPt, SrcReg, SrcMO.isKill(), FrameIndex,
4549                             getRegClass(SrcReg), &TRI, Register());
4550       else
4551         loadRegFromStackSlot(MBB, InsertPt, DstReg, FrameIndex,
4552                              getRegClass(DstReg), &TRI, Register());
4553       return &*--InsertPt;
4554     }
4555 
4556     // Handle cases like spilling def of:
4557     //
4558     //   %0:sub_32<def,read-undef> = COPY %wzr; GPR64common:%0
4559     //
4560     // where the physical register source can be widened and stored to the full
4561     // virtual reg destination stack slot, in this case producing:
4562     //
4563     //   STRXui %xzr, %stack.0
4564     //
4565     if (IsSpill && DstMO.isUndef() && SrcReg.isPhysical()) {
4566       assert(SrcMO.getSubReg() == 0 &&
4567              "Unexpected subreg on physical register");
4568       const TargetRegisterClass *SpillRC;
4569       unsigned SpillSubreg;
4570       switch (DstMO.getSubReg()) {
4571       default:
4572         SpillRC = nullptr;
4573         break;
4574       case AArch64::sub_32:
4575       case AArch64::ssub:
4576         if (AArch64::GPR32RegClass.contains(SrcReg)) {
4577           SpillRC = &AArch64::GPR64RegClass;
4578           SpillSubreg = AArch64::sub_32;
4579         } else if (AArch64::FPR32RegClass.contains(SrcReg)) {
4580           SpillRC = &AArch64::FPR64RegClass;
4581           SpillSubreg = AArch64::ssub;
4582         } else
4583           SpillRC = nullptr;
4584         break;
4585       case AArch64::dsub:
4586         if (AArch64::FPR64RegClass.contains(SrcReg)) {
4587           SpillRC = &AArch64::FPR128RegClass;
4588           SpillSubreg = AArch64::dsub;
4589         } else
4590           SpillRC = nullptr;
4591         break;
4592       }
4593 
4594       if (SpillRC)
4595         if (unsigned WidenedSrcReg =
4596                 TRI.getMatchingSuperReg(SrcReg, SpillSubreg, SpillRC)) {
4597           storeRegToStackSlot(MBB, InsertPt, WidenedSrcReg, SrcMO.isKill(),
4598                               FrameIndex, SpillRC, &TRI, Register());
4599           return &*--InsertPt;
4600         }
4601     }
4602 
4603     // Handle cases like filling use of:
4604     //
4605     //   %0:sub_32<def,read-undef> = COPY %1; GPR64:%0, GPR32:%1
4606     //
4607     // where we can load the full virtual reg source stack slot, into the subreg
4608     // destination, in this case producing:
4609     //
4610     //   LDRWui %0:sub_32<def,read-undef>, %stack.0
4611     //
4612     if (IsFill && SrcMO.getSubReg() == 0 && DstMO.isUndef()) {
4613       const TargetRegisterClass *FillRC;
4614       switch (DstMO.getSubReg()) {
4615       default:
4616         FillRC = nullptr;
4617         break;
4618       case AArch64::sub_32:
4619         FillRC = &AArch64::GPR32RegClass;
4620         break;
4621       case AArch64::ssub:
4622         FillRC = &AArch64::FPR32RegClass;
4623         break;
4624       case AArch64::dsub:
4625         FillRC = &AArch64::FPR64RegClass;
4626         break;
4627       }
4628 
4629       if (FillRC) {
4630         assert(TRI.getRegSizeInBits(*getRegClass(SrcReg)) ==
4631                    TRI.getRegSizeInBits(*FillRC) &&
4632                "Mismatched regclass size on folded subreg COPY");
4633         loadRegFromStackSlot(MBB, InsertPt, DstReg, FrameIndex, FillRC, &TRI,
4634                              Register());
4635         MachineInstr &LoadMI = *--InsertPt;
4636         MachineOperand &LoadDst = LoadMI.getOperand(0);
4637         assert(LoadDst.getSubReg() == 0 && "unexpected subreg on fill load");
4638         LoadDst.setSubReg(DstMO.getSubReg());
4639         LoadDst.setIsUndef();
4640         return &LoadMI;
4641       }
4642     }
4643   }
4644 
4645   // Cannot fold.
4646   return nullptr;
4647 }
4648 
4649 int llvm::isAArch64FrameOffsetLegal(const MachineInstr &MI,
4650                                     StackOffset &SOffset,
4651                                     bool *OutUseUnscaledOp,
4652                                     unsigned *OutUnscaledOp,
4653                                     int64_t *EmittableOffset) {
4654   // Set output values in case of early exit.
4655   if (EmittableOffset)
4656     *EmittableOffset = 0;
4657   if (OutUseUnscaledOp)
4658     *OutUseUnscaledOp = false;
4659   if (OutUnscaledOp)
4660     *OutUnscaledOp = 0;
4661 
4662   // Exit early for structured vector spills/fills as they can't take an
4663   // immediate offset.
4664   switch (MI.getOpcode()) {
4665   default:
4666     break;
4667   case AArch64::LD1Twov2d:
4668   case AArch64::LD1Threev2d:
4669   case AArch64::LD1Fourv2d:
4670   case AArch64::LD1Twov1d:
4671   case AArch64::LD1Threev1d:
4672   case AArch64::LD1Fourv1d:
4673   case AArch64::ST1Twov2d:
4674   case AArch64::ST1Threev2d:
4675   case AArch64::ST1Fourv2d:
4676   case AArch64::ST1Twov1d:
4677   case AArch64::ST1Threev1d:
4678   case AArch64::ST1Fourv1d:
4679   case AArch64::ST1i8:
4680   case AArch64::ST1i16:
4681   case AArch64::ST1i32:
4682   case AArch64::ST1i64:
4683   case AArch64::IRG:
4684   case AArch64::IRGstack:
4685   case AArch64::STGloop:
4686   case AArch64::STZGloop:
4687     return AArch64FrameOffsetCannotUpdate;
4688   }
4689 
4690   // Get the min/max offset and the scale.
4691   TypeSize ScaleValue(0U, false);
4692   unsigned Width;
4693   int64_t MinOff, MaxOff;
4694   if (!AArch64InstrInfo::getMemOpInfo(MI.getOpcode(), ScaleValue, Width, MinOff,
4695                                       MaxOff))
4696     llvm_unreachable("unhandled opcode in isAArch64FrameOffsetLegal");
4697 
4698   // Construct the complete offset.
4699   bool IsMulVL = ScaleValue.isScalable();
4700   unsigned Scale = ScaleValue.getKnownMinValue();
4701   int64_t Offset = IsMulVL ? SOffset.getScalable() : SOffset.getFixed();
4702 
4703   const MachineOperand &ImmOpnd =
4704       MI.getOperand(AArch64InstrInfo::getLoadStoreImmIdx(MI.getOpcode()));
4705   Offset += ImmOpnd.getImm() * Scale;
4706 
4707   // If the offset doesn't match the scale, rewrite the instruction to use
4708   // the unscaled variant instead; do the same if the offset is negative and
4709   // an unscaled op is available.
4710   std::optional<unsigned> UnscaledOp =
4711       AArch64InstrInfo::getUnscaledLdSt(MI.getOpcode());
4712   bool useUnscaledOp = UnscaledOp && (Offset % Scale || Offset < 0);
4713   if (useUnscaledOp &&
4714       !AArch64InstrInfo::getMemOpInfo(*UnscaledOp, ScaleValue, Width, MinOff,
4715                                       MaxOff))
4716     llvm_unreachable("unhandled opcode in isAArch64FrameOffsetLegal");
4717 
4718   Scale = ScaleValue.getKnownMinValue();
4719   assert(IsMulVL == ScaleValue.isScalable() &&
4720          "Unscaled opcode has different value for scalable");
4721 
4722   int64_t Remainder = Offset % Scale;
4723   assert(!(Remainder && useUnscaledOp) &&
4724          "Cannot have remainder when using unscaled op");
4725 
4726   assert(MinOff < MaxOff && "Unexpected Min/Max offsets");
4727   int64_t NewOffset = Offset / Scale;
4728   if (MinOff <= NewOffset && NewOffset <= MaxOff)
4729     Offset = Remainder;
4730   else {
4731     NewOffset = NewOffset < 0 ? MinOff : MaxOff;
4732     Offset = Offset - NewOffset * Scale + Remainder;
4733   }
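  // For example, an LDRXui (scale 8) with a byte offset of 12 cannot use the
  // scaled form; the unscaled LDURXi (scale 1, range [-256, 255]) can encode
  // 12 directly, so NewOffset = 12 and no residual offset remains.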
4734 
4735   if (EmittableOffset)
4736     *EmittableOffset = NewOffset;
4737   if (OutUseUnscaledOp)
4738     *OutUseUnscaledOp = useUnscaledOp;
4739   if (OutUnscaledOp && UnscaledOp)
4740     *OutUnscaledOp = *UnscaledOp;
4741 
4742   if (IsMulVL)
4743     SOffset = StackOffset::get(SOffset.getFixed(), Offset);
4744   else
4745     SOffset = StackOffset::get(Offset, SOffset.getScalable());
4746   return AArch64FrameOffsetCanUpdate |
4747          (SOffset ? 0 : AArch64FrameOffsetIsLegal);
4748 }
4749 
4750 bool llvm::rewriteAArch64FrameIndex(MachineInstr &MI, unsigned FrameRegIdx,
4751                                     unsigned FrameReg, StackOffset &Offset,
4752                                     const AArch64InstrInfo *TII) {
4753   unsigned Opcode = MI.getOpcode();
4754   unsigned ImmIdx = FrameRegIdx + 1;
4755 
4756   if (Opcode == AArch64::ADDSXri || Opcode == AArch64::ADDXri) {
4757     Offset += StackOffset::getFixed(MI.getOperand(ImmIdx).getImm());
4758     emitFrameOffset(*MI.getParent(), MI, MI.getDebugLoc(),
4759                     MI.getOperand(0).getReg(), FrameReg, Offset, TII,
4760                     MachineInstr::NoFlags, (Opcode == AArch64::ADDSXri));
4761     MI.eraseFromParent();
4762     Offset = StackOffset();
4763     return true;
4764   }
4765 
4766   int64_t NewOffset;
4767   unsigned UnscaledOp;
4768   bool UseUnscaledOp;
4769   int Status = isAArch64FrameOffsetLegal(MI, Offset, &UseUnscaledOp,
4770                                          &UnscaledOp, &NewOffset);
4771   if (Status & AArch64FrameOffsetCanUpdate) {
4772     if (Status & AArch64FrameOffsetIsLegal)
4773       // Replace the FrameIndex with FrameReg.
4774       MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
4775     if (UseUnscaledOp)
4776       MI.setDesc(TII->get(UnscaledOp));
4777 
4778     MI.getOperand(ImmIdx).ChangeToImmediate(NewOffset);
4779     return !Offset;
4780   }
4781 
4782   return false;
4783 }
4784 
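// NOP is an alias of HINT #0, so this emits the canonical AArch64 no-op.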
4785 MCInst AArch64InstrInfo::getNop() const {
4786   return MCInstBuilder(AArch64::HINT).addImm(0);
4787 }
4788 
4789 // AArch64 supports MachineCombiner.
4790 bool AArch64InstrInfo::useMachineCombiner() const { return true; }
4791 
4792 // True when Opc sets the condition flags (NZCV).
4793 static bool isCombineInstrSettingFlag(unsigned Opc) {
4794   switch (Opc) {
4795   case AArch64::ADDSWrr:
4796   case AArch64::ADDSWri:
4797   case AArch64::ADDSXrr:
4798   case AArch64::ADDSXri:
4799   case AArch64::SUBSWrr:
4800   case AArch64::SUBSXrr:
4801   // Note: MSUB Wd,Wn,Wm,Wi -> Wd = Wi - WnxWm, not Wd=WnxWm - Wi.
4802   case AArch64::SUBSWri:
4803   case AArch64::SUBSXri:
4804     return true;
4805   default:
4806     break;
4807   }
4808   return false;
4809 }
4810 
4811 // 32b Opcodes that can be combined with a MUL
4812 static bool isCombineInstrCandidate32(unsigned Opc) {
4813   switch (Opc) {
4814   case AArch64::ADDWrr:
4815   case AArch64::ADDWri:
4816   case AArch64::SUBWrr:
4817   case AArch64::ADDSWrr:
4818   case AArch64::ADDSWri:
4819   case AArch64::SUBSWrr:
4820   // Note: MSUB Wd,Wn,Wm,Wi -> Wd = Wi - WnxWm, not Wd=WnxWm - Wi.
4821   case AArch64::SUBWri:
4822   case AArch64::SUBSWri:
4823     return true;
4824   default:
4825     break;
4826   }
4827   return false;
4828 }
4829 
4830 // 64b Opcodes that can be combined with a MUL
4831 static bool isCombineInstrCandidate64(unsigned Opc) {
4832   switch (Opc) {
4833   case AArch64::ADDXrr:
4834   case AArch64::ADDXri:
4835   case AArch64::SUBXrr:
4836   case AArch64::ADDSXrr:
4837   case AArch64::ADDSXri:
4838   case AArch64::SUBSXrr:
4839   // Note: MSUB Wd,Wn,Wm,Wi -> Wd = Wi - WnxWm, not Wd=WnxWm - Wi.
4840   case AArch64::SUBXri:
4841   case AArch64::SUBSXri:
4842   case AArch64::ADDv8i8:
4843   case AArch64::ADDv16i8:
4844   case AArch64::ADDv4i16:
4845   case AArch64::ADDv8i16:
4846   case AArch64::ADDv2i32:
4847   case AArch64::ADDv4i32:
4848   case AArch64::SUBv8i8:
4849   case AArch64::SUBv16i8:
4850   case AArch64::SUBv4i16:
4851   case AArch64::SUBv8i16:
4852   case AArch64::SUBv2i32:
4853   case AArch64::SUBv4i32:
4854     return true;
4855   default:
4856     break;
4857   }
4858   return false;
4859 }
4860 
4861 // FP Opcodes that can be combined with a FMUL.
4862 static bool isCombineInstrCandidateFP(const MachineInstr &Inst) {
4863   switch (Inst.getOpcode()) {
4864   default:
4865     break;
4866   case AArch64::FADDHrr:
4867   case AArch64::FADDSrr:
4868   case AArch64::FADDDrr:
4869   case AArch64::FADDv4f16:
4870   case AArch64::FADDv8f16:
4871   case AArch64::FADDv2f32:
4872   case AArch64::FADDv2f64:
4873   case AArch64::FADDv4f32:
4874   case AArch64::FSUBHrr:
4875   case AArch64::FSUBSrr:
4876   case AArch64::FSUBDrr:
4877   case AArch64::FSUBv4f16:
4878   case AArch64::FSUBv8f16:
4879   case AArch64::FSUBv2f32:
4880   case AArch64::FSUBv2f64:
4881   case AArch64::FSUBv4f32:
4882     TargetOptions Options = Inst.getParent()->getParent()->getTarget().Options;
4883     // We can fuse FADD/FSUB with FMUL, if fusion is either allowed globally by
4884     // the target options or if FADD/FSUB has the contract fast-math flag.
4885     return Options.UnsafeFPMath ||
4886            Options.AllowFPOpFusion == FPOpFusion::Fast ||
4887            Inst.getFlag(MachineInstr::FmContract);
4889   }
4890   return false;
4891 }
4892 
4893 // Opcodes that can be combined with a MUL
4894 static bool isCombineInstrCandidate(unsigned Opc) {
4895   return (isCombineInstrCandidate32(Opc) || isCombineInstrCandidate64(Opc));
4896 }
4897 
4898 //
4899 // Utility routine that checks if \param MO is defined by an
4900 // \param CombineOpc instruction in the basic block \param MBB
4901 static bool canCombine(MachineBasicBlock &MBB, MachineOperand &MO,
4902                        unsigned CombineOpc, unsigned ZeroReg = 0,
4903                        bool CheckZeroReg = false) {
4904   MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
4905   MachineInstr *MI = nullptr;
4906 
4907   if (MO.isReg() && MO.getReg().isVirtual())
4908     MI = MRI.getUniqueVRegDef(MO.getReg());
4909   // And it needs to be in the trace (otherwise, it won't have a depth).
4910   if (!MI || MI->getParent() != &MBB || (unsigned)MI->getOpcode() != CombineOpc)
4911     return false;
4912   // Must only be used by the user we combine with.
4913   if (!MRI.hasOneNonDBGUse(MI->getOperand(0).getReg()))
4914     return false;
4915 
4916   if (CheckZeroReg) {
4917     assert(MI->getNumOperands() >= 4 && MI->getOperand(0).isReg() &&
4918            MI->getOperand(1).isReg() && MI->getOperand(2).isReg() &&
4919            MI->getOperand(3).isReg() && "MAdd/MSub must have at least 4 regs");
4920     // The third input reg must be zero.
4921     if (MI->getOperand(3).getReg() != ZeroReg)
4922       return false;
4923   }
4924 
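  // If the candidate sets flags, it may only be removed when its NZCV def is
  // dead; findRegisterDefOperandIdx(..., /*isDead=*/true) returns -1 otherwise.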
4925   if (isCombineInstrSettingFlag(CombineOpc) &&
4926       MI->findRegisterDefOperandIdx(AArch64::NZCV, true) == -1)
4927     return false;
4928 
4929   return true;
4930 }
4931 
4932 //
4933 // Is \param MO defined by an integer multiply and can be combined?
4934 static bool canCombineWithMUL(MachineBasicBlock &MBB, MachineOperand &MO,
4935                               unsigned MulOpc, unsigned ZeroReg) {
4936   return canCombine(MBB, MO, MulOpc, ZeroReg, true);
4937 }
4938 
4939 //
4940 // Is \param MO defined by a floating-point multiply and can be combined?
4941 static bool canCombineWithFMUL(MachineBasicBlock &MBB, MachineOperand &MO,
4942                                unsigned MulOpc) {
4943   return canCombine(MBB, MO, MulOpc);
4944 }
4945 
4946 // TODO: There are many more machine instruction opcodes to match:
4947 //       1. Other data types (integer, vectors)
4948 //       2. Other math / logic operations (xor, or)
4949 //       3. Other forms of the same operation (intrinsics and other variants)
4950 bool AArch64InstrInfo::isAssociativeAndCommutative(const MachineInstr &Inst,
4951                                                    bool Invert) const {
4952   if (Invert)
4953     return false;
4954   switch (Inst.getOpcode()) {
4955   // == Floating-point types ==
4956   // -- Floating-point instructions --
4957   case AArch64::FADDHrr:
4958   case AArch64::FADDSrr:
4959   case AArch64::FADDDrr:
4960   case AArch64::FMULHrr:
4961   case AArch64::FMULSrr:
4962   case AArch64::FMULDrr:
4963   case AArch64::FMULX16:
4964   case AArch64::FMULX32:
4965   case AArch64::FMULX64:
4966   // -- Advanced SIMD instructions --
4967   case AArch64::FADDv4f16:
4968   case AArch64::FADDv8f16:
4969   case AArch64::FADDv2f32:
4970   case AArch64::FADDv4f32:
4971   case AArch64::FADDv2f64:
4972   case AArch64::FMULv4f16:
4973   case AArch64::FMULv8f16:
4974   case AArch64::FMULv2f32:
4975   case AArch64::FMULv4f32:
4976   case AArch64::FMULv2f64:
4977   case AArch64::FMULXv4f16:
4978   case AArch64::FMULXv8f16:
4979   case AArch64::FMULXv2f32:
4980   case AArch64::FMULXv4f32:
4981   case AArch64::FMULXv2f64:
4982   // -- SVE instructions --
4983   // Opcodes FMULX_ZZZ_? don't exist because there is no unpredicated FMULX
4984   // in the SVE instruction set (though there are predicated ones).
4985   case AArch64::FADD_ZZZ_H:
4986   case AArch64::FADD_ZZZ_S:
4987   case AArch64::FADD_ZZZ_D:
4988   case AArch64::FMUL_ZZZ_H:
4989   case AArch64::FMUL_ZZZ_S:
4990   case AArch64::FMUL_ZZZ_D:
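    // Reassociation may change rounding and the sign of zero, so it is only
    // allowed under UnsafeFPMath or when the instruction carries both the
    // reassoc and nsz fast-math flags.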
4991     return Inst.getParent()->getParent()->getTarget().Options.UnsafeFPMath ||
4992            (Inst.getFlag(MachineInstr::MIFlag::FmReassoc) &&
4993             Inst.getFlag(MachineInstr::MIFlag::FmNsz));
4994 
4995   // == Integer types ==
4996   // -- Base instructions --
4997   // Opcodes MULWrr and MULXrr don't exist because
4998   // `MUL <Wd>, <Wn>, <Wm>` and `MUL <Xd>, <Xn>, <Xm>` are aliases of
4999   // `MADD <Wd>, <Wn>, <Wm>, WZR` and `MADD <Xd>, <Xn>, <Xm>, XZR` respectively.
5000   // The machine combiner does not support three-source-operand machine
5001   // instructions, so we cannot reassociate MULs.
5002   case AArch64::ADDWrr:
5003   case AArch64::ADDXrr:
5004   case AArch64::ANDWrr:
5005   case AArch64::ANDXrr:
5006   case AArch64::ORRWrr:
5007   case AArch64::ORRXrr:
5008   case AArch64::EORWrr:
5009   case AArch64::EORXrr:
5010   case AArch64::EONWrr:
5011   case AArch64::EONXrr:
5012   // -- Advanced SIMD instructions --
5013   // Opcodes MULv1i64 and MULv2i64 don't exist because there is no 64-bit MUL
5014   // in the Advanced SIMD instruction set.
5015   case AArch64::ADDv8i8:
5016   case AArch64::ADDv16i8:
5017   case AArch64::ADDv4i16:
5018   case AArch64::ADDv8i16:
5019   case AArch64::ADDv2i32:
5020   case AArch64::ADDv4i32:
5021   case AArch64::ADDv1i64:
5022   case AArch64::ADDv2i64:
5023   case AArch64::MULv8i8:
5024   case AArch64::MULv16i8:
5025   case AArch64::MULv4i16:
5026   case AArch64::MULv8i16:
5027   case AArch64::MULv2i32:
5028   case AArch64::MULv4i32:
5029   case AArch64::ANDv8i8:
5030   case AArch64::ANDv16i8:
5031   case AArch64::ORRv8i8:
5032   case AArch64::ORRv16i8:
5033   case AArch64::EORv8i8:
5034   case AArch64::EORv16i8:
5035   // -- SVE instructions --
5036   case AArch64::ADD_ZZZ_B:
5037   case AArch64::ADD_ZZZ_H:
5038   case AArch64::ADD_ZZZ_S:
5039   case AArch64::ADD_ZZZ_D:
5040   case AArch64::MUL_ZZZ_B:
5041   case AArch64::MUL_ZZZ_H:
5042   case AArch64::MUL_ZZZ_S:
5043   case AArch64::MUL_ZZZ_D:
5044   case AArch64::AND_ZZZ:
5045   case AArch64::ORR_ZZZ:
5046   case AArch64::EOR_ZZZ:
5047     return true;
5048 
5049   default:
5050     return false;
5051   }
5052 }
5053 
5054 /// Find instructions that can be turned into madd.
5055 static bool getMaddPatterns(MachineInstr &Root,
5056                             SmallVectorImpl<MachineCombinerPattern> &Patterns) {
5057   unsigned Opc = Root.getOpcode();
5058   MachineBasicBlock &MBB = *Root.getParent();
5059   bool Found = false;
5060 
5061   if (!isCombineInstrCandidate(Opc))
5062     return false;
5063   if (isCombineInstrSettingFlag(Opc)) {
5064     int Cmp_NZCV = Root.findRegisterDefOperandIdx(AArch64::NZCV, true);
5065     // When NZCV is live bail out.
5066     if (Cmp_NZCV == -1)
5067       return false;
5068     unsigned NewOpc = convertToNonFlagSettingOpc(Root);
5069     // When opcode can't change bail out.
5070     // CHECKME: do we miss any cases for opcode conversion?
5071     if (NewOpc == Opc)
5072       return false;
5073     Opc = NewOpc;
5074   }
5075 
5076   auto setFound = [&](int Opcode, int Operand, unsigned ZeroReg,
5077                       MachineCombinerPattern Pattern) {
5078     if (canCombineWithMUL(MBB, Root.getOperand(Operand), Opcode, ZeroReg)) {
5079       Patterns.push_back(Pattern);
5080       Found = true;
5081     }
5082   };
5083 
5084   auto setVFound = [&](int Opcode, int Operand, MachineCombinerPattern Pattern) {
5085     if (canCombine(MBB, Root.getOperand(Operand), Opcode)) {
5086       Patterns.push_back(Pattern);
5087       Found = true;
5088     }
5089   };
5090 
5091   typedef MachineCombinerPattern MCP;
5092 
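  // For example, "%3:gpr32 = ADDWrr %2, %1" where %1 is defined by
  // "%1:gpr32 = MADDWrrr %a, %b, $wzr" (i.e. a 32-bit MUL) matches
  // MULADDW_OP2, since the multiply feeds operand 2 of the add.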
5093   switch (Opc) {
5094   default:
5095     break;
5096   case AArch64::ADDWrr:
5097     assert(Root.getOperand(1).isReg() && Root.getOperand(2).isReg() &&
5098            "ADDWrr does not have register operands");
5099     setFound(AArch64::MADDWrrr, 1, AArch64::WZR, MCP::MULADDW_OP1);
5100     setFound(AArch64::MADDWrrr, 2, AArch64::WZR, MCP::MULADDW_OP2);
5101     break;
5102   case AArch64::ADDXrr:
5103     setFound(AArch64::MADDXrrr, 1, AArch64::XZR, MCP::MULADDX_OP1);
5104     setFound(AArch64::MADDXrrr, 2, AArch64::XZR, MCP::MULADDX_OP2);
5105     break;
5106   case AArch64::SUBWrr:
5107     setFound(AArch64::MADDWrrr, 1, AArch64::WZR, MCP::MULSUBW_OP1);
5108     setFound(AArch64::MADDWrrr, 2, AArch64::WZR, MCP::MULSUBW_OP2);
5109     break;
5110   case AArch64::SUBXrr:
5111     setFound(AArch64::MADDXrrr, 1, AArch64::XZR, MCP::MULSUBX_OP1);
5112     setFound(AArch64::MADDXrrr, 2, AArch64::XZR, MCP::MULSUBX_OP2);
5113     break;
5114   case AArch64::ADDWri:
5115     setFound(AArch64::MADDWrrr, 1, AArch64::WZR, MCP::MULADDWI_OP1);
5116     break;
5117   case AArch64::ADDXri:
5118     setFound(AArch64::MADDXrrr, 1, AArch64::XZR, MCP::MULADDXI_OP1);
5119     break;
5120   case AArch64::SUBWri:
5121     setFound(AArch64::MADDWrrr, 1, AArch64::WZR, MCP::MULSUBWI_OP1);
5122     break;
5123   case AArch64::SUBXri:
5124     setFound(AArch64::MADDXrrr, 1, AArch64::XZR, MCP::MULSUBXI_OP1);
5125     break;
5126   case AArch64::ADDv8i8:
5127     setVFound(AArch64::MULv8i8, 1, MCP::MULADDv8i8_OP1);
5128     setVFound(AArch64::MULv8i8, 2, MCP::MULADDv8i8_OP2);
5129     break;
5130   case AArch64::ADDv16i8:
5131     setVFound(AArch64::MULv16i8, 1, MCP::MULADDv16i8_OP1);
5132     setVFound(AArch64::MULv16i8, 2, MCP::MULADDv16i8_OP2);
5133     break;
5134   case AArch64::ADDv4i16:
5135     setVFound(AArch64::MULv4i16, 1, MCP::MULADDv4i16_OP1);
5136     setVFound(AArch64::MULv4i16, 2, MCP::MULADDv4i16_OP2);
5137     setVFound(AArch64::MULv4i16_indexed, 1, MCP::MULADDv4i16_indexed_OP1);
5138     setVFound(AArch64::MULv4i16_indexed, 2, MCP::MULADDv4i16_indexed_OP2);
5139     break;
5140   case AArch64::ADDv8i16:
5141     setVFound(AArch64::MULv8i16, 1, MCP::MULADDv8i16_OP1);
5142     setVFound(AArch64::MULv8i16, 2, MCP::MULADDv8i16_OP2);
5143     setVFound(AArch64::MULv8i16_indexed, 1, MCP::MULADDv8i16_indexed_OP1);
5144     setVFound(AArch64::MULv8i16_indexed, 2, MCP::MULADDv8i16_indexed_OP2);
5145     break;
5146   case AArch64::ADDv2i32:
5147     setVFound(AArch64::MULv2i32, 1, MCP::MULADDv2i32_OP1);
5148     setVFound(AArch64::MULv2i32, 2, MCP::MULADDv2i32_OP2);
5149     setVFound(AArch64::MULv2i32_indexed, 1, MCP::MULADDv2i32_indexed_OP1);
5150     setVFound(AArch64::MULv2i32_indexed, 2, MCP::MULADDv2i32_indexed_OP2);
5151     break;
5152   case AArch64::ADDv4i32:
5153     setVFound(AArch64::MULv4i32, 1, MCP::MULADDv4i32_OP1);
5154     setVFound(AArch64::MULv4i32, 2, MCP::MULADDv4i32_OP2);
5155     setVFound(AArch64::MULv4i32_indexed, 1, MCP::MULADDv4i32_indexed_OP1);
5156     setVFound(AArch64::MULv4i32_indexed, 2, MCP::MULADDv4i32_indexed_OP2);
5157     break;
5158   case AArch64::SUBv8i8:
5159     setVFound(AArch64::MULv8i8, 1, MCP::MULSUBv8i8_OP1);
5160     setVFound(AArch64::MULv8i8, 2, MCP::MULSUBv8i8_OP2);
5161     break;
5162   case AArch64::SUBv16i8:
5163     setVFound(AArch64::MULv16i8, 1, MCP::MULSUBv16i8_OP1);
5164     setVFound(AArch64::MULv16i8, 2, MCP::MULSUBv16i8_OP2);
5165     break;
5166   case AArch64::SUBv4i16:
5167     setVFound(AArch64::MULv4i16, 1, MCP::MULSUBv4i16_OP1);
5168     setVFound(AArch64::MULv4i16, 2, MCP::MULSUBv4i16_OP2);
5169     setVFound(AArch64::MULv4i16_indexed, 1, MCP::MULSUBv4i16_indexed_OP1);
5170     setVFound(AArch64::MULv4i16_indexed, 2, MCP::MULSUBv4i16_indexed_OP2);
5171     break;
5172   case AArch64::SUBv8i16:
5173     setVFound(AArch64::MULv8i16, 1, MCP::MULSUBv8i16_OP1);
5174     setVFound(AArch64::MULv8i16, 2, MCP::MULSUBv8i16_OP2);
5175     setVFound(AArch64::MULv8i16_indexed, 1, MCP::MULSUBv8i16_indexed_OP1);
5176     setVFound(AArch64::MULv8i16_indexed, 2, MCP::MULSUBv8i16_indexed_OP2);
5177     break;
5178   case AArch64::SUBv2i32:
5179     setVFound(AArch64::MULv2i32, 1, MCP::MULSUBv2i32_OP1);
5180     setVFound(AArch64::MULv2i32, 2, MCP::MULSUBv2i32_OP2);
5181     setVFound(AArch64::MULv2i32_indexed, 1, MCP::MULSUBv2i32_indexed_OP1);
5182     setVFound(AArch64::MULv2i32_indexed, 2, MCP::MULSUBv2i32_indexed_OP2);
5183     break;
5184   case AArch64::SUBv4i32:
5185     setVFound(AArch64::MULv4i32, 1, MCP::MULSUBv4i32_OP1);
5186     setVFound(AArch64::MULv4i32, 2, MCP::MULSUBv4i32_OP2);
5187     setVFound(AArch64::MULv4i32_indexed, 1, MCP::MULSUBv4i32_indexed_OP1);
5188     setVFound(AArch64::MULv4i32_indexed, 2, MCP::MULSUBv4i32_indexed_OP2);
5189     break;
5190   }
5191   return Found;
5192 }
5193 /// Floating-Point Support
5194 
5195 /// Find floating-point instructions that can be turned into a fused multiply (fmadd/fmls).
5196 static bool getFMAPatterns(MachineInstr &Root,
5197                            SmallVectorImpl<MachineCombinerPattern> &Patterns) {
5198 
5199   if (!isCombineInstrCandidateFP(Root))
5200     return false;
5201 
5202   MachineBasicBlock &MBB = *Root.getParent();
5203   bool Found = false;
5204 
5205   auto Match = [&](int Opcode, int Operand,
5206                    MachineCombinerPattern Pattern) -> bool {
5207     if (canCombineWithFMUL(MBB, Root.getOperand(Operand), Opcode)) {
5208       Patterns.push_back(Pattern);
5209       return true;
5210     }
5211     return false;
5212   };
5213 
5214   typedef MachineCombinerPattern MCP;
5215 
5216   switch (Root.getOpcode()) {
5217   default:
5218     assert(false && "Unsupported FP instruction in combiner\n");
5219     break;
5220   case AArch64::FADDHrr:
5221     assert(Root.getOperand(1).isReg() && Root.getOperand(2).isReg() &&
5222            "FADDHrr does not have register operands");
5223 
5224     Found  = Match(AArch64::FMULHrr, 1, MCP::FMULADDH_OP1);
5225     Found |= Match(AArch64::FMULHrr, 2, MCP::FMULADDH_OP2);
5226     break;
5227   case AArch64::FADDSrr:
5228     assert(Root.getOperand(1).isReg() && Root.getOperand(2).isReg() &&
5229            "FADDSrr does not have register operands");
5230 
5231     Found |= Match(AArch64::FMULSrr, 1, MCP::FMULADDS_OP1) ||
5232              Match(AArch64::FMULv1i32_indexed, 1, MCP::FMLAv1i32_indexed_OP1);
5233 
5234     Found |= Match(AArch64::FMULSrr, 2, MCP::FMULADDS_OP2) ||
5235              Match(AArch64::FMULv1i32_indexed, 2, MCP::FMLAv1i32_indexed_OP2);
5236     break;
5237   case AArch64::FADDDrr:
5238     Found |= Match(AArch64::FMULDrr, 1, MCP::FMULADDD_OP1) ||
5239              Match(AArch64::FMULv1i64_indexed, 1, MCP::FMLAv1i64_indexed_OP1);
5240 
5241     Found |= Match(AArch64::FMULDrr, 2, MCP::FMULADDD_OP2) ||
5242              Match(AArch64::FMULv1i64_indexed, 2, MCP::FMLAv1i64_indexed_OP2);
5243     break;
5244   case AArch64::FADDv4f16:
5245     Found |= Match(AArch64::FMULv4i16_indexed, 1, MCP::FMLAv4i16_indexed_OP1) ||
5246              Match(AArch64::FMULv4f16, 1, MCP::FMLAv4f16_OP1);
5247 
5248     Found |= Match(AArch64::FMULv4i16_indexed, 2, MCP::FMLAv4i16_indexed_OP2) ||
5249              Match(AArch64::FMULv4f16, 2, MCP::FMLAv4f16_OP2);
5250     break;
5251   case AArch64::FADDv8f16:
5252     Found |= Match(AArch64::FMULv8i16_indexed, 1, MCP::FMLAv8i16_indexed_OP1) ||
5253              Match(AArch64::FMULv8f16, 1, MCP::FMLAv8f16_OP1);
5254 
5255     Found |= Match(AArch64::FMULv8i16_indexed, 2, MCP::FMLAv8i16_indexed_OP2) ||
5256              Match(AArch64::FMULv8f16, 2, MCP::FMLAv8f16_OP2);
5257     break;
5258   case AArch64::FADDv2f32:
5259     Found |= Match(AArch64::FMULv2i32_indexed, 1, MCP::FMLAv2i32_indexed_OP1) ||
5260              Match(AArch64::FMULv2f32, 1, MCP::FMLAv2f32_OP1);
5261 
5262     Found |= Match(AArch64::FMULv2i32_indexed, 2, MCP::FMLAv2i32_indexed_OP2) ||
5263              Match(AArch64::FMULv2f32, 2, MCP::FMLAv2f32_OP2);
5264     break;
5265   case AArch64::FADDv2f64:
5266     Found |= Match(AArch64::FMULv2i64_indexed, 1, MCP::FMLAv2i64_indexed_OP1) ||
5267              Match(AArch64::FMULv2f64, 1, MCP::FMLAv2f64_OP1);
5268 
5269     Found |= Match(AArch64::FMULv2i64_indexed, 2, MCP::FMLAv2i64_indexed_OP2) ||
5270              Match(AArch64::FMULv2f64, 2, MCP::FMLAv2f64_OP2);
5271     break;
5272   case AArch64::FADDv4f32:
5273     Found |= Match(AArch64::FMULv4i32_indexed, 1, MCP::FMLAv4i32_indexed_OP1) ||
5274              Match(AArch64::FMULv4f32, 1, MCP::FMLAv4f32_OP1);
5275 
5276     Found |= Match(AArch64::FMULv4i32_indexed, 2, MCP::FMLAv4i32_indexed_OP2) ||
5277              Match(AArch64::FMULv4f32, 2, MCP::FMLAv4f32_OP2);
5278     break;
5279   case AArch64::FSUBHrr:
5280     Found  = Match(AArch64::FMULHrr, 1, MCP::FMULSUBH_OP1);
5281     Found |= Match(AArch64::FMULHrr, 2, MCP::FMULSUBH_OP2);
5282     Found |= Match(AArch64::FNMULHrr, 1, MCP::FNMULSUBH_OP1);
5283     break;
5284   case AArch64::FSUBSrr:
5285     Found = Match(AArch64::FMULSrr, 1, MCP::FMULSUBS_OP1);
5286 
5287     Found |= Match(AArch64::FMULSrr, 2, MCP::FMULSUBS_OP2) ||
5288              Match(AArch64::FMULv1i32_indexed, 2, MCP::FMLSv1i32_indexed_OP2);
5289 
5290     Found |= Match(AArch64::FNMULSrr, 1, MCP::FNMULSUBS_OP1);
5291     break;
5292   case AArch64::FSUBDrr:
5293     Found = Match(AArch64::FMULDrr, 1, MCP::FMULSUBD_OP1);
5294 
5295     Found |= Match(AArch64::FMULDrr, 2, MCP::FMULSUBD_OP2) ||
5296              Match(AArch64::FMULv1i64_indexed, 2, MCP::FMLSv1i64_indexed_OP2);
5297 
5298     Found |= Match(AArch64::FNMULDrr, 1, MCP::FNMULSUBD_OP1);
5299     break;
5300   case AArch64::FSUBv4f16:
5301     Found |= Match(AArch64::FMULv4i16_indexed, 2, MCP::FMLSv4i16_indexed_OP2) ||
5302              Match(AArch64::FMULv4f16, 2, MCP::FMLSv4f16_OP2);
5303 
5304     Found |= Match(AArch64::FMULv4i16_indexed, 1, MCP::FMLSv4i16_indexed_OP1) ||
5305              Match(AArch64::FMULv4f16, 1, MCP::FMLSv4f16_OP1);
5306     break;
5307   case AArch64::FSUBv8f16:
5308     Found |= Match(AArch64::FMULv8i16_indexed, 2, MCP::FMLSv8i16_indexed_OP2) ||
5309              Match(AArch64::FMULv8f16, 2, MCP::FMLSv8f16_OP2);
5310 
5311     Found |= Match(AArch64::FMULv8i16_indexed, 1, MCP::FMLSv8i16_indexed_OP1) ||
5312              Match(AArch64::FMULv8f16, 1, MCP::FMLSv8f16_OP1);
5313     break;
5314   case AArch64::FSUBv2f32:
5315     Found |= Match(AArch64::FMULv2i32_indexed, 2, MCP::FMLSv2i32_indexed_OP2) ||
5316              Match(AArch64::FMULv2f32, 2, MCP::FMLSv2f32_OP2);
5317 
5318     Found |= Match(AArch64::FMULv2i32_indexed, 1, MCP::FMLSv2i32_indexed_OP1) ||
5319              Match(AArch64::FMULv2f32, 1, MCP::FMLSv2f32_OP1);
5320     break;
5321   case AArch64::FSUBv2f64:
5322     Found |= Match(AArch64::FMULv2i64_indexed, 2, MCP::FMLSv2i64_indexed_OP2) ||
5323              Match(AArch64::FMULv2f64, 2, MCP::FMLSv2f64_OP2);
5324 
5325     Found |= Match(AArch64::FMULv2i64_indexed, 1, MCP::FMLSv2i64_indexed_OP1) ||
5326              Match(AArch64::FMULv2f64, 1, MCP::FMLSv2f64_OP1);
5327     break;
5328   case AArch64::FSUBv4f32:
5329     Found |= Match(AArch64::FMULv4i32_indexed, 2, MCP::FMLSv4i32_indexed_OP2) ||
5330              Match(AArch64::FMULv4f32, 2, MCP::FMLSv4f32_OP2);
5331 
5332     Found |= Match(AArch64::FMULv4i32_indexed, 1, MCP::FMLSv4i32_indexed_OP1) ||
5333              Match(AArch64::FMULv4f32, 1, MCP::FMLSv4f32_OP1);
5334     break;
5335   }
5336   return Found;
5337 }
5338 
5339 static bool getFMULPatterns(MachineInstr &Root,
5340                             SmallVectorImpl<MachineCombinerPattern> &Patterns) {
5341   MachineBasicBlock &MBB = *Root.getParent();
5342   bool Found = false;
5343 
5344   auto Match = [&](unsigned Opcode, int Operand,
5345                    MachineCombinerPattern Pattern) -> bool {
5346     MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
5347     MachineOperand &MO = Root.getOperand(Operand);
5348     MachineInstr *MI = nullptr;
5349     if (MO.isReg() && MO.getReg().isVirtual())
5350       MI = MRI.getUniqueVRegDef(MO.getReg());
5351     // Ignore No-op COPYs in FMUL(COPY(DUP(..)))
5352     if (MI && MI->getOpcode() == TargetOpcode::COPY &&
5353         MI->getOperand(1).getReg().isVirtual())
5354       MI = MRI.getUniqueVRegDef(MI->getOperand(1).getReg());
5355     if (MI && MI->getOpcode() == Opcode) {
5356       Patterns.push_back(Pattern);
5357       return true;
5358     }
5359     return false;
5360   };
5361 
5362   typedef MachineCombinerPattern MCP;
5363 
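  // These patterns catch an FMUL whose operand is a (possibly copied) DUP of a
  // vector lane, so the combiner can rewrite it as an indexed FMUL reading the
  // lane directly.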
5364   switch (Root.getOpcode()) {
5365   default:
5366     return false;
5367   case AArch64::FMULv2f32:
5368     Found = Match(AArch64::DUPv2i32lane, 1, MCP::FMULv2i32_indexed_OP1);
5369     Found |= Match(AArch64::DUPv2i32lane, 2, MCP::FMULv2i32_indexed_OP2);
5370     break;
5371   case AArch64::FMULv2f64:
5372     Found = Match(AArch64::DUPv2i64lane, 1, MCP::FMULv2i64_indexed_OP1);
5373     Found |= Match(AArch64::DUPv2i64lane, 2, MCP::FMULv2i64_indexed_OP2);
5374     break;
5375   case AArch64::FMULv4f16:
5376     Found = Match(AArch64::DUPv4i16lane, 1, MCP::FMULv4i16_indexed_OP1);
5377     Found |= Match(AArch64::DUPv4i16lane, 2, MCP::FMULv4i16_indexed_OP2);
5378     break;
5379   case AArch64::FMULv4f32:
5380     Found = Match(AArch64::DUPv4i32lane, 1, MCP::FMULv4i32_indexed_OP1);
5381     Found |= Match(AArch64::DUPv4i32lane, 2, MCP::FMULv4i32_indexed_OP2);
5382     break;
5383   case AArch64::FMULv8f16:
5384     Found = Match(AArch64::DUPv8i16lane, 1, MCP::FMULv8i16_indexed_OP1);
5385     Found |= Match(AArch64::DUPv8i16lane, 2, MCP::FMULv8i16_indexed_OP2);
5386     break;
5387   }
5388 
5389   return Found;
5390 }
5391 
5392 /// Return true when a code sequence can improve throughput. It
5393 /// should be called only for instructions in loops.
5394 /// \param Pattern - combiner pattern
5395 bool AArch64InstrInfo::isThroughputPattern(
5396     MachineCombinerPattern Pattern) const {
5397   switch (Pattern) {
5398   default:
5399     break;
5400   case MachineCombinerPattern::FMULADDH_OP1:
5401   case MachineCombinerPattern::FMULADDH_OP2:
5402   case MachineCombinerPattern::FMULSUBH_OP1:
5403   case MachineCombinerPattern::FMULSUBH_OP2:
5404   case MachineCombinerPattern::FMULADDS_OP1:
5405   case MachineCombinerPattern::FMULADDS_OP2:
5406   case MachineCombinerPattern::FMULSUBS_OP1:
5407   case MachineCombinerPattern::FMULSUBS_OP2:
5408   case MachineCombinerPattern::FMULADDD_OP1:
5409   case MachineCombinerPattern::FMULADDD_OP2:
5410   case MachineCombinerPattern::FMULSUBD_OP1:
5411   case MachineCombinerPattern::FMULSUBD_OP2:
5412   case MachineCombinerPattern::FNMULSUBH_OP1:
5413   case MachineCombinerPattern::FNMULSUBS_OP1:
5414   case MachineCombinerPattern::FNMULSUBD_OP1:
5415   case MachineCombinerPattern::FMLAv4i16_indexed_OP1:
5416   case MachineCombinerPattern::FMLAv4i16_indexed_OP2:
5417   case MachineCombinerPattern::FMLAv8i16_indexed_OP1:
5418   case MachineCombinerPattern::FMLAv8i16_indexed_OP2:
5419   case MachineCombinerPattern::FMLAv1i32_indexed_OP1:
5420   case MachineCombinerPattern::FMLAv1i32_indexed_OP2:
5421   case MachineCombinerPattern::FMLAv1i64_indexed_OP1:
5422   case MachineCombinerPattern::FMLAv1i64_indexed_OP2:
5423   case MachineCombinerPattern::FMLAv4f16_OP2:
5424   case MachineCombinerPattern::FMLAv4f16_OP1:
5425   case MachineCombinerPattern::FMLAv8f16_OP1:
5426   case MachineCombinerPattern::FMLAv8f16_OP2:
5427   case MachineCombinerPattern::FMLAv2f32_OP2:
5428   case MachineCombinerPattern::FMLAv2f32_OP1:
5429   case MachineCombinerPattern::FMLAv2f64_OP1:
5430   case MachineCombinerPattern::FMLAv2f64_OP2:
5431   case MachineCombinerPattern::FMLAv2i32_indexed_OP1:
5432   case MachineCombinerPattern::FMLAv2i32_indexed_OP2:
5433   case MachineCombinerPattern::FMLAv2i64_indexed_OP1:
5434   case MachineCombinerPattern::FMLAv2i64_indexed_OP2:
5435   case MachineCombinerPattern::FMLAv4f32_OP1:
5436   case MachineCombinerPattern::FMLAv4f32_OP2:
5437   case MachineCombinerPattern::FMLAv4i32_indexed_OP1:
5438   case MachineCombinerPattern::FMLAv4i32_indexed_OP2:
5439   case MachineCombinerPattern::FMLSv4i16_indexed_OP1:
5440   case MachineCombinerPattern::FMLSv4i16_indexed_OP2:
5441   case MachineCombinerPattern::FMLSv8i16_indexed_OP1:
5442   case MachineCombinerPattern::FMLSv8i16_indexed_OP2:
5443   case MachineCombinerPattern::FMLSv1i32_indexed_OP2:
5444   case MachineCombinerPattern::FMLSv1i64_indexed_OP2:
5445   case MachineCombinerPattern::FMLSv2i32_indexed_OP2:
5446   case MachineCombinerPattern::FMLSv2i64_indexed_OP2:
5447   case MachineCombinerPattern::FMLSv4f16_OP1:
5448   case MachineCombinerPattern::FMLSv4f16_OP2:
5449   case MachineCombinerPattern::FMLSv8f16_OP1:
5450   case MachineCombinerPattern::FMLSv8f16_OP2:
5451   case MachineCombinerPattern::FMLSv2f32_OP2:
5452   case MachineCombinerPattern::FMLSv2f64_OP2:
5453   case MachineCombinerPattern::FMLSv4i32_indexed_OP2:
5454   case MachineCombinerPattern::FMLSv4f32_OP2:
5455   case MachineCombinerPattern::FMULv2i32_indexed_OP1:
5456   case MachineCombinerPattern::FMULv2i32_indexed_OP2:
5457   case MachineCombinerPattern::FMULv2i64_indexed_OP1:
5458   case MachineCombinerPattern::FMULv2i64_indexed_OP2:
5459   case MachineCombinerPattern::FMULv4i16_indexed_OP1:
5460   case MachineCombinerPattern::FMULv4i16_indexed_OP2:
5461   case MachineCombinerPattern::FMULv4i32_indexed_OP1:
5462   case MachineCombinerPattern::FMULv4i32_indexed_OP2:
5463   case MachineCombinerPattern::FMULv8i16_indexed_OP1:
5464   case MachineCombinerPattern::FMULv8i16_indexed_OP2:
5465   case MachineCombinerPattern::MULADDv8i8_OP1:
5466   case MachineCombinerPattern::MULADDv8i8_OP2:
5467   case MachineCombinerPattern::MULADDv16i8_OP1:
5468   case MachineCombinerPattern::MULADDv16i8_OP2:
5469   case MachineCombinerPattern::MULADDv4i16_OP1:
5470   case MachineCombinerPattern::MULADDv4i16_OP2:
5471   case MachineCombinerPattern::MULADDv8i16_OP1:
5472   case MachineCombinerPattern::MULADDv8i16_OP2:
5473   case MachineCombinerPattern::MULADDv2i32_OP1:
5474   case MachineCombinerPattern::MULADDv2i32_OP2:
5475   case MachineCombinerPattern::MULADDv4i32_OP1:
5476   case MachineCombinerPattern::MULADDv4i32_OP2:
5477   case MachineCombinerPattern::MULSUBv8i8_OP1:
5478   case MachineCombinerPattern::MULSUBv8i8_OP2:
5479   case MachineCombinerPattern::MULSUBv16i8_OP1:
5480   case MachineCombinerPattern::MULSUBv16i8_OP2:
5481   case MachineCombinerPattern::MULSUBv4i16_OP1:
5482   case MachineCombinerPattern::MULSUBv4i16_OP2:
5483   case MachineCombinerPattern::MULSUBv8i16_OP1:
5484   case MachineCombinerPattern::MULSUBv8i16_OP2:
5485   case MachineCombinerPattern::MULSUBv2i32_OP1:
5486   case MachineCombinerPattern::MULSUBv2i32_OP2:
5487   case MachineCombinerPattern::MULSUBv4i32_OP1:
5488   case MachineCombinerPattern::MULSUBv4i32_OP2:
5489   case MachineCombinerPattern::MULADDv4i16_indexed_OP1:
5490   case MachineCombinerPattern::MULADDv4i16_indexed_OP2:
5491   case MachineCombinerPattern::MULADDv8i16_indexed_OP1:
5492   case MachineCombinerPattern::MULADDv8i16_indexed_OP2:
5493   case MachineCombinerPattern::MULADDv2i32_indexed_OP1:
5494   case MachineCombinerPattern::MULADDv2i32_indexed_OP2:
5495   case MachineCombinerPattern::MULADDv4i32_indexed_OP1:
5496   case MachineCombinerPattern::MULADDv4i32_indexed_OP2:
5497   case MachineCombinerPattern::MULSUBv4i16_indexed_OP1:
5498   case MachineCombinerPattern::MULSUBv4i16_indexed_OP2:
5499   case MachineCombinerPattern::MULSUBv8i16_indexed_OP1:
5500   case MachineCombinerPattern::MULSUBv8i16_indexed_OP2:
5501   case MachineCombinerPattern::MULSUBv2i32_indexed_OP1:
5502   case MachineCombinerPattern::MULSUBv2i32_indexed_OP2:
5503   case MachineCombinerPattern::MULSUBv4i32_indexed_OP1:
5504   case MachineCombinerPattern::MULSUBv4i32_indexed_OP2:
5505     return true;
5506   } // end switch (Pattern)
5507   return false;
5508 }
5509 
5510 /// Find other MI combine patterns.
5511 static bool getMiscPatterns(MachineInstr &Root,
5512                             SmallVectorImpl<MachineCombinerPattern> &Patterns)
5513 {
5514   // A - (B + C)  ==>   (A - B) - C  or  (A - C) - B
5515   unsigned Opc = Root.getOpcode();
5516   MachineBasicBlock &MBB = *Root.getParent();
5517 
5518   switch (Opc) {
5519   case AArch64::SUBWrr:
5520   case AArch64::SUBSWrr:
5521   case AArch64::SUBXrr:
5522   case AArch64::SUBSXrr:
5523     // Found candidate root.
5524     break;
5525   default:
5526     return false;
5527   }
5528 
5529   if (isCombineInstrSettingFlag(Opc) &&
5530       Root.findRegisterDefOperandIdx(AArch64::NZCV, true) == -1)
5531     return false;
5532 
5533   if (canCombine(MBB, Root.getOperand(2), AArch64::ADDWrr) ||
5534       canCombine(MBB, Root.getOperand(2), AArch64::ADDSWrr) ||
5535       canCombine(MBB, Root.getOperand(2), AArch64::ADDXrr) ||
5536       canCombine(MBB, Root.getOperand(2), AArch64::ADDSXrr)) {
5537     Patterns.push_back(MachineCombinerPattern::SUBADD_OP1);
5538     Patterns.push_back(MachineCombinerPattern::SUBADD_OP2);
5539     return true;
5540   }
5541 
5542   return false;
5543 }
5544 
5545 /// Return true when there is potentially a faster code sequence for an
5546 /// instruction chain ending in \p Root. All potential patterns are listed in
5547 /// the \p Patterns vector. Patterns should be sorted in priority order since the
5548 /// pattern evaluator stops checking as soon as it finds a faster sequence.
5549 
5550 bool AArch64InstrInfo::getMachineCombinerPatterns(
5551     MachineInstr &Root, SmallVectorImpl<MachineCombinerPattern> &Patterns,
5552     bool DoRegPressureReduce) const {
5553   // Integer patterns
5554   if (getMaddPatterns(Root, Patterns))
5555     return true;
5556   // Floating point patterns
5557   if (getFMULPatterns(Root, Patterns))
5558     return true;
5559   if (getFMAPatterns(Root, Patterns))
5560     return true;
5561 
5562   // Other patterns
5563   if (getMiscPatterns(Root, Patterns))
5564     return true;
5565 
5566   return TargetInstrInfo::getMachineCombinerPatterns(Root, Patterns,
5567                                                      DoRegPressureReduce);
5568 }
5569 
5570 enum class FMAInstKind { Default, Indexed, Accumulator };
5571 /// genFusedMultiply - Generate fused multiply instructions.
5572 /// This function supports both integer and floating point instructions.
5573 /// A typical example:
5574 ///  F|MUL I=A,B,0
5575 ///  F|ADD R,I,C
5576 ///  ==> F|MADD R,A,B,C
5577 /// \param MF Containing MachineFunction
5578 /// \param MRI Register information
5579 /// \param TII Target information
5580 /// \param Root is the F|ADD instruction
5581 /// \param [out] InsInstrs is a vector of machine instructions and will
5582 /// contain the generated madd instruction
5583 /// \param IdxMulOpd is index of operand in Root that is the result of
5584 /// the F|MUL. In the example above IdxMulOpd is 1.
5585 /// \param MaddOpc the opcode of the f|madd instruction
5586 /// \param RC Register class of operands
5587 /// \param kind The kind of FMA instruction (addressing mode) to generate
5588 /// \param ReplacedAddend is the result register from the instruction
5589 /// replacing the non-combined operand, if any.
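/// The operand order of the emitted instruction depends on \p kind (see the
/// BuildMI calls below):
///   Default:     MaddOpc ResultReg, MulOp0, MulOp1, Addend
///   Indexed:     MaddOpc ResultReg, Addend, MulOp0, MulOp1, lane
///   Accumulator: MaddOpc ResultReg, Addend, MulOp0, MulOp1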
5590 static MachineInstr *
5591 genFusedMultiply(MachineFunction &MF, MachineRegisterInfo &MRI,
5592                  const TargetInstrInfo *TII, MachineInstr &Root,
5593                  SmallVectorImpl<MachineInstr *> &InsInstrs, unsigned IdxMulOpd,
5594                  unsigned MaddOpc, const TargetRegisterClass *RC,
5595                  FMAInstKind kind = FMAInstKind::Default,
5596                  const Register *ReplacedAddend = nullptr) {
5597   assert(IdxMulOpd == 1 || IdxMulOpd == 2);
5598 
5599   unsigned IdxOtherOpd = IdxMulOpd == 1 ? 2 : 1;
5600   MachineInstr *MUL = MRI.getUniqueVRegDef(Root.getOperand(IdxMulOpd).getReg());
5601   Register ResultReg = Root.getOperand(0).getReg();
5602   Register SrcReg0 = MUL->getOperand(1).getReg();
5603   bool Src0IsKill = MUL->getOperand(1).isKill();
5604   Register SrcReg1 = MUL->getOperand(2).getReg();
5605   bool Src1IsKill = MUL->getOperand(2).isKill();
5606 
5607   Register SrcReg2;
5608   bool Src2IsKill;
5609   if (ReplacedAddend) {
5610     // If we just generated a new addend, this instruction must be its only use.
5611     SrcReg2 = *ReplacedAddend;
5612     Src2IsKill = true;
5613   } else {
5614     SrcReg2 = Root.getOperand(IdxOtherOpd).getReg();
5615     Src2IsKill = Root.getOperand(IdxOtherOpd).isKill();
5616   }
5617 
5618   if (ResultReg.isVirtual())
5619     MRI.constrainRegClass(ResultReg, RC);
5620   if (SrcReg0.isVirtual())
5621     MRI.constrainRegClass(SrcReg0, RC);
5622   if (SrcReg1.isVirtual())
5623     MRI.constrainRegClass(SrcReg1, RC);
5624   if (SrcReg2.isVirtual())
5625     MRI.constrainRegClass(SrcReg2, RC);
5626 
5627   MachineInstrBuilder MIB;
5628   if (kind == FMAInstKind::Default)
5629     MIB = BuildMI(MF, MIMetadata(Root), TII->get(MaddOpc), ResultReg)
5630               .addReg(SrcReg0, getKillRegState(Src0IsKill))
5631               .addReg(SrcReg1, getKillRegState(Src1IsKill))
5632               .addReg(SrcReg2, getKillRegState(Src2IsKill));
5633   else if (kind == FMAInstKind::Indexed)
5634     MIB = BuildMI(MF, MIMetadata(Root), TII->get(MaddOpc), ResultReg)
5635               .addReg(SrcReg2, getKillRegState(Src2IsKill))
5636               .addReg(SrcReg0, getKillRegState(Src0IsKill))
5637               .addReg(SrcReg1, getKillRegState(Src1IsKill))
5638               .addImm(MUL->getOperand(3).getImm());
5639   else if (kind == FMAInstKind::Accumulator)
5640     MIB = BuildMI(MF, MIMetadata(Root), TII->get(MaddOpc), ResultReg)
5641               .addReg(SrcReg2, getKillRegState(Src2IsKill))
5642               .addReg(SrcReg0, getKillRegState(Src0IsKill))
5643               .addReg(SrcReg1, getKillRegState(Src1IsKill));
5644   else
5645     assert(false && "Invalid FMA instruction kind");
5646   // Insert the MADD (MADD, FMA, FMS, FMLA, FMLS)
5647   InsInstrs.push_back(MIB);
5648   return MUL;
5649 }
5650 
5651 /// Fold (FMUL x (DUP y lane)) into (FMUL_indexed x y lane)
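/// Illustrative sketch (v2f32 case; register names are hypothetical):
///   %dup = DUPv2i32lane %vec, lane
///   %res = FMULv2f32 %x, %dup
///   ==>
///   %res = FMULv2i32_indexed %x, %vec, lane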
5652 static MachineInstr *
5653 genIndexedMultiply(MachineInstr &Root,
5654                    SmallVectorImpl<MachineInstr *> &InsInstrs,
5655                    unsigned IdxDupOp, unsigned MulOpc,
5656                    const TargetRegisterClass *RC, MachineRegisterInfo &MRI) {
5657   assert(((IdxDupOp == 1) || (IdxDupOp == 2)) &&
5658          "Invalid index of FMUL operand");
5659 
5660   MachineFunction &MF = *Root.getMF();
5661   const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
5662 
5663   MachineInstr *Dup =
5664       MF.getRegInfo().getUniqueVRegDef(Root.getOperand(IdxDupOp).getReg());
5665 
5666   if (Dup->getOpcode() == TargetOpcode::COPY)
5667     Dup = MRI.getUniqueVRegDef(Dup->getOperand(1).getReg());
5668 
5669   Register DupSrcReg = Dup->getOperand(1).getReg();
5670   MRI.clearKillFlags(DupSrcReg);
5671   MRI.constrainRegClass(DupSrcReg, RC);
5672 
5673   unsigned DupSrcLane = Dup->getOperand(2).getImm();
5674 
5675   unsigned IdxMulOp = IdxDupOp == 1 ? 2 : 1;
5676   MachineOperand &MulOp = Root.getOperand(IdxMulOp);
5677 
5678   Register ResultReg = Root.getOperand(0).getReg();
5679 
5680   MachineInstrBuilder MIB;
5681   MIB = BuildMI(MF, MIMetadata(Root), TII->get(MulOpc), ResultReg)
5682             .add(MulOp)
5683             .addReg(DupSrcReg)
5684             .addImm(DupSrcLane);
5685 
5686   InsInstrs.push_back(MIB);
5687   return &Root;
5688 }
5689 
5690 /// genFusedMultiplyAcc - Helper to generate fused multiply accumulate
5691 /// instructions.
5692 ///
5693 /// \see genFusedMultiply
5694 static MachineInstr *genFusedMultiplyAcc(
5695     MachineFunction &MF, MachineRegisterInfo &MRI, const TargetInstrInfo *TII,
5696     MachineInstr &Root, SmallVectorImpl<MachineInstr *> &InsInstrs,
5697     unsigned IdxMulOpd, unsigned MaddOpc, const TargetRegisterClass *RC) {
5698   return genFusedMultiply(MF, MRI, TII, Root, InsInstrs, IdxMulOpd, MaddOpc, RC,
5699                           FMAInstKind::Accumulator);
5700 }
5701 
5702 /// genNeg - Helper to generate an intermediate negation of the second operand
5703 /// of Root
5704 static Register genNeg(MachineFunction &MF, MachineRegisterInfo &MRI,
5705                        const TargetInstrInfo *TII, MachineInstr &Root,
5706                        SmallVectorImpl<MachineInstr *> &InsInstrs,
5707                        DenseMap<unsigned, unsigned> &InstrIdxForVirtReg,
5708                        unsigned MnegOpc, const TargetRegisterClass *RC) {
5709   Register NewVR = MRI.createVirtualRegister(RC);
5710   MachineInstrBuilder MIB =
5711       BuildMI(MF, MIMetadata(Root), TII->get(MnegOpc), NewVR)
5712           .add(Root.getOperand(2));
5713   InsInstrs.push_back(MIB);
5714 
5715   assert(InstrIdxForVirtReg.empty());
5716   InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0));
5717 
5718   return NewVR;
5719 }
5720 
5721 /// genFusedMultiplyAccNeg - Helper to generate fused multiply accumulate
5722 /// instructions with an additional negation of the accumulator.
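/// For example (MULSUBv2i32_OP1; register names are hypothetical):
///   %mul = MULv2i32 %a, %b
///   %res = SUBv2i32 %mul, %c
///   ==>
///   %neg = NEGv2i32 %c
///   %res = MLAv2i32 %neg, %a, %b   // %neg + %a * %b == %a * %b - %c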
5723 static MachineInstr *genFusedMultiplyAccNeg(
5724     MachineFunction &MF, MachineRegisterInfo &MRI, const TargetInstrInfo *TII,
5725     MachineInstr &Root, SmallVectorImpl<MachineInstr *> &InsInstrs,
5726     DenseMap<unsigned, unsigned> &InstrIdxForVirtReg, unsigned IdxMulOpd,
5727     unsigned MaddOpc, unsigned MnegOpc, const TargetRegisterClass *RC) {
5728   assert(IdxMulOpd == 1);
5729 
5730   Register NewVR =
5731       genNeg(MF, MRI, TII, Root, InsInstrs, InstrIdxForVirtReg, MnegOpc, RC);
5732   return genFusedMultiply(MF, MRI, TII, Root, InsInstrs, IdxMulOpd, MaddOpc, RC,
5733                           FMAInstKind::Accumulator, &NewVR);
5734 }
5735 
5736 /// genFusedMultiplyIdx - Helper to generate indexed (by-element) fused
5737 /// multiply accumulate instructions.
5738 ///
5739 /// \see genFusedMultiply
5740 static MachineInstr *genFusedMultiplyIdx(
5741     MachineFunction &MF, MachineRegisterInfo &MRI, const TargetInstrInfo *TII,
5742     MachineInstr &Root, SmallVectorImpl<MachineInstr *> &InsInstrs,
5743     unsigned IdxMulOpd, unsigned MaddOpc, const TargetRegisterClass *RC) {
5744   return genFusedMultiply(MF, MRI, TII, Root, InsInstrs, IdxMulOpd, MaddOpc, RC,
5745                           FMAInstKind::Indexed);
5746 }
5747 
5748 /// genFusedMultiplyIdxNeg - Helper to generate indexed fused multiply
5749 /// accumulate instructions with an additional negation of the accumulator.
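/// For example (MULSUBv4i16_indexed_OP1; register names are hypothetical):
///   %mul = MULv4i16_indexed %a, %vec, lane
///   %res = SUBv4i16 %mul, %c
///   ==>
///   %neg = NEGv4i16 %c
///   %res = MLAv4i16_indexed %neg, %a, %vec, lane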
5750 static MachineInstr *genFusedMultiplyIdxNeg(
5751     MachineFunction &MF, MachineRegisterInfo &MRI, const TargetInstrInfo *TII,
5752     MachineInstr &Root, SmallVectorImpl<MachineInstr *> &InsInstrs,
5753     DenseMap<unsigned, unsigned> &InstrIdxForVirtReg, unsigned IdxMulOpd,
5754     unsigned MaddOpc, unsigned MnegOpc, const TargetRegisterClass *RC) {
5755   assert(IdxMulOpd == 1);
5756 
5757   Register NewVR =
5758       genNeg(MF, MRI, TII, Root, InsInstrs, InstrIdxForVirtReg, MnegOpc, RC);
5759 
5760   return genFusedMultiply(MF, MRI, TII, Root, InsInstrs, IdxMulOpd, MaddOpc, RC,
5761                           FMAInstKind::Indexed, &NewVR);
5762 }
5763 
5764 /// genMaddR - Generate madd instruction and combine mul and add using
5765 /// an extra virtual register
5766 /// Example - the ADD's immediate operand needs to be materialized in a register:
5767 ///   MUL I=A,B,0
5768 ///   ADD R,I,Imm
5769 ///   ==> ORR  V, ZR, Imm
5770 ///   ==> MADD R,A,B,V
5771 /// \param MF Containing MachineFunction
5772 /// \param MRI Register information
5773 /// \param TII Target information
5774 /// \param Root is the ADD instruction
5775 /// \param [out] InsInstrs is a vector of machine instructions and will
5776 /// contain the generated madd instruction
5777 /// \param IdxMulOpd is index of operand in Root that is the result of
5778 /// the MUL. In the example above IdxMulOpd is 1.
5779 /// \param MaddOpc the opcode of the madd instruction
5780 /// \param VR is a virtual register that holds the value of an ADD operand
5781 /// (V in the example above).
5782 /// \param RC Register class of operands
5783 static MachineInstr *genMaddR(MachineFunction &MF, MachineRegisterInfo &MRI,
5784                               const TargetInstrInfo *TII, MachineInstr &Root,
5785                               SmallVectorImpl<MachineInstr *> &InsInstrs,
5786                               unsigned IdxMulOpd, unsigned MaddOpc, unsigned VR,
5787                               const TargetRegisterClass *RC) {
5788   assert(IdxMulOpd == 1 || IdxMulOpd == 2);
5789 
5790   MachineInstr *MUL = MRI.getUniqueVRegDef(Root.getOperand(IdxMulOpd).getReg());
5791   Register ResultReg = Root.getOperand(0).getReg();
5792   Register SrcReg0 = MUL->getOperand(1).getReg();
5793   bool Src0IsKill = MUL->getOperand(1).isKill();
5794   Register SrcReg1 = MUL->getOperand(2).getReg();
5795   bool Src1IsKill = MUL->getOperand(2).isKill();
5796 
5797   if (ResultReg.isVirtual())
5798     MRI.constrainRegClass(ResultReg, RC);
5799   if (SrcReg0.isVirtual())
5800     MRI.constrainRegClass(SrcReg0, RC);
5801   if (SrcReg1.isVirtual())
5802     MRI.constrainRegClass(SrcReg1, RC);
5803   if (Register::isVirtualRegister(VR))
5804     MRI.constrainRegClass(VR, RC);
5805 
5806   MachineInstrBuilder MIB =
5807       BuildMI(MF, MIMetadata(Root), TII->get(MaddOpc), ResultReg)
5808           .addReg(SrcReg0, getKillRegState(Src0IsKill))
5809           .addReg(SrcReg1, getKillRegState(Src1IsKill))
5810           .addReg(VR);
5811   // Insert the MADD
5812   InsInstrs.push_back(MIB);
5813   return MUL;
5814 }
5815 
5816 /// Do the following transformation
5817 /// A - (B + C)  ==>   (A - B) - C
5818 /// A - (B + C)  ==>   (A - C) - B
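/// For example, for SUBADD_OP1 (register names are hypothetical):
///   %add = ADDWrr %b, %c
///   %res = SUBWrr %a, %add        // Root: A - (B + C)
///   ==>
///   %tmp = SUBWrr %a, %b          // A - B
///   %res = SUBWrr %tmp, %c        // (A - B) - C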
5819 static void
5820 genSubAdd2SubSub(MachineFunction &MF, MachineRegisterInfo &MRI,
5821                  const TargetInstrInfo *TII, MachineInstr &Root,
5822                  SmallVectorImpl<MachineInstr *> &InsInstrs,
5823                  SmallVectorImpl<MachineInstr *> &DelInstrs,
5824                  unsigned IdxOpd1,
5825                  DenseMap<unsigned, unsigned> &InstrIdxForVirtReg) {
5826   assert(IdxOpd1 == 1 || IdxOpd1 == 2);
5827   unsigned IdxOtherOpd = IdxOpd1 == 1 ? 2 : 1;
5828   MachineInstr *AddMI = MRI.getUniqueVRegDef(Root.getOperand(2).getReg());
5829 
5830   Register ResultReg = Root.getOperand(0).getReg();
5831   Register RegA = Root.getOperand(1).getReg();
5832   bool RegAIsKill = Root.getOperand(1).isKill();
5833   Register RegB = AddMI->getOperand(IdxOpd1).getReg();
5834   bool RegBIsKill = AddMI->getOperand(IdxOpd1).isKill();
5835   Register RegC = AddMI->getOperand(IdxOtherOpd).getReg();
5836   bool RegCIsKill = AddMI->getOperand(IdxOtherOpd).isKill();
5837   Register NewVR = MRI.createVirtualRegister(MRI.getRegClass(RegA));
5838 
5839   unsigned Opcode = Root.getOpcode();
5840   if (Opcode == AArch64::SUBSWrr)
5841     Opcode = AArch64::SUBWrr;
5842   else if (Opcode == AArch64::SUBSXrr)
5843     Opcode = AArch64::SUBXrr;
5844   else
5845     assert((Opcode == AArch64::SUBWrr || Opcode == AArch64::SUBXrr) &&
5846            "Unexpected instruction opcode.");
5847 
5848   MachineInstrBuilder MIB1 =
5849       BuildMI(MF, MIMetadata(Root), TII->get(Opcode), NewVR)
5850           .addReg(RegA, getKillRegState(RegAIsKill))
5851           .addReg(RegB, getKillRegState(RegBIsKill));
5852   MachineInstrBuilder MIB2 =
5853       BuildMI(MF, MIMetadata(Root), TII->get(Opcode), ResultReg)
5854           .addReg(NewVR, getKillRegState(true))
5855           .addReg(RegC, getKillRegState(RegCIsKill));
5856 
5857   InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0));
5858   InsInstrs.push_back(MIB1);
5859   InsInstrs.push_back(MIB2);
5860   DelInstrs.push_back(AddMI);
5861 }
5862 
5863 /// When getMachineCombinerPatterns() finds potential patterns,
5864 /// this function generates the instructions that could replace the
5865 /// original code sequence.
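/// The replacement instructions are appended to \p InsInstrs and the
/// instructions to be erased are appended to \p DelInstrs; the MachineCombiner
/// pass decides whether the alternative sequence is actually committed.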
5866 void AArch64InstrInfo::genAlternativeCodeSequence(
5867     MachineInstr &Root, MachineCombinerPattern Pattern,
5868     SmallVectorImpl<MachineInstr *> &InsInstrs,
5869     SmallVectorImpl<MachineInstr *> &DelInstrs,
5870     DenseMap<unsigned, unsigned> &InstrIdxForVirtReg) const {
5871   MachineBasicBlock &MBB = *Root.getParent();
5872   MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
5873   MachineFunction &MF = *MBB.getParent();
5874   const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
5875 
5876   MachineInstr *MUL = nullptr;
5877   const TargetRegisterClass *RC;
5878   unsigned Opc;
5879   switch (Pattern) {
5880   default:
5881     // Reassociate instructions.
5882     TargetInstrInfo::genAlternativeCodeSequence(Root, Pattern, InsInstrs,
5883                                                 DelInstrs, InstrIdxForVirtReg);
5884     return;
5885   case MachineCombinerPattern::SUBADD_OP1:
5886     // A - (B + C)
5887     // ==> (A - B) - C
5888     genSubAdd2SubSub(MF, MRI, TII, Root, InsInstrs, DelInstrs, 1,
5889                      InstrIdxForVirtReg);
5890     break;
5891   case MachineCombinerPattern::SUBADD_OP2:
5892     // A - (B + C)
5893     // ==> (A - C) - B
5894     genSubAdd2SubSub(MF, MRI, TII, Root, InsInstrs, DelInstrs, 2,
5895                      InstrIdxForVirtReg);
5896     break;
5897   case MachineCombinerPattern::MULADDW_OP1:
5898   case MachineCombinerPattern::MULADDX_OP1:
5899     // MUL I=A,B,0
5900     // ADD R,I,C
5901     // ==> MADD R,A,B,C
5902     // --- Create(MADD);
5903     if (Pattern == MachineCombinerPattern::MULADDW_OP1) {
5904       Opc = AArch64::MADDWrrr;
5905       RC = &AArch64::GPR32RegClass;
5906     } else {
5907       Opc = AArch64::MADDXrrr;
5908       RC = &AArch64::GPR64RegClass;
5909     }
5910     MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC);
5911     break;
5912   case MachineCombinerPattern::MULADDW_OP2:
5913   case MachineCombinerPattern::MULADDX_OP2:
5914     // MUL I=A,B,0
5915     // ADD R,C,I
5916     // ==> MADD R,A,B,C
5917     // --- Create(MADD);
5918     if (Pattern == MachineCombinerPattern::MULADDW_OP2) {
5919       Opc = AArch64::MADDWrrr;
5920       RC = &AArch64::GPR32RegClass;
5921     } else {
5922       Opc = AArch64::MADDXrrr;
5923       RC = &AArch64::GPR64RegClass;
5924     }
5925     MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC);
5926     break;
5927   case MachineCombinerPattern::MULADDWI_OP1:
5928   case MachineCombinerPattern::MULADDXI_OP1: {
5929     // MUL I=A,B,0
5930     // ADD R,I,Imm
5931     // ==> MOV V, Imm
5932     // ==> MADD R,A,B,V
5933     // --- Create(MADD);
5934     const TargetRegisterClass *OrrRC;
5935     unsigned BitSize, OrrOpc, ZeroReg;
5936     if (Pattern == MachineCombinerPattern::MULADDWI_OP1) {
5937       OrrOpc = AArch64::ORRWri;
5938       OrrRC = &AArch64::GPR32spRegClass;
5939       BitSize = 32;
5940       ZeroReg = AArch64::WZR;
5941       Opc = AArch64::MADDWrrr;
5942       RC = &AArch64::GPR32RegClass;
5943     } else {
5944       OrrOpc = AArch64::ORRXri;
5945       OrrRC = &AArch64::GPR64spRegClass;
5946       BitSize = 64;
5947       ZeroReg = AArch64::XZR;
5948       Opc = AArch64::MADDXrrr;
5949       RC = &AArch64::GPR64RegClass;
5950     }
5951     Register NewVR = MRI.createVirtualRegister(OrrRC);
5952     uint64_t Imm = Root.getOperand(2).getImm();
5953 
5954     if (Root.getOperand(3).isImm()) {
5955       unsigned Val = Root.getOperand(3).getImm();
5956       Imm = Imm << Val;
5957     }
5958     uint64_t UImm = SignExtend64(Imm, BitSize);
5959     // Bail out unless the immediate can be composed via a single instruction.
5960     SmallVector<AArch64_IMM::ImmInsnModel, 4> Insn;
5961     AArch64_IMM::expandMOVImm(UImm, BitSize, Insn);
5962     if (Insn.size() != 1)
5963       return;
5964     auto MovI = Insn.begin();
5965     MachineInstrBuilder MIB1;
5966     // MOV is an alias for one of three instructions: movz, movn, and orr.
5967     if (MovI->Opcode == OrrOpc)
5968       MIB1 = BuildMI(MF, MIMetadata(Root), TII->get(OrrOpc), NewVR)
5969                  .addReg(ZeroReg)
5970                  .addImm(MovI->Op2);
5971     else {
5972       if (BitSize == 32)
5973         assert((MovI->Opcode == AArch64::MOVNWi ||
5974                 MovI->Opcode == AArch64::MOVZWi) &&
5975                "Expected opcode");
5976       else
5977         assert((MovI->Opcode == AArch64::MOVNXi ||
5978                 MovI->Opcode == AArch64::MOVZXi) &&
5979                "Expected opcode");
5980       MIB1 = BuildMI(MF, MIMetadata(Root), TII->get(MovI->Opcode), NewVR)
5981                  .addImm(MovI->Op1)
5982                  .addImm(MovI->Op2);
5983     }
5984     InsInstrs.push_back(MIB1);
5985     InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0));
5986     MUL = genMaddR(MF, MRI, TII, Root, InsInstrs, 1, Opc, NewVR, RC);
5987     break;
5988   }
5989   case MachineCombinerPattern::MULSUBW_OP1:
5990   case MachineCombinerPattern::MULSUBX_OP1: {
5991     // MUL I=A,B,0
5992     // SUB R,I, C
5993     // ==> SUB  V, 0, C
5994     // ==> MADD R,A,B,V // = -C + A*B
5995     // --- Create(MADD);
5996     const TargetRegisterClass *SubRC;
5997     unsigned SubOpc, ZeroReg;
5998     if (Pattern == MachineCombinerPattern::MULSUBW_OP1) {
5999       SubOpc = AArch64::SUBWrr;
6000       SubRC = &AArch64::GPR32spRegClass;
6001       ZeroReg = AArch64::WZR;
6002       Opc = AArch64::MADDWrrr;
6003       RC = &AArch64::GPR32RegClass;
6004     } else {
6005       SubOpc = AArch64::SUBXrr;
6006       SubRC = &AArch64::GPR64spRegClass;
6007       ZeroReg = AArch64::XZR;
6008       Opc = AArch64::MADDXrrr;
6009       RC = &AArch64::GPR64RegClass;
6010     }
6011     Register NewVR = MRI.createVirtualRegister(SubRC);
6012     // SUB NewVR, 0, C
6013     MachineInstrBuilder MIB1 =
6014         BuildMI(MF, MIMetadata(Root), TII->get(SubOpc), NewVR)
6015             .addReg(ZeroReg)
6016             .add(Root.getOperand(2));
6017     InsInstrs.push_back(MIB1);
6018     InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0));
6019     MUL = genMaddR(MF, MRI, TII, Root, InsInstrs, 1, Opc, NewVR, RC);
6020     break;
6021   }
6022   case MachineCombinerPattern::MULSUBW_OP2:
6023   case MachineCombinerPattern::MULSUBX_OP2:
6024     // MUL I=A,B,0
6025     // SUB R,C,I
6026     // ==> MSUB R,A,B,C (computes C - A*B)
6027     // --- Create(MSUB);
6028     if (Pattern == MachineCombinerPattern::MULSUBW_OP2) {
6029       Opc = AArch64::MSUBWrrr;
6030       RC = &AArch64::GPR32RegClass;
6031     } else {
6032       Opc = AArch64::MSUBXrrr;
6033       RC = &AArch64::GPR64RegClass;
6034     }
6035     MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC);
6036     break;
6037   case MachineCombinerPattern::MULSUBWI_OP1:
6038   case MachineCombinerPattern::MULSUBXI_OP1: {
6039     // MUL I=A,B,0
6040     // SUB R,I, Imm
6041     // ==> MOV  V, -Imm
6042     // ==> MADD R,A,B,V // = -Imm + A*B
6043     // --- Create(MADD);
6044     const TargetRegisterClass *OrrRC;
6045     unsigned BitSize, OrrOpc, ZeroReg;
6046     if (Pattern == MachineCombinerPattern::MULSUBWI_OP1) {
6047       OrrOpc = AArch64::ORRWri;
6048       OrrRC = &AArch64::GPR32spRegClass;
6049       BitSize = 32;
6050       ZeroReg = AArch64::WZR;
6051       Opc = AArch64::MADDWrrr;
6052       RC = &AArch64::GPR32RegClass;
6053     } else {
6054       OrrOpc = AArch64::ORRXri;
6055       OrrRC = &AArch64::GPR64spRegClass;
6056       BitSize = 64;
6057       ZeroReg = AArch64::XZR;
6058       Opc = AArch64::MADDXrrr;
6059       RC = &AArch64::GPR64RegClass;
6060     }
6061     Register NewVR = MRI.createVirtualRegister(OrrRC);
6062     uint64_t Imm = Root.getOperand(2).getImm();
6063     if (Root.getOperand(3).isImm()) {
6064       unsigned Val = Root.getOperand(3).getImm();
6065       Imm = Imm << Val;
6066     }
6067     uint64_t UImm = SignExtend64(-Imm, BitSize);
6068     // Bail out unless the immediate can be composed via a single instruction.
6069     SmallVector<AArch64_IMM::ImmInsnModel, 4> Insn;
6070     AArch64_IMM::expandMOVImm(UImm, BitSize, Insn);
6071     if (Insn.size() != 1)
6072       return;
6073     auto MovI = Insn.begin();
6074     MachineInstrBuilder MIB1;
6075     // MOV is an alias for one of three instructions: movz, movn, and orr.
6076     if (MovI->Opcode == OrrOpc)
6077       MIB1 = BuildMI(MF, MIMetadata(Root), TII->get(OrrOpc), NewVR)
6078                  .addReg(ZeroReg)
6079                  .addImm(MovI->Op2);
6080     else {
6081       if (BitSize == 32)
6082         assert((MovI->Opcode == AArch64::MOVNWi ||
6083                 MovI->Opcode == AArch64::MOVZWi) &&
6084                "Expected opcode");
6085       else
6086         assert((MovI->Opcode == AArch64::MOVNXi ||
6087                 MovI->Opcode == AArch64::MOVZXi) &&
6088                "Expected opcode");
6089       MIB1 = BuildMI(MF, MIMetadata(Root), TII->get(MovI->Opcode), NewVR)
6090                  .addImm(MovI->Op1)
6091                  .addImm(MovI->Op2);
6092     }
6093     InsInstrs.push_back(MIB1);
6094     InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0));
6095     MUL = genMaddR(MF, MRI, TII, Root, InsInstrs, 1, Opc, NewVR, RC);
6096     break;
6097   }
6098 
6099   case MachineCombinerPattern::MULADDv8i8_OP1:
6100     Opc = AArch64::MLAv8i8;
6101     RC = &AArch64::FPR64RegClass;
6102     MUL = genFusedMultiplyAcc(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC);
6103     break;
6104   case MachineCombinerPattern::MULADDv8i8_OP2:
6105     Opc = AArch64::MLAv8i8;
6106     RC = &AArch64::FPR64RegClass;
6107     MUL = genFusedMultiplyAcc(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC);
6108     break;
6109   case MachineCombinerPattern::MULADDv16i8_OP1:
6110     Opc = AArch64::MLAv16i8;
6111     RC = &AArch64::FPR128RegClass;
6112     MUL = genFusedMultiplyAcc(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC);
6113     break;
6114   case MachineCombinerPattern::MULADDv16i8_OP2:
6115     Opc = AArch64::MLAv16i8;
6116     RC = &AArch64::FPR128RegClass;
6117     MUL = genFusedMultiplyAcc(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC);
6118     break;
6119   case MachineCombinerPattern::MULADDv4i16_OP1:
6120     Opc = AArch64::MLAv4i16;
6121     RC = &AArch64::FPR64RegClass;
6122     MUL = genFusedMultiplyAcc(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC);
6123     break;
6124   case MachineCombinerPattern::MULADDv4i16_OP2:
6125     Opc = AArch64::MLAv4i16;
6126     RC = &AArch64::FPR64RegClass;
6127     MUL = genFusedMultiplyAcc(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC);
6128     break;
6129   case MachineCombinerPattern::MULADDv8i16_OP1:
6130     Opc = AArch64::MLAv8i16;
6131     RC = &AArch64::FPR128RegClass;
6132     MUL = genFusedMultiplyAcc(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC);
6133     break;
6134   case MachineCombinerPattern::MULADDv8i16_OP2:
6135     Opc = AArch64::MLAv8i16;
6136     RC = &AArch64::FPR128RegClass;
6137     MUL = genFusedMultiplyAcc(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC);
6138     break;
6139   case MachineCombinerPattern::MULADDv2i32_OP1:
6140     Opc = AArch64::MLAv2i32;
6141     RC = &AArch64::FPR64RegClass;
6142     MUL = genFusedMultiplyAcc(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC);
6143     break;
6144   case MachineCombinerPattern::MULADDv2i32_OP2:
6145     Opc = AArch64::MLAv2i32;
6146     RC = &AArch64::FPR64RegClass;
6147     MUL = genFusedMultiplyAcc(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC);
6148     break;
6149   case MachineCombinerPattern::MULADDv4i32_OP1:
6150     Opc = AArch64::MLAv4i32;
6151     RC = &AArch64::FPR128RegClass;
6152     MUL = genFusedMultiplyAcc(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC);
6153     break;
6154   case MachineCombinerPattern::MULADDv4i32_OP2:
6155     Opc = AArch64::MLAv4i32;
6156     RC = &AArch64::FPR128RegClass;
6157     MUL = genFusedMultiplyAcc(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC);
6158     break;
6159 
6160   case MachineCombinerPattern::MULSUBv8i8_OP1:
6161     Opc = AArch64::MLAv8i8;
6162     RC = &AArch64::FPR64RegClass;
6163     MUL = genFusedMultiplyAccNeg(MF, MRI, TII, Root, InsInstrs,
6164                                  InstrIdxForVirtReg, 1, Opc, AArch64::NEGv8i8,
6165                                  RC);
6166     break;
6167   case MachineCombinerPattern::MULSUBv8i8_OP2:
6168     Opc = AArch64::MLSv8i8;
6169     RC = &AArch64::FPR64RegClass;
6170     MUL = genFusedMultiplyAcc(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC);
6171     break;
6172   case MachineCombinerPattern::MULSUBv16i8_OP1:
6173     Opc = AArch64::MLAv16i8;
6174     RC = &AArch64::FPR128RegClass;
6175     MUL = genFusedMultiplyAccNeg(MF, MRI, TII, Root, InsInstrs,
6176                                  InstrIdxForVirtReg, 1, Opc, AArch64::NEGv16i8,
6177                                  RC);
6178     break;
6179   case MachineCombinerPattern::MULSUBv16i8_OP2:
6180     Opc = AArch64::MLSv16i8;
6181     RC = &AArch64::FPR128RegClass;
6182     MUL = genFusedMultiplyAcc(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC);
6183     break;
6184   case MachineCombinerPattern::MULSUBv4i16_OP1:
6185     Opc = AArch64::MLAv4i16;
6186     RC = &AArch64::FPR64RegClass;
6187     MUL = genFusedMultiplyAccNeg(MF, MRI, TII, Root, InsInstrs,
6188                                  InstrIdxForVirtReg, 1, Opc, AArch64::NEGv4i16,
6189                                  RC);
6190     break;
6191   case MachineCombinerPattern::MULSUBv4i16_OP2:
6192     Opc = AArch64::MLSv4i16;
6193     RC = &AArch64::FPR64RegClass;
6194     MUL = genFusedMultiplyAcc(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC);
6195     break;
6196   case MachineCombinerPattern::MULSUBv8i16_OP1:
6197     Opc = AArch64::MLAv8i16;
6198     RC = &AArch64::FPR128RegClass;
6199     MUL = genFusedMultiplyAccNeg(MF, MRI, TII, Root, InsInstrs,
6200                                  InstrIdxForVirtReg, 1, Opc, AArch64::NEGv8i16,
6201                                  RC);
6202     break;
6203   case MachineCombinerPattern::MULSUBv8i16_OP2:
6204     Opc = AArch64::MLSv8i16;
6205     RC = &AArch64::FPR128RegClass;
6206     MUL = genFusedMultiplyAcc(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC);
6207     break;
6208   case MachineCombinerPattern::MULSUBv2i32_OP1:
6209     Opc = AArch64::MLAv2i32;
6210     RC = &AArch64::FPR64RegClass;
6211     MUL = genFusedMultiplyAccNeg(MF, MRI, TII, Root, InsInstrs,
6212                                  InstrIdxForVirtReg, 1, Opc, AArch64::NEGv2i32,
6213                                  RC);
6214     break;
6215   case MachineCombinerPattern::MULSUBv2i32_OP2:
6216     Opc = AArch64::MLSv2i32;
6217     RC = &AArch64::FPR64RegClass;
6218     MUL = genFusedMultiplyAcc(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC);
6219     break;
6220   case MachineCombinerPattern::MULSUBv4i32_OP1:
6221     Opc = AArch64::MLAv4i32;
6222     RC = &AArch64::FPR128RegClass;
6223     MUL = genFusedMultiplyAccNeg(MF, MRI, TII, Root, InsInstrs,
6224                                  InstrIdxForVirtReg, 1, Opc, AArch64::NEGv4i32,
6225                                  RC);
6226     break;
6227   case MachineCombinerPattern::MULSUBv4i32_OP2:
6228     Opc = AArch64::MLSv4i32;
6229     RC = &AArch64::FPR128RegClass;
6230     MUL = genFusedMultiplyAcc(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC);
6231     break;
6232 
6233   case MachineCombinerPattern::MULADDv4i16_indexed_OP1:
6234     Opc = AArch64::MLAv4i16_indexed;
6235     RC = &AArch64::FPR64RegClass;
6236     MUL = genFusedMultiplyIdx(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC);
6237     break;
6238   case MachineCombinerPattern::MULADDv4i16_indexed_OP2:
6239     Opc = AArch64::MLAv4i16_indexed;
6240     RC = &AArch64::FPR64RegClass;
6241     MUL = genFusedMultiplyIdx(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC);
6242     break;
6243   case MachineCombinerPattern::MULADDv8i16_indexed_OP1:
6244     Opc = AArch64::MLAv8i16_indexed;
6245     RC = &AArch64::FPR128RegClass;
6246     MUL = genFusedMultiplyIdx(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC);
6247     break;
6248   case MachineCombinerPattern::MULADDv8i16_indexed_OP2:
6249     Opc = AArch64::MLAv8i16_indexed;
6250     RC = &AArch64::FPR128RegClass;
6251     MUL = genFusedMultiplyIdx(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC);
6252     break;
6253   case MachineCombinerPattern::MULADDv2i32_indexed_OP1:
6254     Opc = AArch64::MLAv2i32_indexed;
6255     RC = &AArch64::FPR64RegClass;
6256     MUL = genFusedMultiplyIdx(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC);
6257     break;
6258   case MachineCombinerPattern::MULADDv2i32_indexed_OP2:
6259     Opc = AArch64::MLAv2i32_indexed;
6260     RC = &AArch64::FPR64RegClass;
6261     MUL = genFusedMultiplyIdx(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC);
6262     break;
6263   case MachineCombinerPattern::MULADDv4i32_indexed_OP1:
6264     Opc = AArch64::MLAv4i32_indexed;
6265     RC = &AArch64::FPR128RegClass;
6266     MUL = genFusedMultiplyIdx(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC);
6267     break;
6268   case MachineCombinerPattern::MULADDv4i32_indexed_OP2:
6269     Opc = AArch64::MLAv4i32_indexed;
6270     RC = &AArch64::FPR128RegClass;
6271     MUL = genFusedMultiplyIdx(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC);
6272     break;
6273 
6274   case MachineCombinerPattern::MULSUBv4i16_indexed_OP1:
6275     Opc = AArch64::MLAv4i16_indexed;
6276     RC = &AArch64::FPR64RegClass;
6277     MUL = genFusedMultiplyIdxNeg(MF, MRI, TII, Root, InsInstrs,
6278                                  InstrIdxForVirtReg, 1, Opc, AArch64::NEGv4i16,
6279                                  RC);
6280     break;
6281   case MachineCombinerPattern::MULSUBv4i16_indexed_OP2:
6282     Opc = AArch64::MLSv4i16_indexed;
6283     RC = &AArch64::FPR64RegClass;
6284     MUL = genFusedMultiplyIdx(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC);
6285     break;
6286   case MachineCombinerPattern::MULSUBv8i16_indexed_OP1:
6287     Opc = AArch64::MLAv8i16_indexed;
6288     RC = &AArch64::FPR128RegClass;
6289     MUL = genFusedMultiplyIdxNeg(MF, MRI, TII, Root, InsInstrs,
6290                                  InstrIdxForVirtReg, 1, Opc, AArch64::NEGv8i16,
6291                                  RC);
6292     break;
6293   case MachineCombinerPattern::MULSUBv8i16_indexed_OP2:
6294     Opc = AArch64::MLSv8i16_indexed;
6295     RC = &AArch64::FPR128RegClass;
6296     MUL = genFusedMultiplyIdx(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC);
6297     break;
6298   case MachineCombinerPattern::MULSUBv2i32_indexed_OP1:
6299     Opc = AArch64::MLAv2i32_indexed;
6300     RC = &AArch64::FPR64RegClass;
6301     MUL = genFusedMultiplyIdxNeg(MF, MRI, TII, Root, InsInstrs,
6302                                  InstrIdxForVirtReg, 1, Opc, AArch64::NEGv2i32,
6303                                  RC);
6304     break;
6305   case MachineCombinerPattern::MULSUBv2i32_indexed_OP2:
6306     Opc = AArch64::MLSv2i32_indexed;
6307     RC = &AArch64::FPR64RegClass;
6308     MUL = genFusedMultiplyIdx(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC);
6309     break;
6310   case MachineCombinerPattern::MULSUBv4i32_indexed_OP1:
6311     Opc = AArch64::MLAv4i32_indexed;
6312     RC = &AArch64::FPR128RegClass;
6313     MUL = genFusedMultiplyIdxNeg(MF, MRI, TII, Root, InsInstrs,
6314                                  InstrIdxForVirtReg, 1, Opc, AArch64::NEGv4i32,
6315                                  RC);
6316     break;
6317   case MachineCombinerPattern::MULSUBv4i32_indexed_OP2:
6318     Opc = AArch64::MLSv4i32_indexed;
6319     RC = &AArch64::FPR128RegClass;
6320     MUL = genFusedMultiplyIdx(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC);
6321     break;
6322 
6323   // Floating Point Support
6324   case MachineCombinerPattern::FMULADDH_OP1:
6325     Opc = AArch64::FMADDHrrr;
6326     RC = &AArch64::FPR16RegClass;
6327     MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC);
6328     break;
6329   case MachineCombinerPattern::FMULADDS_OP1:
6330     Opc = AArch64::FMADDSrrr;
6331     RC = &AArch64::FPR32RegClass;
6332     MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC);
6333     break;
6334   case MachineCombinerPattern::FMULADDD_OP1:
6335     Opc = AArch64::FMADDDrrr;
6336     RC = &AArch64::FPR64RegClass;
6337     MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC);
6338     break;
6339 
6340   case MachineCombinerPattern::FMULADDH_OP2:
6341     Opc = AArch64::FMADDHrrr;
6342     RC = &AArch64::FPR16RegClass;
6343     MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC);
6344     break;
6345   case MachineCombinerPattern::FMULADDS_OP2:
6346     Opc = AArch64::FMADDSrrr;
6347     RC = &AArch64::FPR32RegClass;
6348     MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC);
6349     break;
6350   case MachineCombinerPattern::FMULADDD_OP2:
6351     Opc = AArch64::FMADDDrrr;
6352     RC = &AArch64::FPR64RegClass;
6353     MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC);
6354     break;
6355 
6356   case MachineCombinerPattern::FMLAv1i32_indexed_OP1:
6357     Opc = AArch64::FMLAv1i32_indexed;
6358     RC = &AArch64::FPR32RegClass;
6359     MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC,
6360                            FMAInstKind::Indexed);
6361     break;
6362   case MachineCombinerPattern::FMLAv1i32_indexed_OP2:
6363     Opc = AArch64::FMLAv1i32_indexed;
6364     RC = &AArch64::FPR32RegClass;
6365     MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
6366                            FMAInstKind::Indexed);
6367     break;
6368 
6369   case MachineCombinerPattern::FMLAv1i64_indexed_OP1:
6370     Opc = AArch64::FMLAv1i64_indexed;
6371     RC = &AArch64::FPR64RegClass;
6372     MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC,
6373                            FMAInstKind::Indexed);
6374     break;
6375   case MachineCombinerPattern::FMLAv1i64_indexed_OP2:
6376     Opc = AArch64::FMLAv1i64_indexed;
6377     RC = &AArch64::FPR64RegClass;
6378     MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
6379                            FMAInstKind::Indexed);
6380     break;
6381 
6382   case MachineCombinerPattern::FMLAv4i16_indexed_OP1:
6383     RC = &AArch64::FPR64RegClass;
6384     Opc = AArch64::FMLAv4i16_indexed;
6385     MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC,
6386                            FMAInstKind::Indexed);
6387     break;
6388   case MachineCombinerPattern::FMLAv4f16_OP1:
6389     RC = &AArch64::FPR64RegClass;
6390     Opc = AArch64::FMLAv4f16;
6391     MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC,
6392                            FMAInstKind::Accumulator);
6393     break;
6394   case MachineCombinerPattern::FMLAv4i16_indexed_OP2:
6395     RC = &AArch64::FPR64RegClass;
6396     Opc = AArch64::FMLAv4i16_indexed;
6397     MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
6398                            FMAInstKind::Indexed);
6399     break;
6400   case MachineCombinerPattern::FMLAv4f16_OP2:
6401     RC = &AArch64::FPR64RegClass;
6402     Opc = AArch64::FMLAv4f16;
6403     MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
6404                            FMAInstKind::Accumulator);
6405     break;
6406 
6407   case MachineCombinerPattern::FMLAv2i32_indexed_OP1:
6408   case MachineCombinerPattern::FMLAv2f32_OP1:
6409     RC = &AArch64::FPR64RegClass;
6410     if (Pattern == MachineCombinerPattern::FMLAv2i32_indexed_OP1) {
6411       Opc = AArch64::FMLAv2i32_indexed;
6412       MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC,
6413                              FMAInstKind::Indexed);
6414     } else {
6415       Opc = AArch64::FMLAv2f32;
6416       MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC,
6417                              FMAInstKind::Accumulator);
6418     }
6419     break;
6420   case MachineCombinerPattern::FMLAv2i32_indexed_OP2:
6421   case MachineCombinerPattern::FMLAv2f32_OP2:
6422     RC = &AArch64::FPR64RegClass;
6423     if (Pattern == MachineCombinerPattern::FMLAv2i32_indexed_OP2) {
6424       Opc = AArch64::FMLAv2i32_indexed;
6425       MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
6426                              FMAInstKind::Indexed);
6427     } else {
6428       Opc = AArch64::FMLAv2f32;
6429       MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
6430                              FMAInstKind::Accumulator);
6431     }
6432     break;
6433 
6434   case MachineCombinerPattern::FMLAv8i16_indexed_OP1:
6435     RC = &AArch64::FPR128RegClass;
6436     Opc = AArch64::FMLAv8i16_indexed;
6437     MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC,
6438                            FMAInstKind::Indexed);
6439     break;
6440   case MachineCombinerPattern::FMLAv8f16_OP1:
6441     RC = &AArch64::FPR128RegClass;
6442     Opc = AArch64::FMLAv8f16;
6443     MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC,
6444                            FMAInstKind::Accumulator);
6445     break;
6446   case MachineCombinerPattern::FMLAv8i16_indexed_OP2:
6447     RC = &AArch64::FPR128RegClass;
6448     Opc = AArch64::FMLAv8i16_indexed;
6449     MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
6450                            FMAInstKind::Indexed);
6451     break;
6452   case MachineCombinerPattern::FMLAv8f16_OP2:
6453     RC = &AArch64::FPR128RegClass;
6454     Opc = AArch64::FMLAv8f16;
6455     MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
6456                            FMAInstKind::Accumulator);
6457     break;
6458 
6459   case MachineCombinerPattern::FMLAv2i64_indexed_OP1:
6460   case MachineCombinerPattern::FMLAv2f64_OP1:
6461     RC = &AArch64::FPR128RegClass;
6462     if (Pattern == MachineCombinerPattern::FMLAv2i64_indexed_OP1) {
6463       Opc = AArch64::FMLAv2i64_indexed;
6464       MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC,
6465                              FMAInstKind::Indexed);
6466     } else {
6467       Opc = AArch64::FMLAv2f64;
6468       MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC,
6469                              FMAInstKind::Accumulator);
6470     }
6471     break;
6472   case MachineCombinerPattern::FMLAv2i64_indexed_OP2:
6473   case MachineCombinerPattern::FMLAv2f64_OP2:
6474     RC = &AArch64::FPR128RegClass;
6475     if (Pattern == MachineCombinerPattern::FMLAv2i64_indexed_OP2) {
6476       Opc = AArch64::FMLAv2i64_indexed;
6477       MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
6478                              FMAInstKind::Indexed);
6479     } else {
6480       Opc = AArch64::FMLAv2f64;
6481       MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
6482                              FMAInstKind::Accumulator);
6483     }
6484     break;
6485 
6486   case MachineCombinerPattern::FMLAv4i32_indexed_OP1:
6487   case MachineCombinerPattern::FMLAv4f32_OP1:
6488     RC = &AArch64::FPR128RegClass;
6489     if (Pattern == MachineCombinerPattern::FMLAv4i32_indexed_OP1) {
6490       Opc = AArch64::FMLAv4i32_indexed;
6491       MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC,
6492                              FMAInstKind::Indexed);
6493     } else {
6494       Opc = AArch64::FMLAv4f32;
6495       MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC,
6496                              FMAInstKind::Accumulator);
6497     }
6498     break;
6499 
6500   case MachineCombinerPattern::FMLAv4i32_indexed_OP2:
6501   case MachineCombinerPattern::FMLAv4f32_OP2:
6502     RC = &AArch64::FPR128RegClass;
6503     if (Pattern == MachineCombinerPattern::FMLAv4i32_indexed_OP2) {
6504       Opc = AArch64::FMLAv4i32_indexed;
6505       MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
6506                              FMAInstKind::Indexed);
6507     } else {
6508       Opc = AArch64::FMLAv4f32;
6509       MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
6510                              FMAInstKind::Accumulator);
6511     }
6512     break;
6513 
6514   case MachineCombinerPattern::FMULSUBH_OP1:
6515     Opc = AArch64::FNMSUBHrrr;
6516     RC = &AArch64::FPR16RegClass;
6517     MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC);
6518     break;
6519   case MachineCombinerPattern::FMULSUBS_OP1:
6520     Opc = AArch64::FNMSUBSrrr;
6521     RC = &AArch64::FPR32RegClass;
6522     MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC);
6523     break;
6524   case MachineCombinerPattern::FMULSUBD_OP1:
6525     Opc = AArch64::FNMSUBDrrr;
6526     RC = &AArch64::FPR64RegClass;
6527     MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC);
6528     break;
6529 
6530   case MachineCombinerPattern::FNMULSUBH_OP1:
6531     Opc = AArch64::FNMADDHrrr;
6532     RC = &AArch64::FPR16RegClass;
6533     MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC);
6534     break;
6535   case MachineCombinerPattern::FNMULSUBS_OP1:
6536     Opc = AArch64::FNMADDSrrr;
6537     RC = &AArch64::FPR32RegClass;
6538     MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC);
6539     break;
6540   case MachineCombinerPattern::FNMULSUBD_OP1:
6541     Opc = AArch64::FNMADDDrrr;
6542     RC = &AArch64::FPR64RegClass;
6543     MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC);
6544     break;
6545 
6546   case MachineCombinerPattern::FMULSUBH_OP2:
6547     Opc = AArch64::FMSUBHrrr;
6548     RC = &AArch64::FPR16RegClass;
6549     MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC);
6550     break;
6551   case MachineCombinerPattern::FMULSUBS_OP2:
6552     Opc = AArch64::FMSUBSrrr;
6553     RC = &AArch64::FPR32RegClass;
6554     MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC);
6555     break;
6556   case MachineCombinerPattern::FMULSUBD_OP2:
6557     Opc = AArch64::FMSUBDrrr;
6558     RC = &AArch64::FPR64RegClass;
6559     MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC);
6560     break;
6561 
6562   case MachineCombinerPattern::FMLSv1i32_indexed_OP2:
6563     Opc = AArch64::FMLSv1i32_indexed;
6564     RC = &AArch64::FPR32RegClass;
6565     MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
6566                            FMAInstKind::Indexed);
6567     break;
6568 
6569   case MachineCombinerPattern::FMLSv1i64_indexed_OP2:
6570     Opc = AArch64::FMLSv1i64_indexed;
6571     RC = &AArch64::FPR64RegClass;
6572     MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
6573                            FMAInstKind::Indexed);
6574     break;
6575 
6576   case MachineCombinerPattern::FMLSv4f16_OP1:
6577   case MachineCombinerPattern::FMLSv4i16_indexed_OP1: {
6578     RC = &AArch64::FPR64RegClass;
6579     Register NewVR = MRI.createVirtualRegister(RC);
6580     MachineInstrBuilder MIB1 =
6581         BuildMI(MF, MIMetadata(Root), TII->get(AArch64::FNEGv4f16), NewVR)
6582             .add(Root.getOperand(2));
6583     InsInstrs.push_back(MIB1);
6584     InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0));
6585     if (Pattern == MachineCombinerPattern::FMLSv4f16_OP1) {
6586       Opc = AArch64::FMLAv4f16;
6587       MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC,
6588                              FMAInstKind::Accumulator, &NewVR);
6589     } else {
6590       Opc = AArch64::FMLAv4i16_indexed;
6591       MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC,
6592                              FMAInstKind::Indexed, &NewVR);
6593     }
6594     break;
6595   }
6596   case MachineCombinerPattern::FMLSv4f16_OP2:
6597     RC = &AArch64::FPR64RegClass;
6598     Opc = AArch64::FMLSv4f16;
6599     MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
6600                            FMAInstKind::Accumulator);
6601     break;
6602   case MachineCombinerPattern::FMLSv4i16_indexed_OP2:
6603     RC = &AArch64::FPR64RegClass;
6604     Opc = AArch64::FMLSv4i16_indexed;
6605     MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
6606                            FMAInstKind::Indexed);
6607     break;
6608 
6609   case MachineCombinerPattern::FMLSv2f32_OP2:
6610   case MachineCombinerPattern::FMLSv2i32_indexed_OP2:
6611     RC = &AArch64::FPR64RegClass;
6612     if (Pattern == MachineCombinerPattern::FMLSv2i32_indexed_OP2) {
6613       Opc = AArch64::FMLSv2i32_indexed;
6614       MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
6615                              FMAInstKind::Indexed);
6616     } else {
6617       Opc = AArch64::FMLSv2f32;
6618       MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
6619                              FMAInstKind::Accumulator);
6620     }
6621     break;
6622 
6623   case MachineCombinerPattern::FMLSv8f16_OP1:
6624   case MachineCombinerPattern::FMLSv8i16_indexed_OP1: {
6625     RC = &AArch64::FPR128RegClass;
6626     Register NewVR = MRI.createVirtualRegister(RC);
6627     MachineInstrBuilder MIB1 =
6628         BuildMI(MF, MIMetadata(Root), TII->get(AArch64::FNEGv8f16), NewVR)
6629             .add(Root.getOperand(2));
6630     InsInstrs.push_back(MIB1);
6631     InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0));
6632     if (Pattern == MachineCombinerPattern::FMLSv8f16_OP1) {
6633       Opc = AArch64::FMLAv8f16;
6634       MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC,
6635                              FMAInstKind::Accumulator, &NewVR);
6636     } else {
6637       Opc = AArch64::FMLAv8i16_indexed;
6638       MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC,
6639                              FMAInstKind::Indexed, &NewVR);
6640     }
6641     break;
6642   }
6643   case MachineCombinerPattern::FMLSv8f16_OP2:
6644     RC = &AArch64::FPR128RegClass;
6645     Opc = AArch64::FMLSv8f16;
6646     MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
6647                            FMAInstKind::Accumulator);
6648     break;
6649   case MachineCombinerPattern::FMLSv8i16_indexed_OP2:
6650     RC = &AArch64::FPR128RegClass;
6651     Opc = AArch64::FMLSv8i16_indexed;
6652     MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
6653                            FMAInstKind::Indexed);
6654     break;
6655 
6656   case MachineCombinerPattern::FMLSv2f64_OP2:
6657   case MachineCombinerPattern::FMLSv2i64_indexed_OP2:
6658     RC = &AArch64::FPR128RegClass;
6659     if (Pattern == MachineCombinerPattern::FMLSv2i64_indexed_OP2) {
6660       Opc = AArch64::FMLSv2i64_indexed;
6661       MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
6662                              FMAInstKind::Indexed);
6663     } else {
6664       Opc = AArch64::FMLSv2f64;
6665       MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
6666                              FMAInstKind::Accumulator);
6667     }
6668     break;
6669 
6670   case MachineCombinerPattern::FMLSv4f32_OP2:
6671   case MachineCombinerPattern::FMLSv4i32_indexed_OP2:
6672     RC = &AArch64::FPR128RegClass;
6673     if (Pattern == MachineCombinerPattern::FMLSv4i32_indexed_OP2) {
6674       Opc = AArch64::FMLSv4i32_indexed;
6675       MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
6676                              FMAInstKind::Indexed);
6677     } else {
6678       Opc = AArch64::FMLSv4f32;
6679       MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
6680                              FMAInstKind::Accumulator);
6681     }
6682     break;
6683   case MachineCombinerPattern::FMLSv2f32_OP1:
6684   case MachineCombinerPattern::FMLSv2i32_indexed_OP1: {
6685     RC = &AArch64::FPR64RegClass;
6686     Register NewVR = MRI.createVirtualRegister(RC);
6687     MachineInstrBuilder MIB1 =
6688         BuildMI(MF, MIMetadata(Root), TII->get(AArch64::FNEGv2f32), NewVR)
6689             .add(Root.getOperand(2));
6690     InsInstrs.push_back(MIB1);
6691     InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0));
6692     if (Pattern == MachineCombinerPattern::FMLSv2i32_indexed_OP1) {
6693       Opc = AArch64::FMLAv2i32_indexed;
6694       MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC,
6695                              FMAInstKind::Indexed, &NewVR);
6696     } else {
6697       Opc = AArch64::FMLAv2f32;
6698       MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC,
6699                              FMAInstKind::Accumulator, &NewVR);
6700     }
6701     break;
6702   }
6703   case MachineCombinerPattern::FMLSv4f32_OP1:
6704   case MachineCombinerPattern::FMLSv4i32_indexed_OP1: {
6705     RC = &AArch64::FPR128RegClass;
6706     Register NewVR = MRI.createVirtualRegister(RC);
6707     MachineInstrBuilder MIB1 =
6708         BuildMI(MF, MIMetadata(Root), TII->get(AArch64::FNEGv4f32), NewVR)
6709             .add(Root.getOperand(2));
6710     InsInstrs.push_back(MIB1);
6711     InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0));
6712     if (Pattern == MachineCombinerPattern::FMLSv4i32_indexed_OP1) {
6713       Opc = AArch64::FMLAv4i32_indexed;
6714       MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC,
6715                              FMAInstKind::Indexed, &NewVR);
6716     } else {
6717       Opc = AArch64::FMLAv4f32;
6718       MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC,
6719                              FMAInstKind::Accumulator, &NewVR);
6720     }
6721     break;
6722   }
6723   case MachineCombinerPattern::FMLSv2f64_OP1:
6724   case MachineCombinerPattern::FMLSv2i64_indexed_OP1: {
6725     RC = &AArch64::FPR128RegClass;
6726     Register NewVR = MRI.createVirtualRegister(RC);
6727     MachineInstrBuilder MIB1 =
6728         BuildMI(MF, MIMetadata(Root), TII->get(AArch64::FNEGv2f64), NewVR)
6729             .add(Root.getOperand(2));
6730     InsInstrs.push_back(MIB1);
6731     InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0));
6732     if (Pattern == MachineCombinerPattern::FMLSv2i64_indexed_OP1) {
6733       Opc = AArch64::FMLAv2i64_indexed;
6734       MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC,
6735                              FMAInstKind::Indexed, &NewVR);
6736     } else {
6737       Opc = AArch64::FMLAv2f64;
6738       MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC,
6739                              FMAInstKind::Accumulator, &NewVR);
6740     }
6741     break;
6742   }
6743   case MachineCombinerPattern::FMULv2i32_indexed_OP1:
6744   case MachineCombinerPattern::FMULv2i32_indexed_OP2: {
6745     unsigned IdxDupOp =
6746         (Pattern == MachineCombinerPattern::FMULv2i32_indexed_OP1) ? 1 : 2;
6747     genIndexedMultiply(Root, InsInstrs, IdxDupOp, AArch64::FMULv2i32_indexed,
6748                        &AArch64::FPR128RegClass, MRI);
6749     break;
6750   }
6751   case MachineCombinerPattern::FMULv2i64_indexed_OP1:
6752   case MachineCombinerPattern::FMULv2i64_indexed_OP2: {
6753     unsigned IdxDupOp =
6754         (Pattern == MachineCombinerPattern::FMULv2i64_indexed_OP1) ? 1 : 2;
6755     genIndexedMultiply(Root, InsInstrs, IdxDupOp, AArch64::FMULv2i64_indexed,
6756                        &AArch64::FPR128RegClass, MRI);
6757     break;
6758   }
6759   case MachineCombinerPattern::FMULv4i16_indexed_OP1:
6760   case MachineCombinerPattern::FMULv4i16_indexed_OP2: {
6761     unsigned IdxDupOp =
6762         (Pattern == MachineCombinerPattern::FMULv4i16_indexed_OP1) ? 1 : 2;
6763     genIndexedMultiply(Root, InsInstrs, IdxDupOp, AArch64::FMULv4i16_indexed,
6764                        &AArch64::FPR128_loRegClass, MRI);
6765     break;
6766   }
6767   case MachineCombinerPattern::FMULv4i32_indexed_OP1:
6768   case MachineCombinerPattern::FMULv4i32_indexed_OP2: {
6769     unsigned IdxDupOp =
6770         (Pattern == MachineCombinerPattern::FMULv4i32_indexed_OP1) ? 1 : 2;
6771     genIndexedMultiply(Root, InsInstrs, IdxDupOp, AArch64::FMULv4i32_indexed,
6772                        &AArch64::FPR128RegClass, MRI);
6773     break;
6774   }
6775   case MachineCombinerPattern::FMULv8i16_indexed_OP1:
6776   case MachineCombinerPattern::FMULv8i16_indexed_OP2: {
6777     unsigned IdxDupOp =
6778         (Pattern == MachineCombinerPattern::FMULv8i16_indexed_OP1) ? 1 : 2;
6779     genIndexedMultiply(Root, InsInstrs, IdxDupOp, AArch64::FMULv8i16_indexed,
6780                        &AArch64::FPR128_loRegClass, MRI);
6781     break;
6782   }
6783   } // end switch (Pattern)
6784   // Record MUL and ADD/SUB for deletion
6785   if (MUL)
6786     DelInstrs.push_back(MUL);
6787   DelInstrs.push_back(&Root);
6788 
6789   // Set the flags on the inserted instructions to be the merged flags of the
6790   // instructions that we have combined.
6791   uint16_t Flags = Root.getFlags();
6792   if (MUL)
6793     Flags = Root.mergeFlagsWith(*MUL);
6794   for (auto *MI : InsInstrs)
6795     MI->setFlags(Flags);
6796 }
6797 
6798 /// Replace csincr-branch sequence by simple conditional branch
6799 ///
6800 /// Examples:
6801 /// 1. \code
6802 ///   csinc  w9, wzr, wzr, <condition code>
6803 ///   tbnz   w9, #0, 0x44
6804 ///    \endcode
6805 /// to
6806 ///    \code
6807 ///   b.<inverted condition code>
6808 ///    \endcode
6809 ///
6810 /// 2. \code
6811 ///   csinc w9, wzr, wzr, <condition code>
6812 ///   tbz   w9, #0, 0x44
6813 ///    \endcode
6814 /// to
6815 ///    \code
6816 ///   b.<condition code>
6817 ///    \endcode
6818 ///
6819 /// Replace compare and branch sequence by TBZ/TBNZ instruction when the
6820 /// compare's constant operand is power of 2.
6821 ///
6822 /// Examples:
6823 ///    \code
6824 ///   and  w8, w8, #0x400
6825 ///   cbnz w8, L1
6826 ///    \endcode
6827 /// to
6828 ///    \code
6829 ///   tbnz w8, #10, L1
6830 ///    \endcode
6831 ///
6832 /// \param  MI Conditional Branch
6833 /// \return True when the simple conditional branch is generated
6834 ///
6835 bool AArch64InstrInfo::optimizeCondBranch(MachineInstr &MI) const {
6836   bool IsNegativeBranch = false;
6837   bool IsTestAndBranch = false;
6838   unsigned TargetBBInMI = 0;
6839   switch (MI.getOpcode()) {
6840   default:
6841     llvm_unreachable("Unknown branch instruction?");
6842   case AArch64::Bcc:
6843     return false;
6844   case AArch64::CBZW:
6845   case AArch64::CBZX:
6846     TargetBBInMI = 1;
6847     break;
6848   case AArch64::CBNZW:
6849   case AArch64::CBNZX:
6850     TargetBBInMI = 1;
6851     IsNegativeBranch = true;
6852     break;
6853   case AArch64::TBZW:
6854   case AArch64::TBZX:
6855     TargetBBInMI = 2;
6856     IsTestAndBranch = true;
6857     break;
6858   case AArch64::TBNZW:
6859   case AArch64::TBNZX:
6860     TargetBBInMI = 2;
6861     IsNegativeBranch = true;
6862     IsTestAndBranch = true;
6863     break;
6864   }
6865   // So we increment a zero register and test for bits other
6866   // than bit 0? Conservatively bail out in case the verifier
6867   // missed this case.
6868   if (IsTestAndBranch && MI.getOperand(1).getImm())
6869     return false;
6870 
6871   // Find Definition.
6872   assert(MI.getParent() && "Incomplete machine instruction\n");
6873   MachineBasicBlock *MBB = MI.getParent();
6874   MachineFunction *MF = MBB->getParent();
6875   MachineRegisterInfo *MRI = &MF->getRegInfo();
6876   Register VReg = MI.getOperand(0).getReg();
6877   if (!VReg.isVirtual())
6878     return false;
6879 
6880   MachineInstr *DefMI = MRI->getVRegDef(VReg);
6881 
6882   // Look through COPY instructions to find definition.
6883   while (DefMI->isCopy()) {
6884     Register CopyVReg = DefMI->getOperand(1).getReg();
6885     if (!MRI->hasOneNonDBGUse(CopyVReg))
6886       return false;
6887     if (!MRI->hasOneDef(CopyVReg))
6888       return false;
6889     DefMI = MRI->getVRegDef(CopyVReg);
6890   }
6891 
6892   switch (DefMI->getOpcode()) {
6893   default:
6894     return false;
6895   // Fold AND into a TBZ/TBNZ if constant operand is power of 2.
6896   case AArch64::ANDWri:
6897   case AArch64::ANDXri: {
6898     if (IsTestAndBranch)
6899       return false;
6900     if (DefMI->getParent() != MBB)
6901       return false;
6902     if (!MRI->hasOneNonDBGUse(VReg))
6903       return false;
6904 
6905     bool Is32Bit = (DefMI->getOpcode() == AArch64::ANDWri);
6906     uint64_t Mask = AArch64_AM::decodeLogicalImmediate(
6907         DefMI->getOperand(2).getImm(), Is32Bit ? 32 : 64);
6908     if (!isPowerOf2_64(Mask))
6909       return false;
6910 
6911     MachineOperand &MO = DefMI->getOperand(1);
6912     Register NewReg = MO.getReg();
6913     if (!NewReg.isVirtual())
6914       return false;
6915 
6916     assert(!MRI->def_empty(NewReg) && "Register must be defined.");
6917 
6918     MachineBasicBlock &RefToMBB = *MBB;
6919     MachineBasicBlock *TBB = MI.getOperand(1).getMBB();
6920     DebugLoc DL = MI.getDebugLoc();
6921     unsigned Imm = Log2_64(Mask);
6922     unsigned Opc = (Imm < 32)
6923                        ? (IsNegativeBranch ? AArch64::TBNZW : AArch64::TBZW)
6924                        : (IsNegativeBranch ? AArch64::TBNZX : AArch64::TBZX);
6925     MachineInstr *NewMI = BuildMI(RefToMBB, MI, DL, get(Opc))
6926                               .addReg(NewReg)
6927                               .addImm(Imm)
6928                               .addMBB(TBB);
6929     // Register lives on into the new TBZ/TBNZ now.
6930     MO.setIsKill(false);
6931 
6932     // For bit positions smaller than 32, we have to use the 32-bit (W)
6933     // variant in all cases, because the 64-bit (X) variant cannot
6934     // encode them.
6935     // Therefore, if the input register is 64-bit, we need to take its
6936     // 32-bit sub-register.
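    // For example (illustrative register names), with a 64-bit AND:
    //    and  x8, x9, #0x4
    //    cbnz x8, L1
    // the branch becomes a test of bit 2 on the 32-bit sub-register:
    //    tbnz w9, #2, L1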
6937     if (!Is32Bit && Imm < 32)
6938       NewMI->getOperand(0).setSubReg(AArch64::sub_32);
6939     MI.eraseFromParent();
6940     return true;
6941   }
6942   // Look for CSINC
6943   case AArch64::CSINCWr:
6944   case AArch64::CSINCXr: {
6945     if (!(DefMI->getOperand(1).getReg() == AArch64::WZR &&
6946           DefMI->getOperand(2).getReg() == AArch64::WZR) &&
6947         !(DefMI->getOperand(1).getReg() == AArch64::XZR &&
6948           DefMI->getOperand(2).getReg() == AArch64::XZR))
6949       return false;
6950 
6951     if (DefMI->findRegisterDefOperandIdx(AArch64::NZCV, true) != -1)
6952       return false;
6953 
6954     AArch64CC::CondCode CC = (AArch64CC::CondCode)DefMI->getOperand(3).getImm();
6955     // Convert only when the condition code is not modified between
6956     // the CSINC and the branch. The CC may be used by other
6957     // instructions in between.
6958     if (areCFlagsAccessedBetweenInstrs(DefMI, MI, &getRegisterInfo(), AK_Write))
6959       return false;
6960     MachineBasicBlock &RefToMBB = *MBB;
6961     MachineBasicBlock *TBB = MI.getOperand(TargetBBInMI).getMBB();
6962     DebugLoc DL = MI.getDebugLoc();
6963     if (IsNegativeBranch)
6964       CC = AArch64CC::getInvertedCondCode(CC);
6965     BuildMI(RefToMBB, MI, DL, get(AArch64::Bcc)).addImm(CC).addMBB(TBB);
6966     MI.eraseFromParent();
6967     return true;
6968   }
6969   }
6970 }
6971 
6972 std::pair<unsigned, unsigned>
6973 AArch64InstrInfo::decomposeMachineOperandsTargetFlags(unsigned TF) const {
6974   const unsigned Mask = AArch64II::MO_FRAGMENT;
6975   return std::make_pair(TF & Mask, TF & ~Mask);
6976 }
6977 
6978 ArrayRef<std::pair<unsigned, const char *>>
6979 AArch64InstrInfo::getSerializableDirectMachineOperandTargetFlags() const {
6980   using namespace AArch64II;
6981 
6982   static const std::pair<unsigned, const char *> TargetFlags[] = {
6983       {MO_PAGE, "aarch64-page"}, {MO_PAGEOFF, "aarch64-pageoff"},
6984       {MO_G3, "aarch64-g3"},     {MO_G2, "aarch64-g2"},
6985       {MO_G1, "aarch64-g1"},     {MO_G0, "aarch64-g0"},
6986       {MO_HI12, "aarch64-hi12"}};
6987   return ArrayRef(TargetFlags);
6988 }
6989 
6990 ArrayRef<std::pair<unsigned, const char *>>
6991 AArch64InstrInfo::getSerializableBitmaskMachineOperandTargetFlags() const {
6992   using namespace AArch64II;
6993 
6994   static const std::pair<unsigned, const char *> TargetFlags[] = {
6995       {MO_COFFSTUB, "aarch64-coffstub"},
6996       {MO_GOT, "aarch64-got"},
6997       {MO_NC, "aarch64-nc"},
6998       {MO_S, "aarch64-s"},
6999       {MO_TLS, "aarch64-tls"},
7000       {MO_DLLIMPORT, "aarch64-dllimport"},
7001       {MO_DLLIMPORTAUX, "aarch64-dllimportaux"},
7002       {MO_PREL, "aarch64-prel"},
7003       {MO_TAGGED, "aarch64-tagged"}};
7004   return ArrayRef(TargetFlags);
7005 }
7006 
7007 ArrayRef<std::pair<MachineMemOperand::Flags, const char *>>
7008 AArch64InstrInfo::getSerializableMachineMemOperandTargetFlags() const {
7009   static const std::pair<MachineMemOperand::Flags, const char *> TargetFlags[] =
7010       {{MOSuppressPair, "aarch64-suppress-pair"},
7011        {MOStridedAccess, "aarch64-strided-access"}};
7012   return ArrayRef(TargetFlags);
7013 }
7014 
7015 /// Constants defining how certain sequences should be outlined.
7016 /// This encompasses how an outlined function should be called, and what kind of
7017 /// frame should be emitted for that outlined function.
7018 ///
7019 /// \p MachineOutlinerDefault implies that the function should be called with
7020 /// a save and restore of LR to the stack.
7021 ///
7022 /// That is,
7023 ///
7024 /// I1     Save LR                    OUTLINED_FUNCTION:
7025 /// I2 --> BL OUTLINED_FUNCTION       I1
7026 /// I3     Restore LR                 I2
7027 ///                                   I3
7028 ///                                   RET
7029 ///
7030 /// * Call construction overhead: 3 (save + BL + restore)
7031 /// * Frame construction overhead: 1 (ret)
7032 /// * Requires stack fixups? Yes
7033 ///
7034 /// \p MachineOutlinerTailCall implies that the function is being created from
7035 /// a sequence of instructions ending in a return.
7036 ///
7037 /// That is,
7038 ///
7039 /// I1                             OUTLINED_FUNCTION:
7040 /// I2 --> B OUTLINED_FUNCTION     I1
7041 /// RET                            I2
7042 ///                                RET
7043 ///
7044 /// * Call construction overhead: 1 (B)
7045 /// * Frame construction overhead: 0 (Return included in sequence)
7046 /// * Requires stack fixups? No
7047 ///
7048 /// \p MachineOutlinerNoLRSave implies that the function should be called using
7049 /// a BL instruction, but doesn't require LR to be saved and restored. This
7050 /// happens when LR is known to be dead.
7051 ///
7052 /// That is,
7053 ///
7054 /// I1                                OUTLINED_FUNCTION:
7055 /// I2 --> BL OUTLINED_FUNCTION       I1
7056 /// I3                                I2
7057 ///                                   I3
7058 ///                                   RET
7059 ///
7060 /// * Call construction overhead: 1 (BL)
7061 /// * Frame construction overhead: 1 (RET)
7062 /// * Requires stack fixups? No
7063 ///
7064 /// \p MachineOutlinerThunk implies that the function is being created from
7065 /// a sequence of instructions ending in a call. The outlined function is
7066 /// called with a BL instruction, and the outlined function tail-calls the
7067 /// original call destination.
7068 ///
7069 /// That is,
7070 ///
7071 /// I1                                OUTLINED_FUNCTION:
7072 /// I2 --> BL OUTLINED_FUNCTION       I1
7073 /// BL f                              I2
7074 ///                                   B f
7075 /// * Call construction overhead: 1 (BL)
7076 /// * Frame construction overhead: 0
7077 /// * Requires stack fixups? No
7078 ///
7079 /// \p MachineOutlinerRegSave implies that the function should be called with a
7080 /// save and restore of LR to an available register. This allows us to avoid
7081 /// stack fixups. Note that this outlining variant is compatible with the
7082 /// NoLRSave case.
7083 ///
7084 /// That is,
7085 ///
7086 /// I1     Save LR                    OUTLINED_FUNCTION:
7087 /// I2 --> BL OUTLINED_FUNCTION       I1
7088 /// I3     Restore LR                 I2
7089 ///                                   I3
7090 ///                                   RET
7091 ///
7092 /// * Call construction overhead: 3 (save + BL + restore)
7093 /// * Frame construction overhead: 1 (ret)
7094 /// * Requires stack fixups? No
7095 enum MachineOutlinerClass {
7096   MachineOutlinerDefault,  /// Emit a save, restore, call, and return.
7097   MachineOutlinerTailCall, /// Only emit a branch.
7098   MachineOutlinerNoLRSave, /// Emit a call and return.
7099   MachineOutlinerThunk,    /// Emit a call and tail-call.
7100   MachineOutlinerRegSave   /// Same as default, but save to a register.
7101 };
7102 
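/// Flags describing properties of a candidate's containing basic block. They
/// are set in isMBBSafeToOutlineFrom() and checked in
/// getOutliningCandidateInfo(), so per-block liveness and call information
/// only has to be computed once per block rather than once per candidate.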
7103 enum MachineOutlinerMBBFlags {
7104   LRUnavailableSomewhere = 0x2,
7105   HasCalls = 0x4,
7106   UnsafeRegsDead = 0x8
7107 };
7108 
7109 Register
7110 AArch64InstrInfo::findRegisterToSaveLRTo(outliner::Candidate &C) const {
7111   MachineFunction *MF = C.getMF();
7112   const TargetRegisterInfo &TRI = *MF->getSubtarget().getRegisterInfo();
7113   const AArch64RegisterInfo *ARI =
7114       static_cast<const AArch64RegisterInfo *>(&TRI);
7115   // Check if there is an available register across the sequence that we can
7116   // use.
7117   for (unsigned Reg : AArch64::GPR64RegClass) {
7118     if (!ARI->isReservedReg(*MF, Reg) &&
7119         Reg != AArch64::LR &&  // LR is not reserved, but don't use it.
7120         Reg != AArch64::X16 && // X16 is not guaranteed to be preserved.
7121         Reg != AArch64::X17 && // Ditto for X17.
7122         C.isAvailableAcrossAndOutOfSeq(Reg, TRI) &&
7123         C.isAvailableInsideSeq(Reg, TRI))
7124       return Reg;
7125   }
7126   return Register();
7127 }
7128 
7129 static bool
7130 outliningCandidatesSigningScopeConsensus(const outliner::Candidate &a,
7131                                          const outliner::Candidate &b) {
7132   const auto &MFIa = a.getMF()->getInfo<AArch64FunctionInfo>();
7133   const auto &MFIb = b.getMF()->getInfo<AArch64FunctionInfo>();
7134 
7135   return MFIa->shouldSignReturnAddress(false) == MFIb->shouldSignReturnAddress(false) &&
7136          MFIa->shouldSignReturnAddress(true) == MFIb->shouldSignReturnAddress(true);
7137 }
7138 
7139 static bool
7140 outliningCandidatesSigningKeyConsensus(const outliner::Candidate &a,
7141                                        const outliner::Candidate &b) {
7142   const auto &MFIa = a.getMF()->getInfo<AArch64FunctionInfo>();
7143   const auto &MFIb = b.getMF()->getInfo<AArch64FunctionInfo>();
7144 
7145   return MFIa->shouldSignWithBKey() == MFIb->shouldSignWithBKey();
7146 }
7147 
7148 static bool outliningCandidatesV8_3OpsConsensus(const outliner::Candidate &a,
7149                                                 const outliner::Candidate &b) {
7150   const AArch64Subtarget &SubtargetA =
7151       a.getMF()->getSubtarget<AArch64Subtarget>();
7152   const AArch64Subtarget &SubtargetB =
7153       b.getMF()->getSubtarget<AArch64Subtarget>();
7154   return SubtargetA.hasV8_3aOps() == SubtargetB.hasV8_3aOps();
7155 }
7156 
7157 outliner::OutlinedFunction AArch64InstrInfo::getOutliningCandidateInfo(
7158     std::vector<outliner::Candidate> &RepeatedSequenceLocs) const {
7159   outliner::Candidate &FirstCand = RepeatedSequenceLocs[0];
7160   unsigned SequenceSize =
7161       std::accumulate(FirstCand.front(), std::next(FirstCand.back()), 0,
7162                       [this](unsigned Sum, const MachineInstr &MI) {
7163                         return Sum + getInstSizeInBytes(MI);
7164                       });
7165   unsigned NumBytesToCreateFrame = 0;
7166 
7167   // We only allow outlining for functions having exactly matching return
7168   // address signing attributes, i.e., all share the same value for the
7169   // attribute "sign-return-address" and all share the same type of key they
7170   // are signed with.
7171   // Additionally we require all functions to simultaneously either support
7172   // v8.3a features or not. Otherwise an outlined function could get signed
7173   // using dedicated v8.3 instructions and a call from a function that doesn't
7174   // support v8.3 instructions would therefore be invalid.
7175   if (std::adjacent_find(
7176           RepeatedSequenceLocs.begin(), RepeatedSequenceLocs.end(),
7177           [](const outliner::Candidate &a, const outliner::Candidate &b) {
7178             // Return true if a and b are non-equal w.r.t. return address
7179             // signing or support of v8.3a features
7180             if (outliningCandidatesSigningScopeConsensus(a, b) &&
7181                 outliningCandidatesSigningKeyConsensus(a, b) &&
7182                 outliningCandidatesV8_3OpsConsensus(a, b)) {
7183               return false;
7184             }
7185             return true;
7186           }) != RepeatedSequenceLocs.end()) {
7187     return outliner::OutlinedFunction();
7188   }
7189 
7190   // Since at this point all candidates agree on their return address signing
7191   // picking just one is fine. If the candidate functions potentially sign their
7192   // return addresses, the outlined function should do the same. Note that in
7193   // the case of "sign-return-address"="non-leaf" this is an assumption: It is
7194   // not certainly true that the outlined function will have to sign its return
7195   // address but this decision is made later, when the decision to outline
7196   // has already been made.
7197   // The same holds for the number of additional instructions we need: On
7198   // v8.3a RET can be replaced by RETAA/RETAB and no AUT instruction is
7199   // necessary. However, at this point we don't know if the outlined function
7200   // will have a RET instruction so we assume the worst.
7201   const TargetRegisterInfo &TRI = getRegisterInfo();
7202   if (FirstCand.getMF()
7203           ->getInfo<AArch64FunctionInfo>()
7204           ->shouldSignReturnAddress(true)) {
7205     // One PAC and one AUT instruction
7206     NumBytesToCreateFrame += 8;
7207 
7208     // We have to check if sp-modifying instructions would get outlined.
7209     // If so, we only allow outlining if sp is unchanged overall, so matching
7210     // sub and add instructions are okay to outline; all other sp modifications
7211     // are not.
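    // For example (illustrative), a candidate containing the balanced pair
    //    sub sp, sp, #16
    //    ...
    //    add sp, sp, #16
    // leaves sp unchanged overall and is kept, whereas an unmatched
    //    sub sp, sp, #32
    // (or any other instruction writing sp) causes the candidate to be dropped.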
7212     auto hasIllegalSPModification = [&TRI](outliner::Candidate &C) {
7213       int SPValue = 0;
7214       MachineBasicBlock::iterator MBBI = C.front();
7215       for (;;) {
7216         if (MBBI->modifiesRegister(AArch64::SP, &TRI)) {
7217           switch (MBBI->getOpcode()) {
7218           case AArch64::ADDXri:
7219           case AArch64::ADDWri:
7220             assert(MBBI->getNumOperands() == 4 && "Wrong number of operands");
7221             assert(MBBI->getOperand(2).isImm() &&
7222                    "Expected operand to be immediate");
7223             assert(MBBI->getOperand(1).isReg() &&
7224                    "Expected operand to be a register");
7225             // Check if the add just increments sp. If so, we search for
7226             // matching sub instructions that decrement sp. If not, the
7227             // modification is illegal
7228             if (MBBI->getOperand(1).getReg() == AArch64::SP)
7229               SPValue += MBBI->getOperand(2).getImm();
7230             else
7231               return true;
7232             break;
7233           case AArch64::SUBXri:
7234           case AArch64::SUBWri:
7235             assert(MBBI->getNumOperands() == 4 && "Wrong number of operands");
7236             assert(MBBI->getOperand(2).isImm() &&
7237                    "Expected operand to be immediate");
7238             assert(MBBI->getOperand(1).isReg() &&
7239                    "Expected operand to be a register");
7240             // Check if the sub just decrements sp. If so, we search for
7241             // matching add instructions that increment sp. If not, the
7242             // modification is illegal
7243             if (MBBI->getOperand(1).getReg() == AArch64::SP)
7244               SPValue -= MBBI->getOperand(2).getImm();
7245             else
7246               return true;
7247             break;
7248           default:
7249             return true;
7250           }
7251         }
7252         if (MBBI == C.back())
7253           break;
7254         ++MBBI;
7255       }
7256       if (SPValue)
7257         return true;
7258       return false;
7259     };
7260     // Remove candidates with illegal stack modifying instructions
7261     llvm::erase_if(RepeatedSequenceLocs, hasIllegalSPModification);
7262 
7263     // If the sequence doesn't have enough candidates left, then we're done.
7264     if (RepeatedSequenceLocs.size() < 2)
7265       return outliner::OutlinedFunction();
7266   }
7267 
7268   // Properties about candidate MBBs that hold for all of them.
7269   unsigned FlagsSetInAll = 0xF;
7270 
7271   // Compute liveness information for each candidate, and set FlagsSetInAll.
7272   for (outliner::Candidate &C : RepeatedSequenceLocs)
7273     FlagsSetInAll &= C.Flags;
7274 
7275   // According to the AArch64 Procedure Call Standard, the following are
7276   // undefined on entry/exit from a function call:
7277   //
7278   // * Registers x16, x17, (and thus w16, w17)
7279   // * Condition codes (and thus the NZCV register)
7280   //
7281   // Because of this, we can't outline any sequence of instructions where one
7282   // of these registers is live into/across it. Thus, we need to delete those
7283   // candidates.
7286   auto CantGuaranteeValueAcrossCall = [&TRI](outliner::Candidate &C) {
7287     // If the unsafe registers in this block are all dead, then we don't need
7288     // to compute liveness here.
7289     if (C.Flags & UnsafeRegsDead)
7290       return false;
7291     return C.isAnyUnavailableAcrossOrOutOfSeq(
7292         {AArch64::W16, AArch64::W17, AArch64::NZCV}, TRI);
7293   };
7294 
7295   // Are there any candidates where those registers are live?
7296   if (!(FlagsSetInAll & UnsafeRegsDead)) {
7297     // Erase every candidate that violates the restrictions above. (It could be
7298     // true that we have viable candidates, so it's not worth bailing out in
7299     // the case that, say, 1 out of 20 candidates violate the restrictions.)
7300     llvm::erase_if(RepeatedSequenceLocs, CantGuaranteeValueAcrossCall);
7301 
7302     // If the sequence doesn't have enough candidates left, then we're done.
7303     if (RepeatedSequenceLocs.size() < 2)
7304       return outliner::OutlinedFunction();
7305   }
7306 
7307   // At this point, we have only "safe" candidates to outline. Figure out
7308   // frame + call instruction information.
7309 
7310   unsigned LastInstrOpcode = RepeatedSequenceLocs[0].back()->getOpcode();
7311 
7312   // Helper lambda which sets call information for every candidate.
7313   auto SetCandidateCallInfo =
7314       [&RepeatedSequenceLocs](unsigned CallID, unsigned NumBytesForCall) {
7315         for (outliner::Candidate &C : RepeatedSequenceLocs)
7316           C.setCallInfo(CallID, NumBytesForCall);
7317       };
7318 
7319   unsigned FrameID = MachineOutlinerDefault;
7320   NumBytesToCreateFrame += 4;
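  // (The 4 bytes here account for the RET that terminates the outlined
  // function in the default frame, matching the frame construction overhead
  // listed in the MachineOutlinerClass documentation above.)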
7321 
7322   bool HasBTI = any_of(RepeatedSequenceLocs, [](outliner::Candidate &C) {
7323     return C.getMF()->getInfo<AArch64FunctionInfo>()->branchTargetEnforcement();
7324   });
7325 
7326   // We check to see if CFI Instructions are present, and if they are
7327   // we find the number of CFI Instructions in the candidates.
7328   unsigned CFICount = 0;
7329   for (auto &I : make_range(RepeatedSequenceLocs[0].front(),
7330                             std::next(RepeatedSequenceLocs[0].back()))) {
7331     if (I.isCFIInstruction())
7332       CFICount++;
7333   }
7334 
7335   // We compare the number of found CFI Instructions to the number of CFI
7336   // instructions in the parent function for each candidate. We must check this
7337   // since if we outline one of the CFI instructions in a function, we have to
7338   // outline them all for correctness. If we do not, the address offsets will be
7339   // incorrect between the two sections of the program.
7340   for (outliner::Candidate &C : RepeatedSequenceLocs) {
7341     std::vector<MCCFIInstruction> CFIInstructions =
7342         C.getMF()->getFrameInstructions();
7343 
7344     if (CFICount > 0 && CFICount != CFIInstructions.size())
7345       return outliner::OutlinedFunction();
7346   }
7347 
7348   // Returns true if an instruction is safe to fix up, false otherwise.
7349   auto IsSafeToFixup = [this, &TRI](MachineInstr &MI) {
7350     if (MI.isCall())
7351       return true;
7352 
7353     if (!MI.modifiesRegister(AArch64::SP, &TRI) &&
7354         !MI.readsRegister(AArch64::SP, &TRI))
7355       return true;
7356 
7357     // Any modification of SP will break our code to save/restore LR.
7358     // FIXME: We could handle some instructions which add a constant
7359     // offset to SP, with a bit more work.
7360     if (MI.modifiesRegister(AArch64::SP, &TRI))
7361       return false;
7362 
7363     // At this point, we have a stack instruction that we might need to
7364     // fix up. We'll handle it if it's a load or store.
7365     if (MI.mayLoadOrStore()) {
7366       const MachineOperand *Base; // Filled with the base operand of MI.
7367       int64_t Offset;             // Filled with the offset of MI.
7368       bool OffsetIsScalable;
7369 
7370       // Does it allow us to offset the base operand and is the base the
7371       // register SP?
7372       if (!getMemOperandWithOffset(MI, Base, Offset, OffsetIsScalable, &TRI) ||
7373           !Base->isReg() || Base->getReg() != AArch64::SP)
7374         return false;
7375 
7376       // Fix-up code below assumes bytes.
7377       if (OffsetIsScalable)
7378         return false;
7379 
7380       // Find the minimum/maximum offset for this instruction and check
7381       // if fixing it up would be in range.
7382       int64_t MinOffset,
7383           MaxOffset;  // Unscaled offsets for the instruction.
7384       TypeSize Scale(0U, false); // The scale to multiply the offsets by.
7385       unsigned DummyWidth;
7386       getMemOpInfo(MI.getOpcode(), Scale, DummyWidth, MinOffset, MaxOffset);
7387 
7388       Offset += 16; // Update the offset to what it would be if we outlined.
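      // For example (illustrative), an "ldr x0, [sp, #8]" in the candidate
      // becomes "ldr x0, [sp, #24]" once LR has been pushed in the outlined
      // function, so #24 must still be within the instruction's encodable range.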
7389       if (Offset < MinOffset * (int64_t)Scale.getFixedValue() ||
7390           Offset > MaxOffset * (int64_t)Scale.getFixedValue())
7391         return false;
7392 
7393       // It's in range, so we can outline it.
7394       return true;
7395     }
7396 
7397     // FIXME: Add handling for instructions like "add x0, sp, #8".
7398 
7399     // We can't fix it up, so don't outline it.
7400     return false;
7401   };
7402 
7403   // True if it's possible to fix up each stack instruction in this sequence.
7404   // Important for frames/call variants that modify the stack.
7405   bool AllStackInstrsSafe = std::all_of(
7406       FirstCand.front(), std::next(FirstCand.back()), IsSafeToFixup);
7407 
7408   // If the last instruction in any candidate is a terminator, then we should
7409   // tail call all of the candidates.
7410   if (RepeatedSequenceLocs[0].back()->isTerminator()) {
7411     FrameID = MachineOutlinerTailCall;
7412     NumBytesToCreateFrame = 0;
7413     SetCandidateCallInfo(MachineOutlinerTailCall, 4);
7414   }
7415 
7416   else if (LastInstrOpcode == AArch64::BL ||
7417            ((LastInstrOpcode == AArch64::BLR ||
7418              LastInstrOpcode == AArch64::BLRNoIP) &&
7419             !HasBTI)) {
7420     // FIXME: Do we need to check if the code after this uses the value of LR?
7421     FrameID = MachineOutlinerThunk;
7422     NumBytesToCreateFrame = 0;
7423     SetCandidateCallInfo(MachineOutlinerThunk, 4);
7424   }
7425 
7426   else {
7427     // We need to decide how to emit calls + frames. We can always emit the same
7428     // frame if we don't need to save to the stack. If we have to save to the
7429     // stack, then we need a different frame.
7430     unsigned NumBytesNoStackCalls = 0;
7431     std::vector<outliner::Candidate> CandidatesWithoutStackFixups;
7432 
7433     // Check if we have to save LR.
7434     for (outliner::Candidate &C : RepeatedSequenceLocs) {
7435       // If we have a noreturn caller, then we're going to be conservative and
7436       // say that we have to save LR. If we don't have a ret at the end of the
7437       // block, then we can't reason about liveness accurately.
7438       //
7439       // FIXME: We can probably do better than always disabling this in
7440       // noreturn functions by fixing up the liveness info.
7441       bool IsNoReturn =
7442           C.getMF()->getFunction().hasFnAttribute(Attribute::NoReturn);
7443 
7444       // Is LR available? If so, we don't need a save.
7445       if (C.isAvailableAcrossAndOutOfSeq(AArch64::LR, TRI) && !IsNoReturn) {
7446         NumBytesNoStackCalls += 4;
7447         C.setCallInfo(MachineOutlinerNoLRSave, 4);
7448         CandidatesWithoutStackFixups.push_back(C);
7449       }
7450 
7451       // Is an unused register available? If so, we won't modify the stack, so
7452       // we can outline with the same frame type as those that don't save LR.
7453       else if (findRegisterToSaveLRTo(C)) {
7454         NumBytesNoStackCalls += 12;
7455         C.setCallInfo(MachineOutlinerRegSave, 12);
7456         CandidatesWithoutStackFixups.push_back(C);
7457       }
7458 
7459       // Is SP used in the sequence at all? If not, we don't have to modify
7460       // the stack, so we are guaranteed to get the same frame.
7461       else if (C.isAvailableInsideSeq(AArch64::SP, TRI)) {
7462         NumBytesNoStackCalls += 12;
7463         C.setCallInfo(MachineOutlinerDefault, 12);
7464         CandidatesWithoutStackFixups.push_back(C);
7465       }
7466 
7467       // If we outline this, we need to modify the stack. Pretend we don't
7468       // outline this by saving all of its bytes.
7469       else {
7470         NumBytesNoStackCalls += SequenceSize;
7471       }
7472     }
7473 
7474     // If there are no places where we have to save LR, then note that we
7475     // don't have to update the stack. Otherwise, give every candidate the
7476     // default call type, as long as it's safe to do so.
7477     if (!AllStackInstrsSafe ||
7478         NumBytesNoStackCalls <= RepeatedSequenceLocs.size() * 12) {
7479       RepeatedSequenceLocs = CandidatesWithoutStackFixups;
7480       FrameID = MachineOutlinerNoLRSave;
7481     } else {
7482       SetCandidateCallInfo(MachineOutlinerDefault, 12);
7483 
7484       // Bugzilla ID: 46767
7485       // TODO: Check if fixing up the stack more than once is safe so we can
7486       // outline these.
7487       //
7488       // An outline resulting in a caller that requires stack fixups at the
7489       // callsite to a callee that also requires stack fixups can happen when
7490       // there are no available registers at the candidate callsite for a
7491       // candidate that itself also has calls.
7492       //
7493       // In other words if function_containing_sequence in the following pseudo
7494       // assembly requires that we save LR at the point of the call, but there
7495       // are no available registers: in this case we save using SP and as a
7496       // result the SP offsets requires stack fixups by multiples of 16.
7497       //
7498       // function_containing_sequence:
7499       //   ...
7500       //   save LR to SP <- Requires stack instr fixups in OUTLINED_FUNCTION_N
7501       //   call OUTLINED_FUNCTION_N
7502       //   restore LR from SP
7503       //   ...
7504       //
7505       // OUTLINED_FUNCTION_N:
7506       //   save LR to SP <- Requires stack instr fixups in OUTLINED_FUNCTION_N
7507       //   ...
7508       //   bl foo
7509       //   restore LR from SP
7510       //   ret
7511       //
7512       // Because the code to handle more than one stack fixup does not
7513       // currently have the proper checks for legality, these cases will assert
7514       // in the AArch64 MachineOutliner. This is because the code to do this
7515       // needs more hardening, testing, better checks that generated code is
7516       // legal, etc and because it is only verified to handle a single pass of
7517       // stack fixup.
7518       //
7519       // The assert happens in AArch64InstrInfo::buildOutlinedFrame to catch
7520       // these cases until they are known to be handled. Bugzilla 46767 is
7521       // referenced in comments at the assert site.
7522       //
7523       // To avoid asserting (or generating non-legal code on noassert builds)
7524       // we remove all candidates which would need more than one stack fixup by
7525       // pruning the cases where the candidate has calls while also having no
7526       // available LR and having no available general purpose registers to copy
7527       // LR to (ie one extra stack save/restore).
7528       //
7529       if (FlagsSetInAll & MachineOutlinerMBBFlags::HasCalls) {
7530         erase_if(RepeatedSequenceLocs, [this, &TRI](outliner::Candidate &C) {
7531           return (std::any_of(
7532                      C.front(), std::next(C.back()),
7533                      [](const MachineInstr &MI) { return MI.isCall(); })) &&
7534                  (!C.isAvailableAcrossAndOutOfSeq(AArch64::LR, TRI) ||
7535                   !findRegisterToSaveLRTo(C));
7536         });
7537       }
7538     }
7539 
7540     // If we dropped all of the candidates, bail out here.
7541     if (RepeatedSequenceLocs.size() < 2) {
7542       RepeatedSequenceLocs.clear();
7543       return outliner::OutlinedFunction();
7544     }
7545   }
7546 
7547   // Does every candidate's MBB contain a call? If so, then we might have a call
7548   // in the range.
7549   if (FlagsSetInAll & MachineOutlinerMBBFlags::HasCalls) {
7550     // Check if the range contains a call. These require a save + restore of the
7551     // link register.
7552     bool ModStackToSaveLR = false;
7553     if (std::any_of(FirstCand.front(), FirstCand.back(),
7554                     [](const MachineInstr &MI) { return MI.isCall(); }))
7555       ModStackToSaveLR = true;
7556 
7557     // Handle the last instruction separately. If this is a tail call, then the
7558     // last instruction is a call. We don't want to save + restore in this case.
7559     // However, it could be possible that the last instruction is a call without
7560     // it being valid to tail call this sequence. We should consider this as
7561     // well.
7562     else if (FrameID != MachineOutlinerThunk &&
7563              FrameID != MachineOutlinerTailCall && FirstCand.back()->isCall())
7564       ModStackToSaveLR = true;
7565 
7566     if (ModStackToSaveLR) {
7567       // We can't fix up the stack. Bail out.
7568       if (!AllStackInstrsSafe) {
7569         RepeatedSequenceLocs.clear();
7570         return outliner::OutlinedFunction();
7571       }
7572 
7573       // Save + restore LR.
7574       NumBytesToCreateFrame += 8;
7575     }
7576   }
7577 
7578   // If we have CFI instructions, we can only outline if the outlined section
7579   // can be a tail call
7580   if (FrameID != MachineOutlinerTailCall && CFICount > 0)
7581     return outliner::OutlinedFunction();
7582 
7583   return outliner::OutlinedFunction(RepeatedSequenceLocs, SequenceSize,
7584                                     NumBytesToCreateFrame, FrameID);
7585 }
7586 
7587 bool AArch64InstrInfo::isFunctionSafeToOutlineFrom(
7588     MachineFunction &MF, bool OutlineFromLinkOnceODRs) const {
7589   const Function &F = MF.getFunction();
7590 
7591   // Can F be deduplicated by the linker? If it can, don't outline from it.
7592   if (!OutlineFromLinkOnceODRs && F.hasLinkOnceODRLinkage())
7593     return false;
7594 
7595   // Don't outline from functions with section markings; the program could
7596   // expect that all the code is in the named section.
7597   // FIXME: Allow outlining from multiple functions with the same section
7598   // marking.
7599   if (F.hasSection())
7600     return false;
7601 
7602   // Outlining from functions with redzones is unsafe since the outliner may
7603   // modify the stack. Check if hasRedZone is true or unknown; if yes, don't
7604   // outline from it.
7605   AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
7606   if (!AFI || AFI->hasRedZone().value_or(true))
7607     return false;
7608 
7609   // FIXME: Teach the outliner to generate/handle Windows unwind info.
7610   if (MF.getTarget().getMCAsmInfo()->usesWindowsCFI())
7611     return false;
7612 
7613   // It's safe to outline from MF.
7614   return true;
7615 }
7616 
7617 bool AArch64InstrInfo::isMBBSafeToOutlineFrom(MachineBasicBlock &MBB,
7618                                               unsigned &Flags) const {
7619   if (!TargetInstrInfo::isMBBSafeToOutlineFrom(MBB, Flags))
7620     return false;
7621   // Check if LR is available through all of the MBB. If it's not, then set
7622   // a flag.
7623   assert(MBB.getParent()->getRegInfo().tracksLiveness() &&
7624          "Suitable Machine Function for outlining must track liveness");
7625   LiveRegUnits LRU(getRegisterInfo());
7626 
7627   for (MachineInstr &MI : llvm::reverse(MBB))
7628     LRU.accumulate(MI);
7629 
7630   // Check if each of the unsafe registers are available...
7631   bool W16AvailableInBlock = LRU.available(AArch64::W16);
7632   bool W17AvailableInBlock = LRU.available(AArch64::W17);
7633   bool NZCVAvailableInBlock = LRU.available(AArch64::NZCV);
7634 
7635   // If all of these are dead (and not live out), we know we don't have to check
7636   // them later.
7637   if (W16AvailableInBlock && W17AvailableInBlock && NZCVAvailableInBlock)
7638     Flags |= MachineOutlinerMBBFlags::UnsafeRegsDead;
7639 
7640   // Now, add the live outs to the set.
7641   LRU.addLiveOuts(MBB);
7642 
7643   // If any of these registers is available in the MBB, but also a live out of
7644   // the block, then we know outlining is unsafe.
7645   if (W16AvailableInBlock && !LRU.available(AArch64::W16))
7646     return false;
7647   if (W17AvailableInBlock && !LRU.available(AArch64::W17))
7648     return false;
7649   if (NZCVAvailableInBlock && !LRU.available(AArch64::NZCV))
7650     return false;
7651 
7652   // Check if there's a call inside this MachineBasicBlock. If there is, then
7653   // set a flag.
7654   if (any_of(MBB, [](MachineInstr &MI) { return MI.isCall(); }))
7655     Flags |= MachineOutlinerMBBFlags::HasCalls;
7656 
7657   MachineFunction *MF = MBB.getParent();
7658 
7659   // In the event that we outline, we may have to save LR. If there is an
7660   // available register in the MBB, then we'll always save LR there. Check if
7661   // this is true.
7662   bool CanSaveLR = false;
7663   const AArch64RegisterInfo *ARI = static_cast<const AArch64RegisterInfo *>(
7664       MF->getSubtarget().getRegisterInfo());
7665 
7666   // Check if there is an available register across the sequence that we can
7667   // use.
7668   for (unsigned Reg : AArch64::GPR64RegClass) {
7669     if (!ARI->isReservedReg(*MF, Reg) && Reg != AArch64::LR &&
7670         Reg != AArch64::X16 && Reg != AArch64::X17 && LRU.available(Reg)) {
7671       CanSaveLR = true;
7672       break;
7673     }
7674   }
7675 
7676   // Check if we have a register we can save LR to, and if LR was used
7677   // somewhere. If both of those things are true, then we need to evaluate the
7678   // safety of outlining stack instructions later.
7679   if (!CanSaveLR && !LRU.available(AArch64::LR))
7680     Flags |= MachineOutlinerMBBFlags::LRUnavailableSomewhere;
7681 
7682   return true;
7683 }
7684 
7685 outliner::InstrType
7686 AArch64InstrInfo::getOutliningType(MachineBasicBlock::iterator &MIT,
7687                                    unsigned Flags) const {
7688   MachineInstr &MI = *MIT;
7689   MachineBasicBlock *MBB = MI.getParent();
7690   MachineFunction *MF = MBB->getParent();
7691   AArch64FunctionInfo *FuncInfo = MF->getInfo<AArch64FunctionInfo>();
7692 
7693   // Don't outline anything used for return address signing. The outlined
7694   // function will get signed later if needed
7695   switch (MI.getOpcode()) {
7696   case AArch64::PACIASP:
7697   case AArch64::PACIBSP:
7698   case AArch64::AUTIASP:
7699   case AArch64::AUTIBSP:
7700   case AArch64::RETAA:
7701   case AArch64::RETAB:
7702   case AArch64::EMITBKEY:
7703     return outliner::InstrType::Illegal;
7704   }
7705 
7706   // Don't outline LOHs.
7707   if (FuncInfo->getLOHRelated().count(&MI))
7708     return outliner::InstrType::Illegal;
7709 
7710   // We can only outline these if we will tail call the outlined function, or
7711   // fix up the CFI offsets. Currently, CFI instructions are outlined only if
7712   // they are part of a tail call.
7713   //
7714   // FIXME: If the proper fixups for the offset are implemented, this should be
7715   // possible.
7716   if (MI.isCFIInstruction())
7717     return outliner::InstrType::Legal;
7718 
7719   // Don't allow debug values to impact outlining type.
7720   if (MI.isDebugInstr() || MI.isIndirectDebugValue())
7721     return outliner::InstrType::Invisible;
7722 
7723   // At this point, KILL instructions don't really tell us much so we can go
7724   // ahead and skip over them.
7725   if (MI.isKill())
7726     return outliner::InstrType::Invisible;
7727 
7728   // Is this a terminator for a basic block?
7729   if (MI.isTerminator()) {
7730 
7731     // Is this the end of a function?
7732     if (MI.getParent()->succ_empty())
7733       return outliner::InstrType::Legal;
7734 
7735     // It's not, so don't outline it.
7736     return outliner::InstrType::Illegal;
7737   }
7738 
7739   // Make sure none of the operands are un-outlinable.
7740   for (const MachineOperand &MOP : MI.operands()) {
7741     if (MOP.isCPI() || MOP.isJTI() || MOP.isCFIIndex() || MOP.isFI() ||
7742         MOP.isTargetIndex())
7743       return outliner::InstrType::Illegal;
7744 
7745     // If it uses LR or W30 explicitly, then don't touch it.
7746     if (MOP.isReg() && !MOP.isImplicit() &&
7747         (MOP.getReg() == AArch64::LR || MOP.getReg() == AArch64::W30))
7748       return outliner::InstrType::Illegal;
7749   }
7750 
7751   // Special cases for instructions that can always be outlined, but will fail
7752   // the later tests. E.g. ADRPs, which are PC-relative and may use LR, but can
7753   // always be outlined because they don't require a *specific* value to be in LR.
7754   if (MI.getOpcode() == AArch64::ADRP)
7755     return outliner::InstrType::Legal;
7756 
7757   // If MI is a call we might be able to outline it. We don't want to outline
7758   // any calls that rely on the position of items on the stack. When we outline
7759   // something containing a call, we have to emit a save and restore of LR in
7760   // the outlined function. Currently, this always happens by saving LR to the
7761   // stack. Thus, if we outline, say, half the parameters for a function call
7762   // plus the call, then we'll break the callee's expectations for the layout
7763   // of the stack.
7764   //
7765   // FIXME: Allow calls to functions which construct a stack frame, as long
7766   // as they don't access arguments on the stack.
7767   // FIXME: Figure out some way to analyze functions defined in other modules.
7768   // We should be able to compute the memory usage based on the IR calling
7769   // convention, even if we can't see the definition.
7770   if (MI.isCall()) {
7771     // Get the function associated with the call. Look at each operand and find
7772     // the one that represents the callee and get its name.
7773     const Function *Callee = nullptr;
7774     for (const MachineOperand &MOP : MI.operands()) {
7775       if (MOP.isGlobal()) {
7776         Callee = dyn_cast<Function>(MOP.getGlobal());
7777         break;
7778       }
7779     }
7780 
7781     // Never outline calls to mcount.  There isn't any rule that would require
7782     // this, but the Linux kernel's "ftrace" feature depends on it.
7783     if (Callee && Callee->getName() == "\01_mcount")
7784       return outliner::InstrType::Illegal;
7785 
7786     // If we don't know anything about the callee, assume it depends on the
7787     // stack layout of the caller. In that case, it's only legal to outline
7788     // as a tail-call. Explicitly list the call instructions we know about so we
7789     // don't get unexpected results with call pseudo-instructions.
7790     auto UnknownCallOutlineType = outliner::InstrType::Illegal;
7791     if (MI.getOpcode() == AArch64::BLR ||
7792         MI.getOpcode() == AArch64::BLRNoIP || MI.getOpcode() == AArch64::BL)
7793       UnknownCallOutlineType = outliner::InstrType::LegalTerminator;
7794 
7795     if (!Callee)
7796       return UnknownCallOutlineType;
7797 
7798     // We have a function we have information about. Check if it's something we
7799     // can safely outline.
7800     MachineFunction *CalleeMF = MF->getMMI().getMachineFunction(*Callee);
7801 
7802     // We don't know what's going on with the callee at all. Don't touch it.
7803     if (!CalleeMF)
7804       return UnknownCallOutlineType;
7805 
7806     // Check if we know anything about the callee saves on the function. If we
7807     // don't, then don't touch it, since that implies that we haven't
7808     // computed anything about its stack frame yet.
7809     MachineFrameInfo &MFI = CalleeMF->getFrameInfo();
7810     if (!MFI.isCalleeSavedInfoValid() || MFI.getStackSize() > 0 ||
7811         MFI.getNumObjects() > 0)
7812       return UnknownCallOutlineType;
7813 
7814     // At this point, we can say that CalleeMF ought to not pass anything on the
7815     // stack. Therefore, we can outline it.
7816     return outliner::InstrType::Legal;
7817   }
7818 
7819   // Don't outline positions.
7820   if (MI.isPosition())
7821     return outliner::InstrType::Illegal;
7822 
7823   // Don't touch the link register or W30.
7824   if (MI.readsRegister(AArch64::W30, &getRegisterInfo()) ||
7825       MI.modifiesRegister(AArch64::W30, &getRegisterInfo()))
7826     return outliner::InstrType::Illegal;
7827 
7828   // Don't outline BTI instructions, because that will prevent the outlining
7829   // site from being indirectly callable.
7830   if (MI.getOpcode() == AArch64::HINT) {
7831     int64_t Imm = MI.getOperand(0).getImm();
7832     if (Imm == 32 || Imm == 34 || Imm == 36 || Imm == 38)
7833       return outliner::InstrType::Illegal;
7834   }
7835 
7836   return outliner::InstrType::Legal;
7837 }
7838 
7839 void AArch64InstrInfo::fixupPostOutline(MachineBasicBlock &MBB) const {
7840   for (MachineInstr &MI : MBB) {
7841     const MachineOperand *Base;
7842     unsigned Width;
7843     int64_t Offset;
7844     bool OffsetIsScalable;
7845 
7846     // Is this a load or store with an immediate offset with SP as the base?
7847     if (!MI.mayLoadOrStore() ||
7848         !getMemOperandWithOffsetWidth(MI, Base, Offset, OffsetIsScalable, Width,
7849                                       &RI) ||
7850         (Base->isReg() && Base->getReg() != AArch64::SP))
7851       continue;
7852 
7853     // It is, so we have to fix it up.
7854     TypeSize Scale(0U, false);
7855     int64_t Dummy1, Dummy2;
7856 
7857     MachineOperand &StackOffsetOperand = getMemOpBaseRegImmOfsOffsetOperand(MI);
7858     assert(StackOffsetOperand.isImm() && "Stack offset wasn't immediate!");
7859     getMemOpInfo(MI.getOpcode(), Scale, Width, Dummy1, Dummy2);
7860     assert(Scale != 0 && "Unexpected opcode!");
7861     assert(!OffsetIsScalable && "Expected offset to be a byte offset");
7862 
7863     // We've pushed the return address to the stack, so add 16 to the offset.
7864     // This is safe, since we already checked if it would overflow when we
7865     // checked if this instruction was legal to outline.
7866     int64_t NewImm = (Offset + 16) / (int64_t)Scale.getFixedValue();
7867     StackOffsetOperand.setImm(NewImm);
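    // For example (illustrative), for a scaled 64-bit load such as LDRXui the
    // scale is 8, so a load that was at byte offset 8 gets the immediate
    // (8 + 16) / 8 = 3 and now reads from [sp, #24].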
7868   }
7869 }
7870 
7871 static void signOutlinedFunction(MachineFunction &MF, MachineBasicBlock &MBB,
7872                                  bool ShouldSignReturnAddr,
7873                                  bool ShouldSignReturnAddrWithBKey) {
7874   if (ShouldSignReturnAddr) {
7875     MachineBasicBlock::iterator MBBPAC = MBB.begin();
7876     MachineBasicBlock::iterator MBBAUT = MBB.getFirstTerminator();
7877     const AArch64Subtarget &Subtarget = MF.getSubtarget<AArch64Subtarget>();
7878     const TargetInstrInfo *TII = Subtarget.getInstrInfo();
7879     DebugLoc DL;
7880 
7881     if (MBBAUT != MBB.end())
7882       DL = MBBAUT->getDebugLoc();
7883 
7884     // At the very beginning of the basic block we insert the following
7885     // depending on the key type
7886     //
7887     // a_key:                   b_key:
7888     //    PACIASP                   EMITBKEY
7889     //    CFI_INSTRUCTION           PACIBSP
7890     //                              CFI_INSTRUCTION
7891     if (ShouldSignReturnAddrWithBKey) {
7892       BuildMI(MBB, MBBPAC, DebugLoc(), TII->get(AArch64::EMITBKEY))
7893           .setMIFlag(MachineInstr::FrameSetup);
7894     }
7895 
7896     BuildMI(MBB, MBBPAC, DebugLoc(),
7897             TII->get(ShouldSignReturnAddrWithBKey ? AArch64::PACIBSP
7898                                                   : AArch64::PACIASP))
7899         .setMIFlag(MachineInstr::FrameSetup);
7900 
7901     if (MF.getInfo<AArch64FunctionInfo>()->needsDwarfUnwindInfo(MF)) {
7902       unsigned CFIIndex =
7903           MF.addFrameInst(MCCFIInstruction::createNegateRAState(nullptr));
7904       BuildMI(MBB, MBBPAC, DebugLoc(), TII->get(AArch64::CFI_INSTRUCTION))
7905           .addCFIIndex(CFIIndex)
7906           .setMIFlags(MachineInstr::FrameSetup);
7907     }
7908 
7909     // If v8.3a features are available we can replace a RET instruction by
7910     // RETAA or RETAB and omit the AUT instructions. In this case the
7911     // DW_CFA_AARCH64_negate_ra_state can't be emitted.
7912     if (Subtarget.hasPAuth() && MBBAUT != MBB.end() &&
7913         MBBAUT->getOpcode() == AArch64::RET) {
7914       BuildMI(MBB, MBBAUT, DL,
7915               TII->get(ShouldSignReturnAddrWithBKey ? AArch64::RETAB
7916                                                     : AArch64::RETAA))
7917           .copyImplicitOps(*MBBAUT);
7918       MBB.erase(MBBAUT);
7919     } else {
7920       BuildMI(MBB, MBBAUT, DL,
7921               TII->get(ShouldSignReturnAddrWithBKey ? AArch64::AUTIBSP
7922                                                     : AArch64::AUTIASP))
7923           .setMIFlag(MachineInstr::FrameDestroy);
7924       unsigned CFIIndexAuth =
7925           MF.addFrameInst(MCCFIInstruction::createNegateRAState(nullptr));
7926       BuildMI(MBB, MBBAUT, DL, TII->get(TargetOpcode::CFI_INSTRUCTION))
7927           .addCFIIndex(CFIIndexAuth)
7928           .setMIFlags(MachineInstr::FrameDestroy);
7929     }
7930   }
7931 }
7932 
7933 void AArch64InstrInfo::buildOutlinedFrame(
7934     MachineBasicBlock &MBB, MachineFunction &MF,
7935     const outliner::OutlinedFunction &OF) const {
7936 
7937   AArch64FunctionInfo *FI = MF.getInfo<AArch64FunctionInfo>();
7938 
7939   if (OF.FrameConstructionID == MachineOutlinerTailCall)
7940     FI->setOutliningStyle("Tail Call");
7941   else if (OF.FrameConstructionID == MachineOutlinerThunk) {
7942     // For thunk outlining, rewrite the last instruction from a call to a
7943     // tail-call.
7944     MachineInstr *Call = &*--MBB.instr_end();
7945     unsigned TailOpcode;
7946     if (Call->getOpcode() == AArch64::BL) {
7947       TailOpcode = AArch64::TCRETURNdi;
7948     } else {
7949       assert(Call->getOpcode() == AArch64::BLR ||
7950              Call->getOpcode() == AArch64::BLRNoIP);
7951       TailOpcode = AArch64::TCRETURNriALL;
7952     }
7953     MachineInstr *TC = BuildMI(MF, DebugLoc(), get(TailOpcode))
7954                            .add(Call->getOperand(0))
7955                            .addImm(0);
7956     MBB.insert(MBB.end(), TC);
7957     Call->eraseFromParent();
7958 
7959     FI->setOutliningStyle("Thunk");
7960   }
7961 
7962   bool IsLeafFunction = true;
7963 
7964   // Is there a call in the outlined range?
7965   auto IsNonTailCall = [](const MachineInstr &MI) {
7966     return MI.isCall() && !MI.isReturn();
7967   };
7968 
7969   if (llvm::any_of(MBB.instrs(), IsNonTailCall)) {
7970     // Fix up the instructions in the range, since we're going to modify the
7971     // stack.
7972 
7973     // Bugzilla ID: 46767
7974     // TODO: Check if fixing up twice is safe so we can outline these.
7975     assert(OF.FrameConstructionID != MachineOutlinerDefault &&
7976            "Can only fix up stack references once");
7977     fixupPostOutline(MBB);
7978 
7979     IsLeafFunction = false;
7980 
7981     // LR has to be a live in so that we can save it.
7982     if (!MBB.isLiveIn(AArch64::LR))
7983       MBB.addLiveIn(AArch64::LR);
7984 
7985     MachineBasicBlock::iterator It = MBB.begin();
7986     MachineBasicBlock::iterator Et = MBB.end();
7987 
7988     if (OF.FrameConstructionID == MachineOutlinerTailCall ||
7989         OF.FrameConstructionID == MachineOutlinerThunk)
7990       Et = std::prev(MBB.end());
7991 
7992     // Insert a save before the outlined region
7993     MachineInstr *STRXpre = BuildMI(MF, DebugLoc(), get(AArch64::STRXpre))
7994                                 .addReg(AArch64::SP, RegState::Define)
7995                                 .addReg(AArch64::LR)
7996                                 .addReg(AArch64::SP)
7997                                 .addImm(-16);
7998     It = MBB.insert(It, STRXpre);
7999 
8000     if (MF.getInfo<AArch64FunctionInfo>()->needsDwarfUnwindInfo(MF)) {
8001       const TargetSubtargetInfo &STI = MF.getSubtarget();
8002       const MCRegisterInfo *MRI = STI.getRegisterInfo();
8003       unsigned DwarfReg = MRI->getDwarfRegNum(AArch64::LR, true);
8004 
8005       // Add a CFI saying the stack was moved 16 B down.
8006       int64_t StackPosEntry =
8007           MF.addFrameInst(MCCFIInstruction::cfiDefCfaOffset(nullptr, 16));
8008       BuildMI(MBB, It, DebugLoc(), get(AArch64::CFI_INSTRUCTION))
8009           .addCFIIndex(StackPosEntry)
8010           .setMIFlags(MachineInstr::FrameSetup);
8011 
8012       // Add a CFI saying that the LR that we want to find is now 16 B higher
8013       // than before.
8014       int64_t LRPosEntry = MF.addFrameInst(
8015           MCCFIInstruction::createOffset(nullptr, DwarfReg, -16));
8016       BuildMI(MBB, It, DebugLoc(), get(AArch64::CFI_INSTRUCTION))
8017           .addCFIIndex(LRPosEntry)
8018           .setMIFlags(MachineInstr::FrameSetup);
8019     }
8020 
8021     // Insert a restore before the terminator for the function.
8022     MachineInstr *LDRXpost = BuildMI(MF, DebugLoc(), get(AArch64::LDRXpost))
8023                                  .addReg(AArch64::SP, RegState::Define)
8024                                  .addReg(AArch64::LR, RegState::Define)
8025                                  .addReg(AArch64::SP)
8026                                  .addImm(16);
8027     Et = MBB.insert(Et, LDRXpost);
8028   }
8029 
8030   // If multiple candidates reach this point, they must agree on their return
8031   // address signing. It is therefore enough to just consider the signing
8032   // behaviour of one of them.
8033   const auto &MFI = *OF.Candidates.front().getMF()->getInfo<AArch64FunctionInfo>();
8034   bool ShouldSignReturnAddr = MFI.shouldSignReturnAddress(!IsLeafFunction);
8035 
8036   // a_key is the default
  bool ShouldSignReturnAddrWithBKey = MFI.shouldSignWithBKey();

  // If this is a tail call outlined function, then there's already a return.
  if (OF.FrameConstructionID == MachineOutlinerTailCall ||
      OF.FrameConstructionID == MachineOutlinerThunk) {
    signOutlinedFunction(MF, MBB, ShouldSignReturnAddr,
                         ShouldSignReturnAddrWithBKey);
    return;
  }

  // It's not a tail call, so we have to insert the return ourselves.

  // LR has to be a live-in so that we can return to it.
  if (!MBB.isLiveIn(AArch64::LR))
    MBB.addLiveIn(AArch64::LR);

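  // RET takes the register holding the return address as its operand; the
  // outlined function always returns through LR here.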
  MachineInstr *ret = BuildMI(MF, DebugLoc(), get(AArch64::RET))
                          .addReg(AArch64::LR);
  MBB.insert(MBB.end(), ret);

  signOutlinedFunction(MF, MBB, ShouldSignReturnAddr,
                       ShouldSignReturnAddrWithBKey);

  FI->setOutliningStyle("Function");

  // Did we have to modify the stack by saving the link register?
  if (OF.FrameConstructionID != MachineOutlinerDefault)
    return;

  // We modified the stack.
  // Walk over the basic block and fix up all the stack accesses.
  fixupPostOutline(MBB);
}

MachineBasicBlock::iterator AArch64InstrInfo::insertOutlinedCall(
    Module &M, MachineBasicBlock &MBB, MachineBasicBlock::iterator &It,
    MachineFunction &MF, outliner::Candidate &C) const {

  // Are we tail calling?
  if (C.CallConstructionID == MachineOutlinerTailCall) {
    // If yes, then we can just branch to the label.
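    // TCRETURNdi is a tail-call pseudo that is later expanded into a direct
    // branch, so control returns straight to the original caller.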
    It = MBB.insert(It, BuildMI(MF, DebugLoc(), get(AArch64::TCRETURNdi))
                            .addGlobalAddress(M.getNamedValue(MF.getName()))
                            .addImm(0));
    return It;
  }

  // Are we saving the link register?
  if (C.CallConstructionID == MachineOutlinerNoLRSave ||
      C.CallConstructionID == MachineOutlinerThunk) {
    // No, so just insert the call.
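    // Clobbering LR with BL is fine here: for NoLRSave the outliner has shown
    // LR to be available at the call site, and for Thunk the original sequence
    // ended in a call, so LR was about to be clobbered anyway.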
    It = MBB.insert(It, BuildMI(MF, DebugLoc(), get(AArch64::BL))
                            .addGlobalAddress(M.getNamedValue(MF.getName())));
    return It;
  }

  // We want to return the spot where we inserted the call.
  MachineBasicBlock::iterator CallPt;

  // Instructions for saving and restoring LR around the call instruction we're
  // going to insert.
  MachineInstr *Save;
  MachineInstr *Restore;
  // Can we save to a register?
  if (C.CallConstructionID == MachineOutlinerRegSave) {
    // FIXME: This logic should be sunk into a target-specific interface so
    // that we don't have to recompute the register.
    Register Reg = findRegisterToSaveLRTo(C);
    assert(Reg && "No callee-saved register available?");

    // LR has to be a live-in so that we can save it.
    if (!MBB.isLiveIn(AArch64::LR))
      MBB.addLiveIn(AArch64::LR);

    // Save and restore LR from Reg.
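    // ORRXrs with XZR as the first source and a zero shift is the canonical
    // encoding of "mov Xd, Xm", so these are plain register-to-register moves.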
    Save = BuildMI(MF, DebugLoc(), get(AArch64::ORRXrs), Reg)
               .addReg(AArch64::XZR)
               .addReg(AArch64::LR)
               .addImm(0);
    Restore = BuildMI(MF, DebugLoc(), get(AArch64::ORRXrs), AArch64::LR)
                  .addReg(AArch64::XZR)
                  .addReg(Reg)
                  .addImm(0);
  } else {
    // We have the default case. Save and restore from SP.
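    // This mirrors the save/restore emitted around the outlined frame: a
    // pre-indexed "str x30, [sp, #-16]!" paired with a post-indexed
    // "ldr x30, [sp], #16".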
    Save = BuildMI(MF, DebugLoc(), get(AArch64::STRXpre))
               .addReg(AArch64::SP, RegState::Define)
               .addReg(AArch64::LR)
               .addReg(AArch64::SP)
               .addImm(-16);
    Restore = BuildMI(MF, DebugLoc(), get(AArch64::LDRXpost))
                  .addReg(AArch64::SP, RegState::Define)
                  .addReg(AArch64::LR, RegState::Define)
                  .addReg(AArch64::SP)
                  .addImm(16);
  }

  It = MBB.insert(It, Save);
  It++;

  // Insert the call.
  It = MBB.insert(It, BuildMI(MF, DebugLoc(), get(AArch64::BL))
                          .addGlobalAddress(M.getNamedValue(MF.getName())));
  CallPt = It;
  It++;

  It = MBB.insert(It, Restore);
  return CallPt;
}

bool AArch64InstrInfo::shouldOutlineFromFunctionByDefault(
    MachineFunction &MF) const {
  return MF.getFunction().hasMinSize();
}

std::optional<DestSourcePair>
AArch64InstrInfo::isCopyInstrImpl(const MachineInstr &MI) const {

  // AArch64::ORRWrs and AArch64::ORRXrs with WZR/XZR as the first source
  // register and a zero immediate are used as aliases for the mov instruction.
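  // For example, "mov w0, w1" assembles to "orr w0, wzr, w1, lsl #0"; the
  // checks below match exactly that operand pattern.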
  if (MI.getOpcode() == AArch64::ORRWrs &&
      MI.getOperand(1).getReg() == AArch64::WZR &&
      MI.getOperand(3).getImm() == 0x0) {
    return DestSourcePair{MI.getOperand(0), MI.getOperand(2)};
  }

  if (MI.getOpcode() == AArch64::ORRXrs &&
      MI.getOperand(1).getReg() == AArch64::XZR &&
      MI.getOperand(3).getImm() == 0x0) {
    return DestSourcePair{MI.getOperand(0), MI.getOperand(2)};
  }

  return std::nullopt;
}

std::optional<RegImmPair>
AArch64InstrInfo::isAddImmediate(const MachineInstr &MI, Register Reg) const {
  int Sign = 1;
  int64_t Offset = 0;

  // TODO: Handle cases where Reg is a super- or sub-register of the
  // destination register.
  const MachineOperand &Op0 = MI.getOperand(0);
  if (!Op0.isReg() || Reg != Op0.getReg())
    return std::nullopt;

  switch (MI.getOpcode()) {
  default:
    return std::nullopt;
  case AArch64::SUBWri:
  case AArch64::SUBXri:
  case AArch64::SUBSWri:
  case AArch64::SUBSXri:
    Sign *= -1;
    [[fallthrough]];
  case AArch64::ADDSWri:
  case AArch64::ADDSXri:
  case AArch64::ADDWri:
  case AArch64::ADDXri: {
    // TODO: Third operand can be global address (usually some string).
    if (!MI.getOperand(0).isReg() || !MI.getOperand(1).isReg() ||
        !MI.getOperand(2).isImm())
      return std::nullopt;
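    // ADD/SUB immediates may carry an optional "LSL #12"; fold that shift into
    // the offset we report.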
    int Shift = MI.getOperand(3).getImm();
    assert((Shift == 0 || Shift == 12) && "Shift can be either 0 or 12");
    Offset = Sign * (MI.getOperand(2).getImm() << Shift);
  }
  }
  return RegImmPair{MI.getOperand(1).getReg(), Offset};
}

/// If the given ORR instruction is a copy, and \p DescribedReg overlaps with
/// the destination register then, if possible, describe the value in terms of
/// the source register.
static std::optional<ParamLoadedValue>
describeORRLoadedValue(const MachineInstr &MI, Register DescribedReg,
                       const TargetInstrInfo *TII,
                       const TargetRegisterInfo *TRI) {
  auto DestSrc = TII->isCopyInstr(MI);
  if (!DestSrc)
    return std::nullopt;

  Register DestReg = DestSrc->Destination->getReg();
  Register SrcReg = DestSrc->Source->getReg();

  auto Expr = DIExpression::get(MI.getMF()->getFunction().getContext(), {});

  // If the described register is the destination, just return the source.
  if (DestReg == DescribedReg)
    return ParamLoadedValue(MachineOperand::CreateReg(SrcReg, false), Expr);

  // ORRWrs zero-extends to 64-bits, so we need to consider such cases.
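  // (A write to a W register implicitly zeroes the top 32 bits of the
  // corresponding X register, e.g. describing X0 for a copy that defines W0.)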
  if (MI.getOpcode() == AArch64::ORRWrs &&
      TRI->isSuperRegister(DestReg, DescribedReg))
    return ParamLoadedValue(MachineOperand::CreateReg(SrcReg, false), Expr);

  // We may need to describe the lower part of an ORRXrs move.
  if (MI.getOpcode() == AArch64::ORRXrs &&
      TRI->isSubRegister(DestReg, DescribedReg)) {
    Register SrcSubReg = TRI->getSubReg(SrcReg, AArch64::sub_32);
    return ParamLoadedValue(MachineOperand::CreateReg(SrcSubReg, false), Expr);
  }

  assert(!TRI->isSuperOrSubRegisterEq(DestReg, DescribedReg) &&
         "Unhandled ORR[XW]rs copy case");

  return std::nullopt;
}

std::optional<ParamLoadedValue>
AArch64InstrInfo::describeLoadedValue(const MachineInstr &MI,
                                      Register Reg) const {
  const MachineFunction *MF = MI.getMF();
  const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
  switch (MI.getOpcode()) {
  case AArch64::MOVZWi:
  case AArch64::MOVZXi: {
    // MOVZWi may be used for producing zero-extended 32-bit immediates in
    // 64-bit parameters, so we need to consider super-registers.
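    // MOVZ shift amounts are multiples of 16 (0, 16, 32, 48 for the 64-bit
    // form), so the materialized value is simply Immediate << Shift.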
    if (!TRI->isSuperRegisterEq(MI.getOperand(0).getReg(), Reg))
      return std::nullopt;

    if (!MI.getOperand(1).isImm())
      return std::nullopt;
    int64_t Immediate = MI.getOperand(1).getImm();
    int Shift = MI.getOperand(2).getImm();
    return ParamLoadedValue(MachineOperand::CreateImm(Immediate << Shift),
                            nullptr);
  }
  case AArch64::ORRWrs:
  case AArch64::ORRXrs:
    return describeORRLoadedValue(MI, Reg, this, TRI);
  }

  return TargetInstrInfo::describeLoadedValue(MI, Reg);
}

bool AArch64InstrInfo::isExtendLikelyToBeFolded(
    MachineInstr &ExtMI, MachineRegisterInfo &MRI) const {
  assert(ExtMI.getOpcode() == TargetOpcode::G_SEXT ||
         ExtMI.getOpcode() == TargetOpcode::G_ZEXT ||
         ExtMI.getOpcode() == TargetOpcode::G_ANYEXT);

  // Anyexts are nops.
  if (ExtMI.getOpcode() == TargetOpcode::G_ANYEXT)
    return true;

  Register DefReg = ExtMI.getOperand(0).getReg();
  if (!MRI.hasOneNonDBGUse(DefReg))
    return false;

  // It's likely that a sext/zext as a G_PTR_ADD offset will be folded into an
  // addressing mode.
  auto *UserMI = &*MRI.use_instr_nodbg_begin(DefReg);
  return UserMI->getOpcode() == TargetOpcode::G_PTR_ADD;
}

uint64_t AArch64InstrInfo::getElementSizeForOpcode(unsigned Opc) const {
  return get(Opc).TSFlags & AArch64::ElementSizeMask;
}

bool AArch64InstrInfo::isPTestLikeOpcode(unsigned Opc) const {
  return get(Opc).TSFlags & AArch64::InstrFlagIsPTestLike;
}

bool AArch64InstrInfo::isWhileOpcode(unsigned Opc) const {
  return get(Opc).TSFlags & AArch64::InstrFlagIsWhile;
}

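// Returns the maximum number of instructions that tail duplication may copy;
// presumably tuned to allow larger blocks when optimizing aggressively.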
unsigned int
AArch64InstrInfo::getTailDuplicateSize(CodeGenOpt::Level OptLevel) const {
  return OptLevel >= CodeGenOpt::Aggressive ? 6 : 2;
}

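// With SLS (straight-line speculation) hardening of BLR enabled, calls are
// emitted with the BLRNoIP opcode so the later SLS mitigation pass can rewrite
// them; otherwise the plain BLR opcode is used.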
unsigned llvm::getBLRCallOpcode(const MachineFunction &MF) {
  if (MF.getSubtarget<AArch64Subtarget>().hardenSlsBlr())
    return AArch64::BLRNoIP;
  else
    return AArch64::BLR;
}

#define GET_INSTRINFO_HELPERS
#define GET_INSTRMAP_INFO
#include "AArch64GenInstrInfo.inc"