//===- AArch64InstrInfo.cpp - AArch64 Instruction Information -------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the AArch64 implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#include "AArch64InstrInfo.h"
#include "AArch64MachineCombinerPattern.h"
#include "AArch64Subtarget.h"
#include "MCTargetDesc/AArch64AddressingModes.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/MC/MCInst.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/TargetRegistry.h"

using namespace llvm;

#define GET_INSTRINFO_CTOR_DTOR
#include "AArch64GenInstrInfo.inc"

AArch64InstrInfo::AArch64InstrInfo(const AArch64Subtarget &STI)
    : AArch64GenInstrInfo(AArch64::ADJCALLSTACKDOWN, AArch64::ADJCALLSTACKUP),
      RI(this, &STI), Subtarget(STI) {}

/// GetInstSizeInBytes - Return the number of bytes of code the specified
/// instruction may be. This returns the maximum number of bytes.
unsigned AArch64InstrInfo::GetInstSizeInBytes(const MachineInstr *MI) const {
  const MachineBasicBlock &MBB = *MI->getParent();
  const MachineFunction *MF = MBB.getParent();
  const MCAsmInfo *MAI = MF->getTarget().getMCAsmInfo();

  if (MI->getOpcode() == AArch64::INLINEASM)
    return getInlineAsmLength(MI->getOperand(0).getSymbolName(), *MAI);

  const MCInstrDesc &Desc = MI->getDesc();
  switch (Desc.getOpcode()) {
  default:
    // Anything not explicitly designated otherwise is a normal 4-byte insn.
    return 4;
  case TargetOpcode::DBG_VALUE:
  case TargetOpcode::EH_LABEL:
  case TargetOpcode::IMPLICIT_DEF:
  case TargetOpcode::KILL:
    return 0;
  }

  llvm_unreachable("GetInstSizeInBytes() - Unable to determine insn size");
}

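// The Cond vector filled in below (and consumed by ReverseBranchCondition()
// and instantiateCondBranch()) uses one of three layouts:
//   Bcc:       { condition code }
//   cbz/cbnz:  { -1, branch opcode, source register }
//   tbz/tbnz:  { -1, branch opcode, source register, bit index }
// A leading -1 marks a folded compare-and-branch.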
static void parseCondBranch(MachineInstr *LastInst, MachineBasicBlock *&Target,
                            SmallVectorImpl<MachineOperand> &Cond) {
  // Block ends with fall-through condbranch.
  switch (LastInst->getOpcode()) {
  default:
    llvm_unreachable("Unknown branch instruction?");
  case AArch64::Bcc:
    Target = LastInst->getOperand(1).getMBB();
    Cond.push_back(LastInst->getOperand(0));
    break;
  case AArch64::CBZW:
  case AArch64::CBZX:
  case AArch64::CBNZW:
  case AArch64::CBNZX:
    Target = LastInst->getOperand(1).getMBB();
    Cond.push_back(MachineOperand::CreateImm(-1));
    Cond.push_back(MachineOperand::CreateImm(LastInst->getOpcode()));
    Cond.push_back(LastInst->getOperand(0));
    break;
  case AArch64::TBZW:
  case AArch64::TBZX:
  case AArch64::TBNZW:
  case AArch64::TBNZX:
    Target = LastInst->getOperand(2).getMBB();
    Cond.push_back(MachineOperand::CreateImm(-1));
    Cond.push_back(MachineOperand::CreateImm(LastInst->getOpcode()));
    Cond.push_back(LastInst->getOperand(0));
    Cond.push_back(LastInst->getOperand(1));
  }
}

// Branch analysis.
bool AArch64InstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
                                     MachineBasicBlock *&TBB,
                                     MachineBasicBlock *&FBB,
                                     SmallVectorImpl<MachineOperand> &Cond,
                                     bool AllowModify) const {
  // If the block has no terminators, it just falls into the block after it.
  MachineBasicBlock::iterator I = MBB.end();
  if (I == MBB.begin())
    return false;
  --I;
  while (I->isDebugValue()) {
    if (I == MBB.begin())
      return false;
    --I;
  }
  if (!isUnpredicatedTerminator(I))
    return false;

  // Get the last instruction in the block.
  MachineInstr *LastInst = I;

  // If there is only one terminator instruction, process it.
  unsigned LastOpc = LastInst->getOpcode();
  if (I == MBB.begin() || !isUnpredicatedTerminator(--I)) {
    if (isUncondBranchOpcode(LastOpc)) {
      TBB = LastInst->getOperand(0).getMBB();
      return false;
    }
    if (isCondBranchOpcode(LastOpc)) {
      // Block ends with fall-through condbranch.
      parseCondBranch(LastInst, TBB, Cond);
      return false;
    }
    return true; // Can't handle indirect branch.
  }

  // Get the instruction before it if it is a terminator.
  MachineInstr *SecondLastInst = I;
  unsigned SecondLastOpc = SecondLastInst->getOpcode();

  // If AllowModify is true and the block ends with two or more unconditional
  // branches, delete all but the first unconditional branch.
  if (AllowModify && isUncondBranchOpcode(LastOpc)) {
    while (isUncondBranchOpcode(SecondLastOpc)) {
      LastInst->eraseFromParent();
      LastInst = SecondLastInst;
      LastOpc = LastInst->getOpcode();
      if (I == MBB.begin() || !isUnpredicatedTerminator(--I)) {
        // Return now; the only remaining terminator is an unconditional
        // branch.
        TBB = LastInst->getOperand(0).getMBB();
        return false;
      } else {
        SecondLastInst = I;
        SecondLastOpc = SecondLastInst->getOpcode();
      }
    }
  }

  // If there are three terminators, we don't know what sort of block this is.
  if (SecondLastInst && I != MBB.begin() && isUnpredicatedTerminator(--I))
    return true;

  // If the block ends with a B and a Bcc, handle it.
  if (isCondBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) {
    parseCondBranch(SecondLastInst, TBB, Cond);
    FBB = LastInst->getOperand(0).getMBB();
    return false;
  }

  // If the block ends with two unconditional branches, handle it. The second
  // one is not executed, so remove it.
  if (isUncondBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) {
    TBB = SecondLastInst->getOperand(0).getMBB();
    I = LastInst;
    if (AllowModify)
      I->eraseFromParent();
    return false;
  }

  // ...likewise if it ends with an indirect branch followed by an
  // unconditional branch.
  if (isIndirectBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) {
    I = LastInst;
    if (AllowModify)
      I->eraseFromParent();
    return true;
  }

  // Otherwise, can't handle this.
  return true;
}

bool AArch64InstrInfo::ReverseBranchCondition(
    SmallVectorImpl<MachineOperand> &Cond) const {
  if (Cond[0].getImm() != -1) {
    // Regular Bcc
    AArch64CC::CondCode CC = (AArch64CC::CondCode)(int)Cond[0].getImm();
    Cond[0].setImm(AArch64CC::getInvertedCondCode(CC));
  } else {
    // Folded compare-and-branch
    switch (Cond[1].getImm()) {
    default:
      llvm_unreachable("Unknown conditional branch!");
    case AArch64::CBZW:
      Cond[1].setImm(AArch64::CBNZW);
      break;
    case AArch64::CBNZW:
      Cond[1].setImm(AArch64::CBZW);
      break;
    case AArch64::CBZX:
      Cond[1].setImm(AArch64::CBNZX);
      break;
    case AArch64::CBNZX:
      Cond[1].setImm(AArch64::CBZX);
      break;
    case AArch64::TBZW:
      Cond[1].setImm(AArch64::TBNZW);
      break;
    case AArch64::TBNZW:
      Cond[1].setImm(AArch64::TBZW);
      break;
    case AArch64::TBZX:
      Cond[1].setImm(AArch64::TBNZX);
      break;
    case AArch64::TBNZX:
      Cond[1].setImm(AArch64::TBZX);
      break;
    }
  }

  return false;
}

unsigned AArch64InstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
  MachineBasicBlock::iterator I = MBB.end();
  if (I == MBB.begin())
    return 0;
  --I;
  while (I->isDebugValue()) {
    if (I == MBB.begin())
      return 0;
    --I;
  }
  if (!isUncondBranchOpcode(I->getOpcode()) &&
      !isCondBranchOpcode(I->getOpcode()))
    return 0;

  // Remove the branch.
  I->eraseFromParent();

  I = MBB.end();

  if (I == MBB.begin())
    return 1;
  --I;
  if (!isCondBranchOpcode(I->getOpcode()))
    return 1;

  // Remove the branch.
  I->eraseFromParent();
  return 2;
}

void AArch64InstrInfo::instantiateCondBranch(
    MachineBasicBlock &MBB, DebugLoc DL, MachineBasicBlock *TBB,
    const SmallVectorImpl<MachineOperand> &Cond) const {
  if (Cond[0].getImm() != -1) {
    // Regular Bcc
    BuildMI(&MBB, DL, get(AArch64::Bcc)).addImm(Cond[0].getImm()).addMBB(TBB);
  } else {
    // Folded compare-and-branch
    // Note that we use addOperand instead of addReg to keep the flags.
    const MachineInstrBuilder MIB =
        BuildMI(&MBB, DL, get(Cond[1].getImm())).addOperand(Cond[2]);
    if (Cond.size() > 3)
      MIB.addImm(Cond[3].getImm());
    MIB.addMBB(TBB);
  }
}

unsigned AArch64InstrInfo::InsertBranch(
    MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB,
    const SmallVectorImpl<MachineOperand> &Cond, DebugLoc DL) const {
  // Shouldn't be a fall through.
  assert(TBB && "InsertBranch must not be told to insert a fallthrough");

  if (!FBB) {
    if (Cond.empty()) // Unconditional branch?
      BuildMI(&MBB, DL, get(AArch64::B)).addMBB(TBB);
    else
      instantiateCondBranch(MBB, DL, TBB, Cond);
    return 1;
  }

  // Two-way conditional branch.
  instantiateCondBranch(MBB, DL, TBB, Cond);
  BuildMI(&MBB, DL, get(AArch64::B)).addMBB(FBB);
  return 2;
}

// Find the original register that VReg is copied from.
static unsigned removeCopies(const MachineRegisterInfo &MRI, unsigned VReg) {
  while (TargetRegisterInfo::isVirtualRegister(VReg)) {
    const MachineInstr *DefMI = MRI.getVRegDef(VReg);
    if (!DefMI->isFullCopy())
      return VReg;
    VReg = DefMI->getOperand(1).getReg();
  }
  return VReg;
}

// Determine if VReg is defined by an instruction that can be folded into a
// csel instruction. If so, return the folded opcode, and the replacement
// register.
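// For example, if VReg is defined by "add w1, w0, #1", a select of VReg can
// fold the increment into a csinc (see insertSelect() below, which also
// inverts the condition when the folded operand was the true side):
//   add  w1, w0, #1
//   csel w2, w1, w3, cc    =>    csinc w2, w3, w0, InvertedCC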
static unsigned canFoldIntoCSel(const MachineRegisterInfo &MRI, unsigned VReg,
                                unsigned *NewVReg = nullptr) {
  VReg = removeCopies(MRI, VReg);
  if (!TargetRegisterInfo::isVirtualRegister(VReg))
    return 0;

  bool Is64Bit = AArch64::GPR64allRegClass.hasSubClassEq(MRI.getRegClass(VReg));
  const MachineInstr *DefMI = MRI.getVRegDef(VReg);
  unsigned Opc = 0;
  unsigned SrcOpNum = 0;
  switch (DefMI->getOpcode()) {
  case AArch64::ADDSXri:
  case AArch64::ADDSWri:
    // If NZCV is used, do not fold.
    if (DefMI->findRegisterDefOperandIdx(AArch64::NZCV, true) == -1)
      return 0;
    // Fall through to ADDXri and ADDWri.
  case AArch64::ADDXri:
  case AArch64::ADDWri:
    // add x, 1 -> csinc.
    if (!DefMI->getOperand(2).isImm() || DefMI->getOperand(2).getImm() != 1 ||
        DefMI->getOperand(3).getImm() != 0)
      return 0;
    SrcOpNum = 1;
    Opc = Is64Bit ? AArch64::CSINCXr : AArch64::CSINCWr;
    break;

  case AArch64::ORNXrr:
  case AArch64::ORNWrr: {
    // not x -> csinv, represented as orn dst, xzr, src.
    unsigned ZReg = removeCopies(MRI, DefMI->getOperand(1).getReg());
    if (ZReg != AArch64::XZR && ZReg != AArch64::WZR)
      return 0;
    SrcOpNum = 2;
    Opc = Is64Bit ? AArch64::CSINVXr : AArch64::CSINVWr;
    break;
  }

  case AArch64::SUBSXrr:
  case AArch64::SUBSWrr:
    // If NZCV is used, do not fold.
    if (DefMI->findRegisterDefOperandIdx(AArch64::NZCV, true) == -1)
      return 0;
    // Fall through to SUBXrr and SUBWrr.
  case AArch64::SUBXrr:
  case AArch64::SUBWrr: {
    // neg x -> csneg, represented as sub dst, xzr, src.
    unsigned ZReg = removeCopies(MRI, DefMI->getOperand(1).getReg());
    if (ZReg != AArch64::XZR && ZReg != AArch64::WZR)
      return 0;
    SrcOpNum = 2;
    Opc = Is64Bit ? AArch64::CSNEGXr : AArch64::CSNEGWr;
    break;
  }
  default:
    return 0;
  }
  assert(Opc && SrcOpNum && "Missing parameters");

  if (NewVReg)
    *NewVReg = DefMI->getOperand(SrcOpNum).getReg();
  return Opc;
}

bool AArch64InstrInfo::canInsertSelect(
    const MachineBasicBlock &MBB, const SmallVectorImpl<MachineOperand> &Cond,
    unsigned TrueReg, unsigned FalseReg, int &CondCycles, int &TrueCycles,
    int &FalseCycles) const {
  // Check register classes.
  const MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
  const TargetRegisterClass *RC =
      RI.getCommonSubClass(MRI.getRegClass(TrueReg), MRI.getRegClass(FalseReg));
  if (!RC)
    return false;

  // Expanding cbz/tbz requires an extra cycle of latency on the condition.
  unsigned ExtraCondLat = Cond.size() != 1;

  // GPRs are handled by csel.
  // FIXME: Fold in x+1, -x, and ~x when applicable.
  if (AArch64::GPR64allRegClass.hasSubClassEq(RC) ||
      AArch64::GPR32allRegClass.hasSubClassEq(RC)) {
    // Single-cycle csel, csinc, csinv, and csneg.
    CondCycles = 1 + ExtraCondLat;
    TrueCycles = FalseCycles = 1;
    if (canFoldIntoCSel(MRI, TrueReg))
      TrueCycles = 0;
    else if (canFoldIntoCSel(MRI, FalseReg))
      FalseCycles = 0;
    return true;
  }

  // Scalar floating point is handled by fcsel.
  // FIXME: Form fabs, fmin, and fmax when applicable.
  if (AArch64::FPR64RegClass.hasSubClassEq(RC) ||
      AArch64::FPR32RegClass.hasSubClassEq(RC)) {
    CondCycles = 5 + ExtraCondLat;
    TrueCycles = FalseCycles = 2;
    return true;
  }

  // Can't do vectors.
  return false;
}

void AArch64InstrInfo::insertSelect(MachineBasicBlock &MBB,
                                    MachineBasicBlock::iterator I, DebugLoc DL,
                                    unsigned DstReg,
                                    const SmallVectorImpl<MachineOperand> &Cond,
                                    unsigned TrueReg, unsigned FalseReg) const {
  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();

  // Parse the condition code, see parseCondBranch() above.
  AArch64CC::CondCode CC;
  switch (Cond.size()) {
  default:
    llvm_unreachable("Unknown condition opcode in Cond");
  case 1: // b.cc
    CC = AArch64CC::CondCode(Cond[0].getImm());
    break;
  case 3: { // cbz/cbnz
    // We must insert a compare against 0.
    bool Is64Bit;
    switch (Cond[1].getImm()) {
    default:
      llvm_unreachable("Unknown branch opcode in Cond");
    case AArch64::CBZW:
      Is64Bit = false;
      CC = AArch64CC::EQ;
      break;
    case AArch64::CBZX:
      Is64Bit = true;
      CC = AArch64CC::EQ;
      break;
    case AArch64::CBNZW:
      Is64Bit = false;
      CC = AArch64CC::NE;
      break;
    case AArch64::CBNZX:
      Is64Bit = true;
      CC = AArch64CC::NE;
      break;
    }
    unsigned SrcReg = Cond[2].getReg();
    if (Is64Bit) {
      // cmp reg, #0 is actually subs xzr, reg, #0.
      MRI.constrainRegClass(SrcReg, &AArch64::GPR64spRegClass);
      BuildMI(MBB, I, DL, get(AArch64::SUBSXri), AArch64::XZR)
          .addReg(SrcReg)
          .addImm(0)
          .addImm(0);
    } else {
      MRI.constrainRegClass(SrcReg, &AArch64::GPR32spRegClass);
      BuildMI(MBB, I, DL, get(AArch64::SUBSWri), AArch64::WZR)
          .addReg(SrcReg)
          .addImm(0)
          .addImm(0);
    }
    break;
  }
  case 4: { // tbz/tbnz
    // We must insert a tst instruction.
    switch (Cond[1].getImm()) {
    default:
      llvm_unreachable("Unknown branch opcode in Cond");
    case AArch64::TBZW:
    case AArch64::TBZX:
      CC = AArch64CC::EQ;
      break;
    case AArch64::TBNZW:
    case AArch64::TBNZX:
      CC = AArch64CC::NE;
      break;
    }
    // tst reg, #(1 << foo) is actually ands xzr, reg, #(1 << foo).
    if (Cond[1].getImm() == AArch64::TBZW || Cond[1].getImm() == AArch64::TBNZW)
      BuildMI(MBB, I, DL, get(AArch64::ANDSWri), AArch64::WZR)
          .addReg(Cond[2].getReg())
          .addImm(
              AArch64_AM::encodeLogicalImmediate(1ull << Cond[3].getImm(), 32));
    else
      BuildMI(MBB, I, DL, get(AArch64::ANDSXri), AArch64::XZR)
          .addReg(Cond[2].getReg())
          .addImm(
              AArch64_AM::encodeLogicalImmediate(1ull << Cond[3].getImm(), 64));
    break;
  }
  }

  unsigned Opc = 0;
  const TargetRegisterClass *RC = nullptr;
  bool TryFold = false;
  if (MRI.constrainRegClass(DstReg, &AArch64::GPR64RegClass)) {
    RC = &AArch64::GPR64RegClass;
    Opc = AArch64::CSELXr;
    TryFold = true;
  } else if (MRI.constrainRegClass(DstReg, &AArch64::GPR32RegClass)) {
    RC = &AArch64::GPR32RegClass;
    Opc = AArch64::CSELWr;
    TryFold = true;
  } else if (MRI.constrainRegClass(DstReg, &AArch64::FPR64RegClass)) {
    RC = &AArch64::FPR64RegClass;
    Opc = AArch64::FCSELDrrr;
  } else if (MRI.constrainRegClass(DstReg, &AArch64::FPR32RegClass)) {
    RC = &AArch64::FPR32RegClass;
    Opc = AArch64::FCSELSrrr;
  }
  assert(RC && "Unsupported regclass");

  // Try folding simple instructions into the csel.
  if (TryFold) {
    unsigned NewVReg = 0;
    unsigned FoldedOpc = canFoldIntoCSel(MRI, TrueReg, &NewVReg);
    if (FoldedOpc) {
      // The folded opcodes csinc, csinv and csneg apply the operation to
      // FalseReg, so we need to invert the condition.
      CC = AArch64CC::getInvertedCondCode(CC);
      TrueReg = FalseReg;
    } else
      FoldedOpc = canFoldIntoCSel(MRI, FalseReg, &NewVReg);

    // Fold the operation. Leave any dead instructions for DCE to clean up.
    if (FoldedOpc) {
      FalseReg = NewVReg;
      Opc = FoldedOpc;
      // This extends the live range of NewVReg.
      MRI.clearKillFlags(NewVReg);
    }
  }

  // Pull all virtual registers into the appropriate class.
  MRI.constrainRegClass(TrueReg, RC);
  MRI.constrainRegClass(FalseReg, RC);

  // Insert the csel.
  BuildMI(MBB, I, DL, get(Opc), DstReg).addReg(TrueReg).addReg(FalseReg).addImm(
      CC);
}

// FIXME: this implementation should be micro-architecture dependent, so a
// micro-architecture target hook should be introduced here in future.
bool AArch64InstrInfo::isAsCheapAsAMove(const MachineInstr *MI) const {
  if (!Subtarget.isCortexA57() && !Subtarget.isCortexA53())
    return MI->isAsCheapAsAMove();

  switch (MI->getOpcode()) {
  default:
    return false;

  // add/sub on register without shift
  case AArch64::ADDWri:
  case AArch64::ADDXri:
  case AArch64::SUBWri:
  case AArch64::SUBXri:
    return (MI->getOperand(3).getImm() == 0);

  // logical ops on immediate
  case AArch64::ANDWri:
  case AArch64::ANDXri:
  case AArch64::EORWri:
  case AArch64::EORXri:
  case AArch64::ORRWri:
  case AArch64::ORRXri:
    return true;

  // logical ops on register without shift
  case AArch64::ANDWrr:
  case AArch64::ANDXrr:
  case AArch64::BICWrr:
  case AArch64::BICXrr:
  case AArch64::EONWrr:
  case AArch64::EONXrr:
  case AArch64::EORWrr:
  case AArch64::EORXrr:
  case AArch64::ORNWrr:
  case AArch64::ORNXrr:
  case AArch64::ORRWrr:
  case AArch64::ORRXrr:
    return true;
  }

  llvm_unreachable("Unknown opcode to check as cheap as a move!");
}

bool AArch64InstrInfo::isCoalescableExtInstr(const MachineInstr &MI,
                                             unsigned &SrcReg, unsigned &DstReg,
                                             unsigned &SubIdx) const {
  switch (MI.getOpcode()) {
  default:
    return false;
  case AArch64::SBFMXri: // aka sxtw
  case AArch64::UBFMXri: // aka uxtw
    // Check for the 32 -> 64 bit extension case, these instructions can do
    // much more.
    if (MI.getOperand(2).getImm() != 0 || MI.getOperand(3).getImm() != 31)
      return false;
    // This is a signed or unsigned 32 -> 64 bit extension.
    SrcReg = MI.getOperand(1).getReg();
    DstReg = MI.getOperand(0).getReg();
    SubIdx = AArch64::sub_32;
    return true;
  }
}

bool
AArch64InstrInfo::areMemAccessesTriviallyDisjoint(MachineInstr *MIa,
                                                  MachineInstr *MIb,
                                                  AliasAnalysis *AA) const {
  const TargetRegisterInfo *TRI = &getRegisterInfo();
  unsigned BaseRegA = 0, BaseRegB = 0;
  int OffsetA = 0, OffsetB = 0;
  int WidthA = 0, WidthB = 0;

  assert(MIa && (MIa->mayLoad() || MIa->mayStore()) &&
         "MIa must be a store or a load");
  assert(MIb && (MIb->mayLoad() || MIb->mayStore()) &&
         "MIb must be a store or a load");

  if (MIa->hasUnmodeledSideEffects() || MIb->hasUnmodeledSideEffects() ||
      MIa->hasOrderedMemoryRef() || MIb->hasOrderedMemoryRef())
    return false;

  // Retrieve the base register, the offset from the base register, and the
  // width; the width is the size of memory that is being loaded/stored
  // (e.g. 1, 2, 4, 8). If the base registers are identical and the lower
  // access's offset plus its width does not reach the higher access's offset,
  // then the two accesses are disjoint.
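  // For example, a 4-byte store at [x0, #0] and an 8-byte load at [x0, #8]
  // are disjoint, since 0 + 4 <= 8.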
  if (getLdStBaseRegImmOfsWidth(MIa, BaseRegA, OffsetA, WidthA, TRI) &&
      getLdStBaseRegImmOfsWidth(MIb, BaseRegB, OffsetB, WidthB, TRI)) {
    if (BaseRegA == BaseRegB) {
      int LowOffset = OffsetA < OffsetB ? OffsetA : OffsetB;
      int HighOffset = OffsetA < OffsetB ? OffsetB : OffsetA;
      int LowWidth = (LowOffset == OffsetA) ? WidthA : WidthB;
      if (LowOffset + LowWidth <= HighOffset)
        return true;
    }
  }
  return false;
}

/// analyzeCompare - For a comparison instruction, return the source registers
/// in SrcReg and SrcReg2, and the value it compares against in CmpValue.
/// Return true if the comparison instruction can be analyzed.
bool AArch64InstrInfo::analyzeCompare(const MachineInstr *MI, unsigned &SrcReg,
                                      unsigned &SrcReg2, int &CmpMask,
                                      int &CmpValue) const {
  switch (MI->getOpcode()) {
  default:
    break;
  case AArch64::SUBSWrr:
  case AArch64::SUBSWrs:
  case AArch64::SUBSWrx:
  case AArch64::SUBSXrr:
  case AArch64::SUBSXrs:
  case AArch64::SUBSXrx:
  case AArch64::ADDSWrr:
  case AArch64::ADDSWrs:
  case AArch64::ADDSWrx:
  case AArch64::ADDSXrr:
  case AArch64::ADDSXrs:
  case AArch64::ADDSXrx:
    // Replace SUBSWrr with SUBWrr if NZCV is not used.
    SrcReg = MI->getOperand(1).getReg();
    SrcReg2 = MI->getOperand(2).getReg();
    CmpMask = ~0;
    CmpValue = 0;
    return true;
  case AArch64::SUBSWri:
  case AArch64::ADDSWri:
  case AArch64::SUBSXri:
  case AArch64::ADDSXri:
    SrcReg = MI->getOperand(1).getReg();
    SrcReg2 = 0;
    CmpMask = ~0;
    // FIXME: CmpValue is deliberately flattened to 0 or 1 here; see the
    // matching assumption in optimizeCompareInstr.
    CmpValue = (MI->getOperand(2).getImm() != 0);
    return true;
  case AArch64::ANDSWri:
  case AArch64::ANDSXri:
    // ANDS does not use the same encoding scheme as the other xxxS
    // instructions.
    SrcReg = MI->getOperand(1).getReg();
    SrcReg2 = 0;
    CmpMask = ~0;
    // FIXME: The return type of decodeLogicalImmediate is uint64_t, while the
    // type of CmpValue is int; converting uint64_t to int would lose the high
    // 32 bits (this caused a bug in spec2006-483.xalancbmk). Since CmpValue is
    // only ever compared against zero in optimizeCompareInstr, flatten it to
    // 0 or 1 instead.
    CmpValue = (AArch64_AM::decodeLogicalImmediate(
                    MI->getOperand(2).getImm(),
                    MI->getOpcode() == AArch64::ANDSWri ? 32 : 64) != 0);
    return true;
  }

  return false;
}

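/// Re-constrain the register class of every register operand of \p Instr to
/// the class required by its (possibly new) opcode. Returns false if a
/// physical register operand lies outside its required class or a virtual
/// register cannot be constrained to it.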
static bool UpdateOperandRegClass(MachineInstr *Instr) {
  MachineBasicBlock *MBB = Instr->getParent();
  assert(MBB && "Can't get MachineBasicBlock here");
  MachineFunction *MF = MBB->getParent();
  assert(MF && "Can't get MachineFunction here");
  const TargetMachine *TM = &MF->getTarget();
  const TargetInstrInfo *TII = TM->getSubtargetImpl()->getInstrInfo();
  const TargetRegisterInfo *TRI = TM->getSubtargetImpl()->getRegisterInfo();
  MachineRegisterInfo *MRI = &MF->getRegInfo();

  for (unsigned OpIdx = 0, EndIdx = Instr->getNumOperands(); OpIdx < EndIdx;
       ++OpIdx) {
    MachineOperand &MO = Instr->getOperand(OpIdx);
    const TargetRegisterClass *OpRegCstraints =
        Instr->getRegClassConstraint(OpIdx, TII, TRI);

    // If there's no constraint, there's nothing to do.
    if (!OpRegCstraints)
      continue;
    // If the operand is a frame index, there's nothing to do here.
    // A frame index operand will resolve correctly during PEI.
    if (MO.isFI())
      continue;

    assert(MO.isReg() &&
           "Operand has register constraints without being a register!");

    unsigned Reg = MO.getReg();
    if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
      if (!OpRegCstraints->contains(Reg))
        return false;
    } else if (!OpRegCstraints->hasSubClassEq(MRI->getRegClass(Reg)) &&
               !MRI->constrainRegClass(Reg, OpRegCstraints))
      return false;
  }

  return true;
}

/// \brief Return the opcode that does not set flags when possible - otherwise
/// return the original opcode. The caller is responsible to do the actual
/// substitution and legality checking.
static unsigned convertFlagSettingOpcode(const MachineInstr *MI) {
  // Don't convert all compare instructions, because for some the zero register
  // encoding becomes the sp register.
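  // For example, "cmp w0, #4" is "subs wzr, w0, #4"; in the non-flag-setting
  // SUBWri encoding, register 31 denotes WSP rather than WZR, so dropping the
  // S would redirect the (discarded) result into the stack pointer.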
  bool MIDefinesZeroReg = false;
  if (MI->definesRegister(AArch64::WZR) || MI->definesRegister(AArch64::XZR))
    MIDefinesZeroReg = true;

  switch (MI->getOpcode()) {
  default:
    return MI->getOpcode();
  case AArch64::ADDSWrr:
    return AArch64::ADDWrr;
  case AArch64::ADDSWri:
    return MIDefinesZeroReg ? AArch64::ADDSWri : AArch64::ADDWri;
  case AArch64::ADDSWrs:
    return MIDefinesZeroReg ? AArch64::ADDSWrs : AArch64::ADDWrs;
  case AArch64::ADDSWrx:
    return AArch64::ADDWrx;
  case AArch64::ADDSXrr:
    return AArch64::ADDXrr;
  case AArch64::ADDSXri:
    return MIDefinesZeroReg ? AArch64::ADDSXri : AArch64::ADDXri;
  case AArch64::ADDSXrs:
    return MIDefinesZeroReg ? AArch64::ADDSXrs : AArch64::ADDXrs;
  case AArch64::ADDSXrx:
    return AArch64::ADDXrx;
  case AArch64::SUBSWrr:
    return AArch64::SUBWrr;
  case AArch64::SUBSWri:
    return MIDefinesZeroReg ? AArch64::SUBSWri : AArch64::SUBWri;
  case AArch64::SUBSWrs:
    return MIDefinesZeroReg ? AArch64::SUBSWrs : AArch64::SUBWrs;
  case AArch64::SUBSWrx:
    return AArch64::SUBWrx;
  case AArch64::SUBSXrr:
    return AArch64::SUBXrr;
  case AArch64::SUBSXri:
    return MIDefinesZeroReg ? AArch64::SUBSXri : AArch64::SUBXri;
  case AArch64::SUBSXrs:
    return MIDefinesZeroReg ? AArch64::SUBSXrs : AArch64::SUBXrs;
  case AArch64::SUBSXrx:
    return AArch64::SUBXrx;
  }
}

/// True when the condition code could be modified on the instruction
/// trace starting at \p From and ending at \p To.
static bool modifiesConditionCode(MachineInstr *From, MachineInstr *To,
                                  const bool CheckOnlyCCWrites,
                                  const TargetRegisterInfo *TRI) {
  // We iterate backward starting at \p To until we hit \p From.
  MachineBasicBlock::iterator I = To, E = From, B = To->getParent()->begin();

  // Early exit if To is at the beginning of the BB.
  if (I == B)
    return true;

  // Check whether the definition of SrcReg is in the same basic block as
  // Compare. If not, assume the condition code gets modified on some path.
  if (To->getParent() != From->getParent())
    return true;

  // Check that NZCV isn't set on the trace.
  for (--I; I != E; --I) {
    const MachineInstr &Instr = *I;

    if (Instr.modifiesRegister(AArch64::NZCV, TRI) ||
        (!CheckOnlyCCWrites && Instr.readsRegister(AArch64::NZCV, TRI)))
      // This instruction modifies or uses NZCV after the one we want to
      // change.
      return true;
    if (I == B)
      // We currently don't allow the instruction trace to cross basic
      // block boundaries.
      return true;
  }
  return false;
}

/// optimizeCompareInstr - Convert the instruction supplying the argument to
/// the comparison into one that sets the zero bit in the flags register, and
/// remove the comparison when it becomes redundant.
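/// For example:
///   add  w1, w0, #1         adds w1, w0, #1
///   cmp  w1, #0       =>
///   b.eq ...                b.eq ...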
bool AArch64InstrInfo::optimizeCompareInstr(
    MachineInstr *CmpInstr, unsigned SrcReg, unsigned SrcReg2, int CmpMask,
    int CmpValue, const MachineRegisterInfo *MRI) const {

  // Replace SUBSWrr with SUBWrr if NZCV is not used.
  int Cmp_NZCV = CmpInstr->findRegisterDefOperandIdx(AArch64::NZCV, true);
  if (Cmp_NZCV != -1) {
    if (CmpInstr->definesRegister(AArch64::WZR) ||
        CmpInstr->definesRegister(AArch64::XZR)) {
      CmpInstr->eraseFromParent();
      return true;
    }
    unsigned Opc = CmpInstr->getOpcode();
    unsigned NewOpc = convertFlagSettingOpcode(CmpInstr);
    if (NewOpc == Opc)
      return false;
    const MCInstrDesc &MCID = get(NewOpc);
    CmpInstr->setDesc(MCID);
    CmpInstr->RemoveOperand(Cmp_NZCV);
    bool succeeded = UpdateOperandRegClass(CmpInstr);
    (void)succeeded;
    assert(succeeded && "Some operands reg class are incompatible!");
    return true;
  }

  // Continue only if we have an "ri" form whose immediate is zero.
  // FIXME: CmpValue has already been flattened to 0 or 1 in analyzeCompare.
  assert((CmpValue == 0 || CmpValue == 1) && "CmpValue must be 0 or 1!");
  if (CmpValue != 0 || SrcReg2 != 0)
    return false;

  // CmpInstr can only be treated as a pure compare if its destination
  // register is otherwise unused.
  if (!MRI->use_nodbg_empty(CmpInstr->getOperand(0).getReg()))
    return false;

  // Get the unique definition of SrcReg.
  MachineInstr *MI = MRI->getUniqueVRegDef(SrcReg);
  if (!MI)
    return false;

  bool CheckOnlyCCWrites = false;
  const TargetRegisterInfo *TRI = &getRegisterInfo();
  if (modifiesConditionCode(MI, CmpInstr, CheckOnlyCCWrites, TRI))
    return false;

  unsigned NewOpc = MI->getOpcode();
  switch (MI->getOpcode()) {
  default:
    return false;
  case AArch64::ADDSWrr:
  case AArch64::ADDSWri:
  case AArch64::ADDSXrr:
  case AArch64::ADDSXri:
  case AArch64::SUBSWrr:
  case AArch64::SUBSWri:
  case AArch64::SUBSXrr:
  case AArch64::SUBSXri:
    break;
  case AArch64::ADDWrr: NewOpc = AArch64::ADDSWrr; break;
  case AArch64::ADDWri: NewOpc = AArch64::ADDSWri; break;
  case AArch64::ADDXrr: NewOpc = AArch64::ADDSXrr; break;
  case AArch64::ADDXri: NewOpc = AArch64::ADDSXri; break;
  case AArch64::ADCWr: NewOpc = AArch64::ADCSWr; break;
  case AArch64::ADCXr: NewOpc = AArch64::ADCSXr; break;
  case AArch64::SUBWrr: NewOpc = AArch64::SUBSWrr; break;
  case AArch64::SUBWri: NewOpc = AArch64::SUBSWri; break;
  case AArch64::SUBXrr: NewOpc = AArch64::SUBSXrr; break;
  case AArch64::SUBXri: NewOpc = AArch64::SUBSXri; break;
  case AArch64::SBCWr: NewOpc = AArch64::SBCSWr; break;
  case AArch64::SBCXr: NewOpc = AArch64::SBCSXr; break;
  case AArch64::ANDWri: NewOpc = AArch64::ANDSWri; break;
  case AArch64::ANDXri: NewOpc = AArch64::ANDSXri; break;
  }

  // Scan forward for uses of NZCV. If any use requires checking the V bit
  // (the condition codes listed below), this transformation is not safe,
  // since the replacement instruction may set V differently. It is safe to
  // remove CmpInstr if NZCV is redefined or killed first; if we reach the end
  // of the basic block, we also need to check whether NZCV is live-out.
  bool IsSafe = false;
  for (MachineBasicBlock::iterator I = CmpInstr,
                                   E = CmpInstr->getParent()->end();
       !IsSafe && ++I != E;) {
    const MachineInstr &Instr = *I;
    for (unsigned IO = 0, EO = Instr.getNumOperands(); !IsSafe && IO != EO;
         ++IO) {
      const MachineOperand &MO = Instr.getOperand(IO);
      if (MO.isRegMask() && MO.clobbersPhysReg(AArch64::NZCV)) {
        IsSafe = true;
        break;
      }
      if (!MO.isReg() || MO.getReg() != AArch64::NZCV)
        continue;
      if (MO.isDef()) {
        IsSafe = true;
        break;
      }

      // Decode the condition code.
      unsigned Opc = Instr.getOpcode();
      AArch64CC::CondCode CC;
      switch (Opc) {
      default:
        return false;
      case AArch64::Bcc:
        CC = (AArch64CC::CondCode)Instr.getOperand(IO - 2).getImm();
        break;
      case AArch64::CSINVWr:
      case AArch64::CSINVXr:
      case AArch64::CSINCWr:
      case AArch64::CSINCXr:
      case AArch64::CSELWr:
      case AArch64::CSELXr:
      case AArch64::CSNEGWr:
      case AArch64::CSNEGXr:
      case AArch64::FCSELSrrr:
      case AArch64::FCSELDrrr:
        CC = (AArch64CC::CondCode)Instr.getOperand(IO - 1).getImm();
        break;
      }

      // It is not safe to remove the compare instruction if Overflow (V) is
      // used.
      switch (CC) {
      default:
        // NZCV can be used multiple times, we should continue.
        break;
      case AArch64CC::VS:
      case AArch64CC::VC:
      case AArch64CC::GE:
      case AArch64CC::LT:
      case AArch64CC::GT:
      case AArch64CC::LE:
        return false;
      }
    }
  }

  // If NZCV is neither killed nor re-defined, we should check whether it is
  // live-out. If it is live-out, do not optimize.
  if (!IsSafe) {
    MachineBasicBlock *ParentBlock = CmpInstr->getParent();
    for (auto *MBB : ParentBlock->successors())
      if (MBB->isLiveIn(AArch64::NZCV))
        return false;
  }

  // Update the instruction to set NZCV.
  MI->setDesc(get(NewOpc));
  CmpInstr->eraseFromParent();
  bool succeeded = UpdateOperandRegClass(MI);
  (void)succeeded;
  assert(succeeded && "Some operands reg class are incompatible!");
  MI->addRegisterDefined(AArch64::NZCV, TRI);
  return true;
}

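// Expand the LOAD_STACK_GUARD pseudo into an actual load of the stack-guard
// global, via the GOT, a MOVZ/MOVK sequence (large code model), or ADRP+LDR,
// depending on how the global variable is classified.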
bool
AArch64InstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const {
  if (MI->getOpcode() != TargetOpcode::LOAD_STACK_GUARD)
    return false;

  MachineBasicBlock &MBB = *MI->getParent();
  DebugLoc DL = MI->getDebugLoc();
  unsigned Reg = MI->getOperand(0).getReg();
  const GlobalValue *GV =
      cast<GlobalValue>((*MI->memoperands_begin())->getValue());
  const TargetMachine &TM = MBB.getParent()->getTarget();
  unsigned char OpFlags = Subtarget.ClassifyGlobalReference(GV, TM);
  const unsigned char MO_NC = AArch64II::MO_NC;

  if ((OpFlags & AArch64II::MO_GOT) != 0) {
    BuildMI(MBB, MI, DL, get(AArch64::LOADgot), Reg)
        .addGlobalAddress(GV, 0, AArch64II::MO_GOT);
    BuildMI(MBB, MI, DL, get(AArch64::LDRXui), Reg)
        .addReg(Reg, RegState::Kill).addImm(0)
        .addMemOperand(*MI->memoperands_begin());
  } else if (TM.getCodeModel() == CodeModel::Large) {
    BuildMI(MBB, MI, DL, get(AArch64::MOVZXi), Reg)
        .addGlobalAddress(GV, 0, AArch64II::MO_G3).addImm(48);
    BuildMI(MBB, MI, DL, get(AArch64::MOVKXi), Reg)
        .addReg(Reg, RegState::Kill)
        .addGlobalAddress(GV, 0, AArch64II::MO_G2 | MO_NC).addImm(32);
    BuildMI(MBB, MI, DL, get(AArch64::MOVKXi), Reg)
        .addReg(Reg, RegState::Kill)
        .addGlobalAddress(GV, 0, AArch64II::MO_G1 | MO_NC).addImm(16);
    BuildMI(MBB, MI, DL, get(AArch64::MOVKXi), Reg)
        .addReg(Reg, RegState::Kill)
        .addGlobalAddress(GV, 0, AArch64II::MO_G0 | MO_NC).addImm(0);
    BuildMI(MBB, MI, DL, get(AArch64::LDRXui), Reg)
        .addReg(Reg, RegState::Kill).addImm(0)
        .addMemOperand(*MI->memoperands_begin());
  } else {
    BuildMI(MBB, MI, DL, get(AArch64::ADRP), Reg)
        .addGlobalAddress(GV, 0, OpFlags | AArch64II::MO_PAGE);
    unsigned char LoFlags = OpFlags | AArch64II::MO_PAGEOFF | MO_NC;
    BuildMI(MBB, MI, DL, get(AArch64::LDRXui), Reg)
        .addReg(Reg, RegState::Kill)
        .addGlobalAddress(GV, 0, LoFlags)
        .addMemOperand(*MI->memoperands_begin());
  }

  MBB.erase(MI);

  return true;
}

/// Return true if this instruction has a shifted register operand with a
/// non-zero shift amount.
bool AArch64InstrInfo::hasShiftedReg(const MachineInstr *MI) const {
  switch (MI->getOpcode()) {
  default:
    break;
  case AArch64::ADDSWrs:
  case AArch64::ADDSXrs:
  case AArch64::ADDWrs:
  case AArch64::ADDXrs:
  case AArch64::ANDSWrs:
  case AArch64::ANDSXrs:
  case AArch64::ANDWrs:
  case AArch64::ANDXrs:
  case AArch64::BICSWrs:
  case AArch64::BICSXrs:
  case AArch64::BICWrs:
  case AArch64::BICXrs:
  case AArch64::CRC32Brr:
  case AArch64::CRC32CBrr:
  case AArch64::CRC32CHrr:
  case AArch64::CRC32CWrr:
  case AArch64::CRC32CXrr:
  case AArch64::CRC32Hrr:
  case AArch64::CRC32Wrr:
  case AArch64::CRC32Xrr:
  case AArch64::EONWrs:
  case AArch64::EONXrs:
  case AArch64::EORWrs:
  case AArch64::EORXrs:
  case AArch64::ORNWrs:
  case AArch64::ORNXrs:
  case AArch64::ORRWrs:
  case AArch64::ORRXrs:
  case AArch64::SUBSWrs:
  case AArch64::SUBSXrs:
  case AArch64::SUBWrs:
  case AArch64::SUBXrs:
    if (MI->getOperand(3).isImm()) {
      unsigned val = MI->getOperand(3).getImm();
      return (val != 0);
    }
    break;
  }
  return false;
}

/// Return true if this instruction has an extended register operand with a
/// non-zero extend/shift amount.
bool AArch64InstrInfo::hasExtendedReg(const MachineInstr *MI) const {
  switch (MI->getOpcode()) {
  default:
    break;
  case AArch64::ADDSWrx:
  case AArch64::ADDSXrx:
  case AArch64::ADDSXrx64:
  case AArch64::ADDWrx:
  case AArch64::ADDXrx:
  case AArch64::ADDXrx64:
  case AArch64::SUBSWrx:
  case AArch64::SUBSXrx:
  case AArch64::SUBSXrx64:
  case AArch64::SUBWrx:
  case AArch64::SUBXrx:
  case AArch64::SUBXrx64:
    if (MI->getOperand(3).isImm()) {
      unsigned val = MI->getOperand(3).getImm();
      return (val != 0);
    }
    break;
  }

  return false;
}

// Return true if this instruction simply sets its single destination register
// to zero. This is equivalent to a register rename of the zero-register.
bool AArch64InstrInfo::isGPRZero(const MachineInstr *MI) const {
  switch (MI->getOpcode()) {
  default:
    break;
  case AArch64::MOVZWi:
  case AArch64::MOVZXi: // movz Rd, #0 (LSL #0)
    if (MI->getOperand(1).isImm() && MI->getOperand(1).getImm() == 0) {
      assert(MI->getDesc().getNumOperands() == 3 &&
             MI->getOperand(2).getImm() == 0 && "invalid MOVZi operands");
      return true;
    }
    break;
  case AArch64::ANDWri: // and Rd, Rzr, #imm
    return MI->getOperand(1).getReg() == AArch64::WZR;
  case AArch64::ANDXri:
    return MI->getOperand(1).getReg() == AArch64::XZR;
  case TargetOpcode::COPY:
    return MI->getOperand(1).getReg() == AArch64::WZR;
  }
  return false;
}

// Return true if this instruction simply renames a general register without
// modifying bits.
bool AArch64InstrInfo::isGPRCopy(const MachineInstr *MI) const {
  switch (MI->getOpcode()) {
  default:
    break;
  case TargetOpcode::COPY: {
    // GPR32 copies will be lowered to ORRXrs
    unsigned DstReg = MI->getOperand(0).getReg();
    return (AArch64::GPR32RegClass.contains(DstReg) ||
            AArch64::GPR64RegClass.contains(DstReg));
  }
  case AArch64::ORRXrs: // orr Xd, Xzr, Xm (LSL #0)
    if (MI->getOperand(1).getReg() == AArch64::XZR) {
      assert(MI->getDesc().getNumOperands() == 4 &&
             MI->getOperand(3).getImm() == 0 && "invalid ORRrs operands");
      return true;
    }
    break;
  case AArch64::ADDXri: // add Xd, Xn, #0 (LSL #0)
    if (MI->getOperand(2).getImm() == 0) {
      assert(MI->getDesc().getNumOperands() == 4 &&
             MI->getOperand(3).getImm() == 0 && "invalid ADDXri operands");
      return true;
    }
    break;
  }
  return false;
}

// Return true if this instruction simply renames a floating-point register
// without modifying bits.
bool AArch64InstrInfo::isFPRCopy(const MachineInstr *MI) const {
  switch (MI->getOpcode()) {
  default:
    break;
  case TargetOpcode::COPY: {
    // FPR64 copies will be lowered to ORR.16b
    unsigned DstReg = MI->getOperand(0).getReg();
    return (AArch64::FPR64RegClass.contains(DstReg) ||
            AArch64::FPR128RegClass.contains(DstReg));
  }
  case AArch64::ORRv16i8:
    if (MI->getOperand(1).getReg() == MI->getOperand(2).getReg()) {
      assert(MI->getDesc().getNumOperands() == 3 && MI->getOperand(0).isReg() &&
             "invalid ORRv16i8 operands");
      return true;
    }
    break;
  }
  return false;
}

unsigned AArch64InstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
                                               int &FrameIndex) const {
  switch (MI->getOpcode()) {
  default:
    break;
  case AArch64::LDRWui:
  case AArch64::LDRXui:
  case AArch64::LDRBui:
  case AArch64::LDRHui:
  case AArch64::LDRSui:
  case AArch64::LDRDui:
  case AArch64::LDRQui:
    if (MI->getOperand(0).getSubReg() == 0 && MI->getOperand(1).isFI() &&
        MI->getOperand(2).isImm() && MI->getOperand(2).getImm() == 0) {
      FrameIndex = MI->getOperand(1).getIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  }

  return 0;
}

unsigned AArch64InstrInfo::isStoreToStackSlot(const MachineInstr *MI,
                                              int &FrameIndex) const {
  switch (MI->getOpcode()) {
  default:
    break;
  case AArch64::STRWui:
  case AArch64::STRXui:
  case AArch64::STRBui:
  case AArch64::STRHui:
  case AArch64::STRSui:
  case AArch64::STRDui:
  case AArch64::STRQui:
    if (MI->getOperand(0).getSubReg() == 0 && MI->getOperand(1).isFI() &&
        MI->getOperand(2).isImm() && MI->getOperand(2).getImm() == 0) {
      FrameIndex = MI->getOperand(1).getIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  }
  return 0;
}

/// Return true if this load/store scales or extends its register offset.
/// This refers to scaling a dynamic index as opposed to scaled immediates.
/// MI should be a memory op that allows scaled addressing.
bool AArch64InstrInfo::isScaledAddr(const MachineInstr *MI) const {
  switch (MI->getOpcode()) {
  default:
    break;
  case AArch64::LDRBBroW:
  case AArch64::LDRBroW:
  case AArch64::LDRDroW:
  case AArch64::LDRHHroW:
  case AArch64::LDRHroW:
  case AArch64::LDRQroW:
  case AArch64::LDRSBWroW:
  case AArch64::LDRSBXroW:
  case AArch64::LDRSHWroW:
  case AArch64::LDRSHXroW:
  case AArch64::LDRSWroW:
  case AArch64::LDRSroW:
  case AArch64::LDRWroW:
  case AArch64::LDRXroW:
  case AArch64::STRBBroW:
  case AArch64::STRBroW:
  case AArch64::STRDroW:
  case AArch64::STRHHroW:
  case AArch64::STRHroW:
  case AArch64::STRQroW:
  case AArch64::STRSroW:
  case AArch64::STRWroW:
  case AArch64::STRXroW:
  case AArch64::LDRBBroX:
  case AArch64::LDRBroX:
  case AArch64::LDRDroX:
  case AArch64::LDRHHroX:
  case AArch64::LDRHroX:
  case AArch64::LDRQroX:
  case AArch64::LDRSBWroX:
  case AArch64::LDRSBXroX:
  case AArch64::LDRSHWroX:
  case AArch64::LDRSHXroX:
  case AArch64::LDRSWroX:
  case AArch64::LDRSroX:
  case AArch64::LDRWroX:
  case AArch64::LDRXroX:
  case AArch64::STRBBroX:
  case AArch64::STRBroX:
  case AArch64::STRDroX:
  case AArch64::STRHHroX:
  case AArch64::STRHroX:
  case AArch64::STRQroX:
  case AArch64::STRSroX:
  case AArch64::STRWroX:
  case AArch64::STRXroX:

    unsigned Val = MI->getOperand(3).getImm();
    AArch64_AM::ShiftExtendType ExtType = AArch64_AM::getMemExtendType(Val);
    return (ExtType != AArch64_AM::UXTX) || AArch64_AM::getMemDoShift(Val);
  }
  return false;
}

/// Check all MachineMemOperands for a hint to suppress pairing.
bool AArch64InstrInfo::isLdStPairSuppressed(const MachineInstr *MI) const {
  assert(MOSuppressPair < (1 << MachineMemOperand::MOTargetNumBits) &&
         "Too many target MO flags");
  for (auto *MM : MI->memoperands()) {
    if (MM->getFlags() &
        (MOSuppressPair << MachineMemOperand::MOTargetStartBit)) {
      return true;
    }
  }
  return false;
}

/// Set a flag on the first MachineMemOperand to suppress pairing.
void AArch64InstrInfo::suppressLdStPair(MachineInstr *MI) const {
  if (MI->memoperands_empty())
    return;

  assert(MOSuppressPair < (1 << MachineMemOperand::MOTargetNumBits) &&
         "Too many target MO flags");
  (*MI->memoperands_begin())
      ->setFlags(MOSuppressPair << MachineMemOperand::MOTargetStartBit);
}

bool
AArch64InstrInfo::getLdStBaseRegImmOfs(MachineInstr *LdSt, unsigned &BaseReg,
                                       unsigned &Offset,
                                       const TargetRegisterInfo *TRI) const {
  switch (LdSt->getOpcode()) {
  default:
    return false;
  case AArch64::STRSui:
  case AArch64::STRDui:
  case AArch64::STRQui:
  case AArch64::STRXui:
  case AArch64::STRWui:
  case AArch64::LDRSui:
  case AArch64::LDRDui:
  case AArch64::LDRQui:
  case AArch64::LDRXui:
  case AArch64::LDRWui:
    if (!LdSt->getOperand(1).isReg() || !LdSt->getOperand(2).isImm())
      return false;
    BaseReg = LdSt->getOperand(1).getReg();
    MachineFunction &MF = *LdSt->getParent()->getParent();
    unsigned Width = getRegClass(LdSt->getDesc(), 0, TRI, MF)->getSize();
    Offset = LdSt->getOperand(2).getImm() * Width;
    return true;
  }
}

bool AArch64InstrInfo::getLdStBaseRegImmOfsWidth(
    MachineInstr *LdSt, unsigned &BaseReg, int &Offset, int &Width,
    const TargetRegisterInfo *TRI) const {
  // Handle only loads/stores with base register followed by immediate offset.
  if (LdSt->getNumOperands() != 3)
    return false;
  if (!LdSt->getOperand(1).isReg() || !LdSt->getOperand(2).isImm())
    return false;

  // Offset is calculated as the immediate operand multiplied by the scaling
  // factor. Unscaled instructions have a scaling factor of 1.
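  // For example, LDRXui with immediate 2 addresses [base, #16] (Scale = 8),
  // while the unscaled LDURXi with immediate 2 addresses [base, #2]
  // (Scale = 1).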
  int Scale = 0;
  switch (LdSt->getOpcode()) {
  default:
    return false;
  case AArch64::LDURQi:
  case AArch64::STURQi:
    Width = 16;
    Scale = 1;
    break;
  case AArch64::LDURXi:
  case AArch64::LDURDi:
  case AArch64::STURXi:
  case AArch64::STURDi:
    Width = 8;
    Scale = 1;
    break;
  case AArch64::LDURWi:
  case AArch64::LDURSi:
  case AArch64::LDURSWi:
  case AArch64::STURWi:
  case AArch64::STURSi:
    Width = 4;
    Scale = 1;
    break;
  case AArch64::LDURHi:
  case AArch64::LDURHHi:
  case AArch64::LDURSHXi:
  case AArch64::LDURSHWi:
  case AArch64::STURHi:
  case AArch64::STURHHi:
    Width = 2;
    Scale = 1;
    break;
  case AArch64::LDURBi:
  case AArch64::LDURBBi:
  case AArch64::LDURSBXi:
  case AArch64::LDURSBWi:
  case AArch64::STURBi:
  case AArch64::STURBBi:
    Width = 1;
    Scale = 1;
    break;
  case AArch64::LDRXui:
  case AArch64::STRXui:
    Scale = Width = 8;
    break;
  case AArch64::LDRWui:
  case AArch64::STRWui:
    Scale = Width = 4;
    break;
  case AArch64::LDRBui:
  case AArch64::STRBui:
    Scale = Width = 1;
    break;
  case AArch64::LDRHui:
  case AArch64::STRHui:
    Scale = Width = 2;
    break;
  case AArch64::LDRSui:
  case AArch64::STRSui:
    Scale = Width = 4;
    break;
  case AArch64::LDRDui:
  case AArch64::STRDui:
    Scale = Width = 8;
    break;
  case AArch64::LDRQui:
  case AArch64::STRQui:
    Scale = Width = 16;
    break;
  case AArch64::LDRBBui:
  case AArch64::STRBBui:
    Scale = Width = 1;
    break;
  case AArch64::LDRHHui:
  case AArch64::STRHHui:
    Scale = Width = 2;
    break;
  }

  BaseReg = LdSt->getOperand(1).getReg();
  Offset = LdSt->getOperand(2).getImm() * Scale;
  return true;
}

/// Detect opportunities for ldp/stp formation.
///
/// Only called for LdSt for which getLdStBaseRegImmOfs returns true.
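/// For example, two LDRXui loads from the same base at scaled offsets 2 and 3
/// (byte offsets 16 and 24) are adjacent and are a candidate for an ldp.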
bool AArch64InstrInfo::shouldClusterLoads(MachineInstr *FirstLdSt,
                                          MachineInstr *SecondLdSt,
                                          unsigned NumLoads) const {
  // Only cluster up to a single pair.
  if (NumLoads > 1)
    return false;
  if (FirstLdSt->getOpcode() != SecondLdSt->getOpcode())
    return false;
  // getLdStBaseRegImmOfs guarantees that operand 2 is an immediate.
  unsigned Ofs1 = FirstLdSt->getOperand(2).getImm();
  // Allow 6 bits of positive range.
  if (Ofs1 > 64)
    return false;
  // The caller should already have ordered First/SecondLdSt by offset.
  unsigned Ofs2 = SecondLdSt->getOperand(2).getImm();
  return Ofs1 + 1 == Ofs2;
}

bool AArch64InstrInfo::shouldScheduleAdjacent(MachineInstr *First,
                                              MachineInstr *Second) const {
  // Cyclone can fuse CMN, CMP followed by Bcc.

  // FIXME: B0 can also fuse:
  // AND, BIC, ORN, ORR, or EOR (optional S) followed by Bcc or CBZ or CBNZ.
  if (Second->getOpcode() != AArch64::Bcc)
    return false;
  switch (First->getOpcode()) {
  default:
    return false;
  case AArch64::SUBSWri:
  case AArch64::ADDSWri:
  case AArch64::ANDSWri:
  case AArch64::SUBSXri:
  case AArch64::ADDSXri:
  case AArch64::ANDSXri:
    return true;
  }
}

MachineInstr *AArch64InstrInfo::emitFrameIndexDebugValue(
    MachineFunction &MF, int FrameIx, uint64_t Offset, const MDNode *Var,
    const MDNode *Expr, DebugLoc DL) const {
  MachineInstrBuilder MIB = BuildMI(MF, DL, get(AArch64::DBG_VALUE))
                                .addFrameIndex(FrameIx)
                                .addImm(0)
                                .addImm(Offset)
                                .addMetadata(Var)
                                .addMetadata(Expr);
  return &*MIB;
}

static const MachineInstrBuilder &AddSubReg(const MachineInstrBuilder &MIB,
                                            unsigned Reg, unsigned SubIdx,
                                            unsigned State,
                                            const TargetRegisterInfo *TRI) {
  if (!SubIdx)
    return MIB.addReg(Reg, State);

  if (TargetRegisterInfo::isPhysicalRegister(Reg))
    return MIB.addReg(TRI->getSubReg(Reg, SubIdx), State);
  return MIB.addReg(Reg, State, SubIdx);
}

static bool forwardCopyWillClobberTuple(unsigned DestReg, unsigned SrcReg,
                                        unsigned NumRegs) {
  // We really want the positive remainder mod 32 here, that happens to be
  // easily obtainable with a mask.
  return ((DestReg - SrcReg) & 0x1f) < NumRegs;
}

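// For example, copying D1_D2_D3 into D2_D3_D4 overlaps:
// forwardCopyWillClobberTuple(2, 1, 3) is true, since a forward sub-register
// copy (dsub0 first) would overwrite D2 and D3 before they are read, so
// copyPhysRegTuple() below copies the sub-registers in reverse order instead.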
void AArch64InstrInfo::copyPhysRegTuple(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator I, DebugLoc DL,
    unsigned DestReg, unsigned SrcReg, bool KillSrc, unsigned Opcode,
    llvm::ArrayRef<unsigned> Indices) const {
  assert(Subtarget.hasNEON() &&
         "Unexpected register copy without NEON");
  const TargetRegisterInfo *TRI = &getRegisterInfo();
  uint16_t DestEncoding = TRI->getEncodingValue(DestReg);
  uint16_t SrcEncoding = TRI->getEncodingValue(SrcReg);
  unsigned NumRegs = Indices.size();

  int SubReg = 0, End = NumRegs, Incr = 1;
  if (forwardCopyWillClobberTuple(DestEncoding, SrcEncoding, NumRegs)) {
    SubReg = NumRegs - 1;
    End = -1;
    Incr = -1;
  }

  for (; SubReg != End; SubReg += Incr) {
    const MachineInstrBuilder &MIB = BuildMI(MBB, I, DL, get(Opcode));
    AddSubReg(MIB, DestReg, Indices[SubReg], RegState::Define, TRI);
    AddSubReg(MIB, SrcReg, Indices[SubReg], 0, TRI);
    AddSubReg(MIB, SrcReg, Indices[SubReg], getKillRegState(KillSrc), TRI);
  }
}

void AArch64InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                                   MachineBasicBlock::iterator I, DebugLoc DL,
                                   unsigned DestReg, unsigned SrcReg,
                                   bool KillSrc) const {
  if (AArch64::GPR32spRegClass.contains(DestReg) &&
      (AArch64::GPR32spRegClass.contains(SrcReg) || SrcReg == AArch64::WZR)) {
    const TargetRegisterInfo *TRI = &getRegisterInfo();

    if (DestReg == AArch64::WSP || SrcReg == AArch64::WSP) {
      // If either operand is WSP, expand to ADD #0.
      if (Subtarget.hasZeroCycleRegMove()) {
        // Cyclone recognizes "ADD Xd, Xn, #0" as a zero-cycle register move.
        unsigned DestRegX = TRI->getMatchingSuperReg(DestReg, AArch64::sub_32,
                                                     &AArch64::GPR64spRegClass);
        unsigned SrcRegX = TRI->getMatchingSuperReg(SrcReg, AArch64::sub_32,
                                                    &AArch64::GPR64spRegClass);
        // This instruction is reading and writing X registers. This may upset
        // the register scavenger and machine verifier, so we need to indicate
        // that we are reading an undefined value from SrcRegX, but a proper
        // value from SrcReg.
        BuildMI(MBB, I, DL, get(AArch64::ADDXri), DestRegX)
            .addReg(SrcRegX, RegState::Undef)
            .addImm(0)
            .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0))
            .addReg(SrcReg, RegState::Implicit | getKillRegState(KillSrc));
      } else {
        BuildMI(MBB, I, DL, get(AArch64::ADDWri), DestReg)
            .addReg(SrcReg, getKillRegState(KillSrc))
            .addImm(0)
            .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0));
      }
    } else if (SrcReg == AArch64::WZR && Subtarget.hasZeroCycleZeroing()) {
      BuildMI(MBB, I, DL, get(AArch64::MOVZWi), DestReg).addImm(0).addImm(
          AArch64_AM::getShifterImm(AArch64_AM::LSL, 0));
    } else {
      if (Subtarget.hasZeroCycleRegMove()) {
        // Cyclone recognizes "ORR Xd, XZR, Xm" as a zero-cycle register move.
        unsigned DestRegX = TRI->getMatchingSuperReg(DestReg, AArch64::sub_32,
                                                     &AArch64::GPR64spRegClass);
        unsigned SrcRegX = TRI->getMatchingSuperReg(SrcReg, AArch64::sub_32,
                                                    &AArch64::GPR64spRegClass);
        // This instruction is reading and writing X registers. This may upset
        // the register scavenger and machine verifier, so we need to indicate
        // that we are reading an undefined value from SrcRegX, but a proper
        // value from SrcReg.
        BuildMI(MBB, I, DL, get(AArch64::ORRXrr), DestRegX)
            .addReg(AArch64::XZR)
            .addReg(SrcRegX, RegState::Undef)
            .addReg(SrcReg, RegState::Implicit | getKillRegState(KillSrc));
      } else {
        // Otherwise, expand to ORR WZR.
        BuildMI(MBB, I, DL, get(AArch64::ORRWrr), DestReg)
            .addReg(AArch64::WZR)
            .addReg(SrcReg, getKillRegState(KillSrc));
      }
    }
    return;
  }

  if (AArch64::GPR64spRegClass.contains(DestReg) &&
      (AArch64::GPR64spRegClass.contains(SrcReg) || SrcReg == AArch64::XZR)) {
    if (DestReg == AArch64::SP || SrcReg == AArch64::SP) {
      // If either operand is SP, expand to ADD #0.
      BuildMI(MBB, I, DL, get(AArch64::ADDXri), DestReg)
          .addReg(SrcReg, getKillRegState(KillSrc))
          .addImm(0)
          .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0));
    } else if (SrcReg == AArch64::XZR && Subtarget.hasZeroCycleZeroing()) {
      BuildMI(MBB, I, DL, get(AArch64::MOVZXi), DestReg).addImm(0).addImm(
          AArch64_AM::getShifterImm(AArch64_AM::LSL, 0));
    } else {
      // Otherwise, expand to ORR XZR.
      BuildMI(MBB, I, DL, get(AArch64::ORRXrr), DestReg)
          .addReg(AArch64::XZR)
          .addReg(SrcReg, getKillRegState(KillSrc));
    }
    return;
  }
1615
1616 // Copy a DDDD register quad by copying the individual sub-registers.
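  // E.g. a D0_D1_D2_D3 <- D4_D5_D6_D7 copy becomes four "orr v0.8b, v4.8b,
  // v4.8b"-style moves, one per D sub-register (illustrative registers).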
  if (AArch64::DDDDRegClass.contains(DestReg) &&
      AArch64::DDDDRegClass.contains(SrcReg)) {
    static const unsigned Indices[] = { AArch64::dsub0, AArch64::dsub1,
                                        AArch64::dsub2, AArch64::dsub3 };
    copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRv8i8,
                     Indices);
    return;
  }

  // Copy a DDD register triple by copying the individual sub-registers.
  if (AArch64::DDDRegClass.contains(DestReg) &&
      AArch64::DDDRegClass.contains(SrcReg)) {
    static const unsigned Indices[] = { AArch64::dsub0, AArch64::dsub1,
                                        AArch64::dsub2 };
    copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRv8i8,
                     Indices);
    return;
  }

  // Copy a DD register pair by copying the individual sub-registers.
  if (AArch64::DDRegClass.contains(DestReg) &&
      AArch64::DDRegClass.contains(SrcReg)) {
    static const unsigned Indices[] = { AArch64::dsub0, AArch64::dsub1 };
    copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRv8i8,
                     Indices);
    return;
  }

  // Copy a QQQQ register quad by copying the individual sub-registers.
  if (AArch64::QQQQRegClass.contains(DestReg) &&
      AArch64::QQQQRegClass.contains(SrcReg)) {
    static const unsigned Indices[] = { AArch64::qsub0, AArch64::qsub1,
                                        AArch64::qsub2, AArch64::qsub3 };
    copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRv16i8,
                     Indices);
    return;
  }

  // Copy a QQQ register triple by copying the individual sub-registers.
  if (AArch64::QQQRegClass.contains(DestReg) &&
      AArch64::QQQRegClass.contains(SrcReg)) {
    static const unsigned Indices[] = { AArch64::qsub0, AArch64::qsub1,
                                        AArch64::qsub2 };
    copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRv16i8,
                     Indices);
    return;
  }

  // Copy a QQ register pair by copying the individual sub-registers.
  if (AArch64::QQRegClass.contains(DestReg) &&
      AArch64::QQRegClass.contains(SrcReg)) {
    static const unsigned Indices[] = { AArch64::qsub0, AArch64::qsub1 };
    copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRv16i8,
                     Indices);
    return;
  }

  if (AArch64::FPR128RegClass.contains(DestReg) &&
      AArch64::FPR128RegClass.contains(SrcReg)) {
    if (Subtarget.hasNEON()) {
      BuildMI(MBB, I, DL, get(AArch64::ORRv16i8), DestReg)
          .addReg(SrcReg)
          .addReg(SrcReg, getKillRegState(KillSrc));
    } else {
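      // Without NEON there is no full-width vector ORR, so bounce the value
      // through the stack: the pre-indexed store/load pair below nets SP
      // back to its original value.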
      BuildMI(MBB, I, DL, get(AArch64::STRQpre))
          .addReg(AArch64::SP, RegState::Define)
          .addReg(SrcReg, getKillRegState(KillSrc))
          .addReg(AArch64::SP)
          .addImm(-16);
      BuildMI(MBB, I, DL, get(AArch64::LDRQpre))
          .addReg(AArch64::SP, RegState::Define)
          .addReg(DestReg, RegState::Define)
          .addReg(AArch64::SP)
          .addImm(16);
    }
    return;
  }

  if (AArch64::FPR64RegClass.contains(DestReg) &&
      AArch64::FPR64RegClass.contains(SrcReg)) {
    if (Subtarget.hasNEON()) {
      DestReg = RI.getMatchingSuperReg(DestReg, AArch64::dsub,
                                       &AArch64::FPR128RegClass);
      SrcReg = RI.getMatchingSuperReg(SrcReg, AArch64::dsub,
                                      &AArch64::FPR128RegClass);
      BuildMI(MBB, I, DL, get(AArch64::ORRv16i8), DestReg)
          .addReg(SrcReg)
          .addReg(SrcReg, getKillRegState(KillSrc));
    } else {
      BuildMI(MBB, I, DL, get(AArch64::FMOVDr), DestReg)
          .addReg(SrcReg, getKillRegState(KillSrc));
    }
    return;
  }

  if (AArch64::FPR32RegClass.contains(DestReg) &&
      AArch64::FPR32RegClass.contains(SrcReg)) {
    if (Subtarget.hasNEON()) {
      DestReg = RI.getMatchingSuperReg(DestReg, AArch64::ssub,
                                       &AArch64::FPR128RegClass);
      SrcReg = RI.getMatchingSuperReg(SrcReg, AArch64::ssub,
                                      &AArch64::FPR128RegClass);
      BuildMI(MBB, I, DL, get(AArch64::ORRv16i8), DestReg)
          .addReg(SrcReg)
          .addReg(SrcReg, getKillRegState(KillSrc));
    } else {
      BuildMI(MBB, I, DL, get(AArch64::FMOVSr), DestReg)
          .addReg(SrcReg, getKillRegState(KillSrc));
    }
    return;
  }

  if (AArch64::FPR16RegClass.contains(DestReg) &&
      AArch64::FPR16RegClass.contains(SrcReg)) {
    if (Subtarget.hasNEON()) {
      DestReg = RI.getMatchingSuperReg(DestReg, AArch64::hsub,
                                       &AArch64::FPR128RegClass);
      SrcReg = RI.getMatchingSuperReg(SrcReg, AArch64::hsub,
                                      &AArch64::FPR128RegClass);
      BuildMI(MBB, I, DL, get(AArch64::ORRv16i8), DestReg)
          .addReg(SrcReg)
          .addReg(SrcReg, getKillRegState(KillSrc));
    } else {
      DestReg = RI.getMatchingSuperReg(DestReg, AArch64::hsub,
                                       &AArch64::FPR32RegClass);
      SrcReg = RI.getMatchingSuperReg(SrcReg, AArch64::hsub,
                                      &AArch64::FPR32RegClass);
      BuildMI(MBB, I, DL, get(AArch64::FMOVSr), DestReg)
          .addReg(SrcReg, getKillRegState(KillSrc));
    }
    return;
  }

  if (AArch64::FPR8RegClass.contains(DestReg) &&
      AArch64::FPR8RegClass.contains(SrcReg)) {
    if (Subtarget.hasNEON()) {
      DestReg = RI.getMatchingSuperReg(DestReg, AArch64::bsub,
                                       &AArch64::FPR128RegClass);
      SrcReg = RI.getMatchingSuperReg(SrcReg, AArch64::bsub,
                                      &AArch64::FPR128RegClass);
      BuildMI(MBB, I, DL, get(AArch64::ORRv16i8), DestReg)
          .addReg(SrcReg)
          .addReg(SrcReg, getKillRegState(KillSrc));
    } else {
      DestReg = RI.getMatchingSuperReg(DestReg, AArch64::bsub,
                                       &AArch64::FPR32RegClass);
      SrcReg = RI.getMatchingSuperReg(SrcReg, AArch64::bsub,
                                      &AArch64::FPR32RegClass);
      BuildMI(MBB, I, DL, get(AArch64::FMOVSr), DestReg)
          .addReg(SrcReg, getKillRegState(KillSrc));
    }
    return;
  }

  // Copies between GPR64 and FPR64.
  if (AArch64::FPR64RegClass.contains(DestReg) &&
      AArch64::GPR64RegClass.contains(SrcReg)) {
    BuildMI(MBB, I, DL, get(AArch64::FMOVXDr), DestReg)
        .addReg(SrcReg, getKillRegState(KillSrc));
    return;
  }
  if (AArch64::GPR64RegClass.contains(DestReg) &&
      AArch64::FPR64RegClass.contains(SrcReg)) {
    BuildMI(MBB, I, DL, get(AArch64::FMOVDXr), DestReg)
        .addReg(SrcReg, getKillRegState(KillSrc));
    return;
  }
  // Copies between GPR32 and FPR32.
  if (AArch64::FPR32RegClass.contains(DestReg) &&
      AArch64::GPR32RegClass.contains(SrcReg)) {
    BuildMI(MBB, I, DL, get(AArch64::FMOVWSr), DestReg)
        .addReg(SrcReg, getKillRegState(KillSrc));
    return;
  }
  if (AArch64::GPR32RegClass.contains(DestReg) &&
      AArch64::FPR32RegClass.contains(SrcReg)) {
    BuildMI(MBB, I, DL, get(AArch64::FMOVSWr), DestReg)
        .addReg(SrcReg, getKillRegState(KillSrc));
    return;
  }

  if (DestReg == AArch64::NZCV) {
    assert(AArch64::GPR64RegClass.contains(SrcReg) && "Invalid NZCV copy");
    BuildMI(MBB, I, DL, get(AArch64::MSR))
        .addImm(AArch64SysReg::NZCV)
        .addReg(SrcReg, getKillRegState(KillSrc))
        .addReg(AArch64::NZCV, RegState::Implicit | RegState::Define);
    return;
  }

  if (SrcReg == AArch64::NZCV) {
    assert(AArch64::GPR64RegClass.contains(DestReg) && "Invalid NZCV copy");
    BuildMI(MBB, I, DL, get(AArch64::MRS))
        .addReg(DestReg)
        .addImm(AArch64SysReg::NZCV)
        .addReg(AArch64::NZCV, RegState::Implicit | getKillRegState(KillSrc));
    return;
  }

  llvm_unreachable("unimplemented reg-to-reg copy");
}

void AArch64InstrInfo::storeRegToStackSlot(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, unsigned SrcReg,
    bool isKill, int FI, const TargetRegisterClass *RC,
    const TargetRegisterInfo *TRI) const {
  DebugLoc DL;
  if (MBBI != MBB.end())
    DL = MBBI->getDebugLoc();
  MachineFunction &MF = *MBB.getParent();
  MachineFrameInfo &MFI = *MF.getFrameInfo();
  unsigned Align = MFI.getObjectAlignment(FI);

  MachinePointerInfo PtrInfo(PseudoSourceValue::getFixedStack(FI));
  MachineMemOperand *MMO = MF.getMachineMemOperand(
      PtrInfo, MachineMemOperand::MOStore, MFI.getObjectSize(FI), Align);
  unsigned Opc = 0;
  bool Offset = true;
  switch (RC->getSize()) {
  case 1:
    if (AArch64::FPR8RegClass.hasSubClassEq(RC))
      Opc = AArch64::STRBui;
    break;
  case 2:
    if (AArch64::FPR16RegClass.hasSubClassEq(RC))
      Opc = AArch64::STRHui;
    break;
  case 4:
    if (AArch64::GPR32allRegClass.hasSubClassEq(RC)) {
      Opc = AArch64::STRWui;
      if (TargetRegisterInfo::isVirtualRegister(SrcReg))
        MF.getRegInfo().constrainRegClass(SrcReg, &AArch64::GPR32RegClass);
      else
        assert(SrcReg != AArch64::WSP);
    } else if (AArch64::FPR32RegClass.hasSubClassEq(RC))
      Opc = AArch64::STRSui;
    break;
  case 8:
    if (AArch64::GPR64allRegClass.hasSubClassEq(RC)) {
      Opc = AArch64::STRXui;
      if (TargetRegisterInfo::isVirtualRegister(SrcReg))
        MF.getRegInfo().constrainRegClass(SrcReg, &AArch64::GPR64RegClass);
      else
        assert(SrcReg != AArch64::SP);
    } else if (AArch64::FPR64RegClass.hasSubClassEq(RC))
      Opc = AArch64::STRDui;
    break;
  case 16:
    if (AArch64::FPR128RegClass.hasSubClassEq(RC))
      Opc = AArch64::STRQui;
    else if (AArch64::DDRegClass.hasSubClassEq(RC)) {
      assert(Subtarget.hasNEON() && "Unexpected register store without NEON");
      Opc = AArch64::ST1Twov1d, Offset = false;
    }
    break;
  case 24:
    if (AArch64::DDDRegClass.hasSubClassEq(RC)) {
      assert(Subtarget.hasNEON() && "Unexpected register store without NEON");
      Opc = AArch64::ST1Threev1d, Offset = false;
    }
    break;
  case 32:
    if (AArch64::DDDDRegClass.hasSubClassEq(RC)) {
      assert(Subtarget.hasNEON() && "Unexpected register store without NEON");
      Opc = AArch64::ST1Fourv1d, Offset = false;
    } else if (AArch64::QQRegClass.hasSubClassEq(RC)) {
      assert(Subtarget.hasNEON() && "Unexpected register store without NEON");
      Opc = AArch64::ST1Twov2d, Offset = false;
    }
    break;
  case 48:
    if (AArch64::QQQRegClass.hasSubClassEq(RC)) {
      assert(Subtarget.hasNEON() && "Unexpected register store without NEON");
      Opc = AArch64::ST1Threev2d, Offset = false;
    }
    break;
  case 64:
    if (AArch64::QQQQRegClass.hasSubClassEq(RC)) {
      assert(Subtarget.hasNEON() && "Unexpected register store without NEON");
      Opc = AArch64::ST1Fourv2d, Offset = false;
    }
    break;
  }
  assert(Opc && "Unknown register class");
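  // E.g. an 8-byte GPR64 spill selects STRXui and becomes roughly
  // "str xN, [<fi#FI>]" with a scaled immediate of 0 appended below; the
  // frame index is rewritten to a real base register later, during PEI
  // (sketch of the resulting MI, not from the source).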

  const MachineInstrBuilder &MI = BuildMI(MBB, MBBI, DL, get(Opc))
                                      .addReg(SrcReg, getKillRegState(isKill))
                                      .addFrameIndex(FI);

  if (Offset)
    MI.addImm(0);
  MI.addMemOperand(MMO);
}

void AArch64InstrInfo::loadRegFromStackSlot(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, unsigned DestReg,
    int FI, const TargetRegisterClass *RC,
    const TargetRegisterInfo *TRI) const {
  DebugLoc DL;
  if (MBBI != MBB.end())
    DL = MBBI->getDebugLoc();
  MachineFunction &MF = *MBB.getParent();
  MachineFrameInfo &MFI = *MF.getFrameInfo();
  unsigned Align = MFI.getObjectAlignment(FI);
  MachinePointerInfo PtrInfo(PseudoSourceValue::getFixedStack(FI));
  MachineMemOperand *MMO = MF.getMachineMemOperand(
      PtrInfo, MachineMemOperand::MOLoad, MFI.getObjectSize(FI), Align);

  unsigned Opc = 0;
  bool Offset = true;
  switch (RC->getSize()) {
  case 1:
    if (AArch64::FPR8RegClass.hasSubClassEq(RC))
      Opc = AArch64::LDRBui;
    break;
  case 2:
    if (AArch64::FPR16RegClass.hasSubClassEq(RC))
      Opc = AArch64::LDRHui;
    break;
  case 4:
    if (AArch64::GPR32allRegClass.hasSubClassEq(RC)) {
      Opc = AArch64::LDRWui;
      if (TargetRegisterInfo::isVirtualRegister(DestReg))
        MF.getRegInfo().constrainRegClass(DestReg, &AArch64::GPR32RegClass);
      else
        assert(DestReg != AArch64::WSP);
    } else if (AArch64::FPR32RegClass.hasSubClassEq(RC))
      Opc = AArch64::LDRSui;
    break;
  case 8:
    if (AArch64::GPR64allRegClass.hasSubClassEq(RC)) {
      Opc = AArch64::LDRXui;
      if (TargetRegisterInfo::isVirtualRegister(DestReg))
        MF.getRegInfo().constrainRegClass(DestReg, &AArch64::GPR64RegClass);
      else
        assert(DestReg != AArch64::SP);
    } else if (AArch64::FPR64RegClass.hasSubClassEq(RC))
      Opc = AArch64::LDRDui;
    break;
  case 16:
    if (AArch64::FPR128RegClass.hasSubClassEq(RC))
      Opc = AArch64::LDRQui;
    else if (AArch64::DDRegClass.hasSubClassEq(RC)) {
      assert(Subtarget.hasNEON() && "Unexpected register load without NEON");
      Opc = AArch64::LD1Twov1d, Offset = false;
    }
    break;
  case 24:
    if (AArch64::DDDRegClass.hasSubClassEq(RC)) {
      assert(Subtarget.hasNEON() && "Unexpected register load without NEON");
      Opc = AArch64::LD1Threev1d, Offset = false;
    }
    break;
  case 32:
    if (AArch64::DDDDRegClass.hasSubClassEq(RC)) {
      assert(Subtarget.hasNEON() && "Unexpected register load without NEON");
      Opc = AArch64::LD1Fourv1d, Offset = false;
    } else if (AArch64::QQRegClass.hasSubClassEq(RC)) {
      assert(Subtarget.hasNEON() && "Unexpected register load without NEON");
      Opc = AArch64::LD1Twov2d, Offset = false;
    }
    break;
  case 48:
    if (AArch64::QQQRegClass.hasSubClassEq(RC)) {
      assert(Subtarget.hasNEON() && "Unexpected register load without NEON");
      Opc = AArch64::LD1Threev2d, Offset = false;
    }
    break;
  case 64:
    if (AArch64::QQQQRegClass.hasSubClassEq(RC)) {
      assert(Subtarget.hasNEON() && "Unexpected register load without NEON");
      Opc = AArch64::LD1Fourv2d, Offset = false;
    }
    break;
  }
  assert(Opc && "Unknown register class");

  const MachineInstrBuilder &MI = BuildMI(MBB, MBBI, DL, get(Opc))
                                      .addReg(DestReg, getDefRegState(true))
                                      .addFrameIndex(FI);
  if (Offset)
    MI.addImm(0);
  MI.addMemOperand(MMO);
}

void llvm::emitFrameOffset(MachineBasicBlock &MBB,
                           MachineBasicBlock::iterator MBBI, DebugLoc DL,
                           unsigned DestReg, unsigned SrcReg, int Offset,
                           const TargetInstrInfo *TII,
                           MachineInstr::MIFlag Flag, bool SetNZCV) {
  if (DestReg == SrcReg && Offset == 0)
    return;

  bool isSub = Offset < 0;
  if (isSub)
    Offset = -Offset;

  // FIXME: If the offset won't fit in 24 bits, compute the offset into a
  // scratch register. If DestReg is a virtual register, use it as the
  // scratch register; otherwise, create a new virtual register (to be
  // replaced by the scavenger at the end of PEI). That case can be optimized
  // slightly if DestReg is SP, which is always 16-byte aligned, so the
  // scratch register can be loaded with offset%8 and the add/sub can use an
  // extending instruction with LSL#3.
  // Currently the function handles any offset but generates a poor sequence
  // of code.
  // assert(Offset < (1 << 24) && "unimplemented reg plus immediate");

  unsigned Opc;
  if (SetNZCV)
    Opc = isSub ? AArch64::SUBSXri : AArch64::ADDSXri;
  else
    Opc = isSub ? AArch64::SUBXri : AArch64::ADDXri;
  const unsigned MaxEncoding = 0xfff;
  const unsigned ShiftSize = 12;
  const unsigned MaxEncodableValue = MaxEncoding << ShiftSize;
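  // Worked example (not from the source): Offset == 0x123456 first emits
  // "add/sub Xd, Xn, #0x123, lsl #12" (covering 0x123000), leaving 0x456
  // for the final instruction below.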
  while (((unsigned)Offset) >= (1 << ShiftSize)) {
    unsigned ThisVal;
    if (((unsigned)Offset) > MaxEncodableValue) {
      ThisVal = MaxEncodableValue;
    } else {
      ThisVal = Offset & MaxEncodableValue;
    }
    assert((ThisVal >> ShiftSize) <= MaxEncoding &&
           "Encoding cannot handle value that big");
    BuildMI(MBB, MBBI, DL, TII->get(Opc), DestReg)
        .addReg(SrcReg)
        .addImm(ThisVal >> ShiftSize)
        .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, ShiftSize))
        .setMIFlag(Flag);

    SrcReg = DestReg;
    Offset -= ThisVal;
    if (Offset == 0)
      return;
  }
  BuildMI(MBB, MBBI, DL, TII->get(Opc), DestReg)
      .addReg(SrcReg)
      .addImm(Offset)
      .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0))
      .setMIFlag(Flag);
}

MachineInstr *
AArch64InstrInfo::foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
                                        const SmallVectorImpl<unsigned> &Ops,
                                        int FrameIndex) const {
  // This is a bit of a hack. Consider this instruction:
  //
  //   %vreg0<def> = COPY %SP; GPR64all:%vreg0
  //
  // We explicitly chose GPR64all for the virtual register so such a copy might
  // be eliminated by RegisterCoalescer. However, that may not be possible, and
  // %vreg0 may even spill. We can't spill %SP, and since it is in the GPR64all
  // register class, TargetInstrInfo::foldMemoryOperand() is going to try.
  //
  // To prevent that, we are going to constrain the %vreg0 register class here.
  //
  // <rdar://problem/11522048>
  //
  if (MI->isCopy()) {
    unsigned DstReg = MI->getOperand(0).getReg();
    unsigned SrcReg = MI->getOperand(1).getReg();
    if (SrcReg == AArch64::SP &&
        TargetRegisterInfo::isVirtualRegister(DstReg)) {
      MF.getRegInfo().constrainRegClass(DstReg, &AArch64::GPR64RegClass);
      return nullptr;
    }
    if (DstReg == AArch64::SP &&
        TargetRegisterInfo::isVirtualRegister(SrcReg)) {
      MF.getRegInfo().constrainRegClass(SrcReg, &AArch64::GPR64RegClass);
      return nullptr;
    }
  }

  // Cannot fold.
  return nullptr;
}

int llvm::isAArch64FrameOffsetLegal(const MachineInstr &MI, int &Offset,
                                    bool *OutUseUnscaledOp,
                                    unsigned *OutUnscaledOp,
                                    int *EmittableOffset) {
  int Scale = 1;
  bool IsSigned = false;
  // The immediate is operand 2 by default; ImmIdx must be adjusted case by
  // case for any opcode that places it elsewhere.
  unsigned ImmIdx = 2;
  unsigned UnscaledOp = 0;
  // Set output values in case of early exit.
  if (EmittableOffset)
    *EmittableOffset = 0;
  if (OutUseUnscaledOp)
    *OutUseUnscaledOp = false;
  if (OutUnscaledOp)
    *OutUnscaledOp = 0;
  switch (MI.getOpcode()) {
  default:
    llvm_unreachable("unhandled opcode in rewriteAArch64FrameIndex");
  // Vector spills/fills can't take an immediate offset.
  case AArch64::LD1Twov2d:
  case AArch64::LD1Threev2d:
  case AArch64::LD1Fourv2d:
  case AArch64::LD1Twov1d:
  case AArch64::LD1Threev1d:
  case AArch64::LD1Fourv1d:
  case AArch64::ST1Twov2d:
  case AArch64::ST1Threev2d:
  case AArch64::ST1Fourv2d:
  case AArch64::ST1Twov1d:
  case AArch64::ST1Threev1d:
  case AArch64::ST1Fourv1d:
    return AArch64FrameOffsetCannotUpdate;
  case AArch64::PRFMui:
    Scale = 8;
    UnscaledOp = AArch64::PRFUMi;
    break;
  case AArch64::LDRXui:
    Scale = 8;
    UnscaledOp = AArch64::LDURXi;
    break;
  case AArch64::LDRWui:
    Scale = 4;
    UnscaledOp = AArch64::LDURWi;
    break;
  case AArch64::LDRBui:
    Scale = 1;
    UnscaledOp = AArch64::LDURBi;
    break;
  case AArch64::LDRHui:
    Scale = 2;
    UnscaledOp = AArch64::LDURHi;
    break;
  case AArch64::LDRSui:
    Scale = 4;
    UnscaledOp = AArch64::LDURSi;
    break;
  case AArch64::LDRDui:
    Scale = 8;
    UnscaledOp = AArch64::LDURDi;
    break;
  case AArch64::LDRQui:
    Scale = 16;
    UnscaledOp = AArch64::LDURQi;
    break;
  case AArch64::LDRBBui:
    Scale = 1;
    UnscaledOp = AArch64::LDURBBi;
    break;
  case AArch64::LDRHHui:
    Scale = 2;
    UnscaledOp = AArch64::LDURHHi;
    break;
  case AArch64::LDRSBXui:
    Scale = 1;
    UnscaledOp = AArch64::LDURSBXi;
    break;
  case AArch64::LDRSBWui:
    Scale = 1;
    UnscaledOp = AArch64::LDURSBWi;
    break;
  case AArch64::LDRSHXui:
    Scale = 2;
    UnscaledOp = AArch64::LDURSHXi;
    break;
  case AArch64::LDRSHWui:
    Scale = 2;
    UnscaledOp = AArch64::LDURSHWi;
    break;
  case AArch64::LDRSWui:
    Scale = 4;
    UnscaledOp = AArch64::LDURSWi;
    break;

  case AArch64::STRXui:
    Scale = 8;
    UnscaledOp = AArch64::STURXi;
    break;
  case AArch64::STRWui:
    Scale = 4;
    UnscaledOp = AArch64::STURWi;
    break;
  case AArch64::STRBui:
    Scale = 1;
    UnscaledOp = AArch64::STURBi;
    break;
  case AArch64::STRHui:
    Scale = 2;
    UnscaledOp = AArch64::STURHi;
    break;
  case AArch64::STRSui:
    Scale = 4;
    UnscaledOp = AArch64::STURSi;
    break;
  case AArch64::STRDui:
    Scale = 8;
    UnscaledOp = AArch64::STURDi;
    break;
  case AArch64::STRQui:
    Scale = 16;
    UnscaledOp = AArch64::STURQi;
    break;
  case AArch64::STRBBui:
    Scale = 1;
    UnscaledOp = AArch64::STURBBi;
    break;
  case AArch64::STRHHui:
    Scale = 2;
    UnscaledOp = AArch64::STURHHi;
    break;

  case AArch64::LDPXi:
  case AArch64::LDPDi:
  case AArch64::STPXi:
  case AArch64::STPDi:
    IsSigned = true;
    Scale = 8;
    break;
  case AArch64::LDPQi:
  case AArch64::STPQi:
    IsSigned = true;
    Scale = 16;
    break;
  case AArch64::LDPWi:
  case AArch64::LDPSi:
  case AArch64::STPWi:
  case AArch64::STPSi:
    IsSigned = true;
    Scale = 4;
    break;

  case AArch64::LDURXi:
  case AArch64::LDURWi:
  case AArch64::LDURBi:
  case AArch64::LDURHi:
  case AArch64::LDURSi:
  case AArch64::LDURDi:
  case AArch64::LDURQi:
  case AArch64::LDURHHi:
  case AArch64::LDURBBi:
  case AArch64::LDURSBXi:
  case AArch64::LDURSBWi:
  case AArch64::LDURSHXi:
  case AArch64::LDURSHWi:
  case AArch64::LDURSWi:
  case AArch64::STURXi:
  case AArch64::STURWi:
  case AArch64::STURBi:
  case AArch64::STURHi:
  case AArch64::STURSi:
  case AArch64::STURDi:
  case AArch64::STURQi:
  case AArch64::STURBBi:
  case AArch64::STURHHi:
    Scale = 1;
    break;
  }

  Offset += MI.getOperand(ImmIdx).getImm() * Scale;

  bool useUnscaledOp = false;
  // If the offset doesn't match the scale, we rewrite the instruction to
  // use the unscaled instruction instead. Likewise, if we have a negative
  // offset (and have an unscaled op to use).
  if ((Offset & (Scale - 1)) != 0 || (Offset < 0 && UnscaledOp != 0))
    useUnscaledOp = true;

  // Use an unscaled addressing mode if the instruction has a negative offset
  // (or if the instruction is already using an unscaled addressing mode).
  unsigned MaskBits;
  if (IsSigned) {
    // ldp/stp instructions.
    MaskBits = 7;
    Offset /= Scale;
  } else if (UnscaledOp == 0 || useUnscaledOp) {
    MaskBits = 9;
    IsSigned = true;
    Scale = 1;
  } else {
    MaskBits = 12;
    IsSigned = false;
    Offset /= Scale;
  }

  // Attempt to fold address computation.
  int MaxOff = (1 << (MaskBits - IsSigned)) - 1;
  int MinOff = (IsSigned ? (-MaxOff - 1) : 0);
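  // E.g. LDRXui (Scale == 8, 12-bit unsigned): MaxOff is 4095, so byte
  // offsets 0..32760 that are multiples of 8 stay legal; a misaligned or
  // negative offset switches to LDURXi with its signed 9-bit range of
  // -256..255 (illustrative numbers derived from the cases above).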
  if (Offset >= MinOff && Offset <= MaxOff) {
    if (EmittableOffset)
      *EmittableOffset = Offset;
    Offset = 0;
  } else {
    int NewOff = Offset < 0 ? MinOff : MaxOff;
    if (EmittableOffset)
      *EmittableOffset = NewOff;
    Offset = (Offset - NewOff) * Scale;
  }
  if (OutUseUnscaledOp)
    *OutUseUnscaledOp = useUnscaledOp;
  if (OutUnscaledOp)
    *OutUnscaledOp = UnscaledOp;
  return AArch64FrameOffsetCanUpdate |
         (Offset == 0 ? AArch64FrameOffsetIsLegal : 0);
}

bool llvm::rewriteAArch64FrameIndex(MachineInstr &MI, unsigned FrameRegIdx,
                                    unsigned FrameReg, int &Offset,
                                    const AArch64InstrInfo *TII) {
  unsigned Opcode = MI.getOpcode();
  unsigned ImmIdx = FrameRegIdx + 1;

  if (Opcode == AArch64::ADDSXri || Opcode == AArch64::ADDXri) {
    Offset += MI.getOperand(ImmIdx).getImm();
    emitFrameOffset(*MI.getParent(), MI, MI.getDebugLoc(),
                    MI.getOperand(0).getReg(), FrameReg, Offset, TII,
                    MachineInstr::NoFlags, (Opcode == AArch64::ADDSXri));
    MI.eraseFromParent();
    Offset = 0;
    return true;
  }

  int NewOffset;
  unsigned UnscaledOp;
  bool UseUnscaledOp;
  int Status = isAArch64FrameOffsetLegal(MI, Offset, &UseUnscaledOp,
                                         &UnscaledOp, &NewOffset);
  if (Status & AArch64FrameOffsetCanUpdate) {
    if (Status & AArch64FrameOffsetIsLegal)
      // Replace the FrameIndex with FrameReg.
      MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
    if (UseUnscaledOp)
      MI.setDesc(TII->get(UnscaledOp));

    MI.getOperand(ImmIdx).ChangeToImmediate(NewOffset);
    return Offset == 0;
  }

  return false;
}

void AArch64InstrInfo::getNoopForMachoTarget(MCInst &NopInst) const {
  NopInst.setOpcode(AArch64::HINT);
  NopInst.addOperand(MCOperand::CreateImm(0));
}

/// useMachineCombiner - return true when a target supports MachineCombiner
bool AArch64InstrInfo::useMachineCombiner() const {
  // AArch64 supports the combiner
  return true;
}

//
// True when Opc sets flag
static bool isCombineInstrSettingFlag(unsigned Opc) {
  switch (Opc) {
  case AArch64::ADDSWrr:
  case AArch64::ADDSWri:
  case AArch64::ADDSXrr:
  case AArch64::ADDSXri:
  case AArch64::SUBSWrr:
  case AArch64::SUBSXrr:
  // Note: MSUB Wd,Wn,Wm,Wi -> Wd = Wi - WnxWm, not Wd=WnxWm - Wi.
  case AArch64::SUBSWri:
  case AArch64::SUBSXri:
    return true;
  default:
    break;
  }
  return false;
}

//
// 32b Opcodes that can be combined with a MUL
static bool isCombineInstrCandidate32(unsigned Opc) {
  switch (Opc) {
  case AArch64::ADDWrr:
  case AArch64::ADDWri:
  case AArch64::SUBWrr:
  case AArch64::ADDSWrr:
  case AArch64::ADDSWri:
  case AArch64::SUBSWrr:
  // Note: MSUB Wd,Wn,Wm,Wi -> Wd = Wi - WnxWm, not Wd=WnxWm - Wi.
  case AArch64::SUBWri:
  case AArch64::SUBSWri:
    return true;
  default:
    break;
  }
  return false;
}

//
// 64b Opcodes that can be combined with a MUL
static bool isCombineInstrCandidate64(unsigned Opc) {
  switch (Opc) {
  case AArch64::ADDXrr:
  case AArch64::ADDXri:
  case AArch64::SUBXrr:
  case AArch64::ADDSXrr:
  case AArch64::ADDSXri:
  case AArch64::SUBSXrr:
  // Note: MSUB Wd,Wn,Wm,Wi -> Wd = Wi - WnxWm, not Wd=WnxWm - Wi.
  case AArch64::SUBXri:
  case AArch64::SUBSXri:
    return true;
  default:
    break;
  }
  return false;
}

//
// Opcodes that can be combined with a MUL
static bool isCombineInstrCandidate(unsigned Opc) {
  return (isCombineInstrCandidate32(Opc) || isCombineInstrCandidate64(Opc));
}

static bool canCombineWithMUL(MachineBasicBlock &MBB, MachineOperand &MO,
                              unsigned MulOpc, unsigned ZeroReg) {
  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
  MachineInstr *MI = nullptr;
  // We need a virtual register definition.
  if (MO.isReg() && TargetRegisterInfo::isVirtualRegister(MO.getReg()))
    MI = MRI.getUniqueVRegDef(MO.getReg());
  // And it needs to be in the trace (otherwise, it won't have a depth).
  if (!MI || MI->getParent() != &MBB || (unsigned)MI->getOpcode() != MulOpc)
    return false;

  assert(MI->getNumOperands() >= 4 && MI->getOperand(0).isReg() &&
         MI->getOperand(1).isReg() && MI->getOperand(2).isReg() &&
         MI->getOperand(3).isReg() && "MAdd/MSub must have at least 4 regs");

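  // (A plain MUL is selected as MADD with the zero register as the
  // accumulator, which is why the pattern keys off MulOpc plus ZeroReg.)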
  // The third input reg must be zero.
  if (MI->getOperand(3).getReg() != ZeroReg)
    return false;

  // Must only be used by the user we are combining with.
  if (!MRI.hasOneNonDBGUse(MI->getOperand(0).getReg()))
    return false;

  return true;
}

/// hasPattern - return true when there is potentially a faster code sequence
/// for an instruction chain ending in \p Root. All potential patterns are
/// listed in the \p Pattern vector. Patterns should be sorted in priority
/// order since the pattern evaluator stops checking as soon as it finds a
/// faster sequence.
bool AArch64InstrInfo::hasPattern(
    MachineInstr &Root,
    SmallVectorImpl<MachineCombinerPattern::MC_PATTERN> &Pattern) const {
  unsigned Opc = Root.getOpcode();
  MachineBasicBlock &MBB = *Root.getParent();
  bool Found = false;

  if (!isCombineInstrCandidate(Opc))
    return false;
  if (isCombineInstrSettingFlag(Opc)) {
    int Cmp_NZCV = Root.findRegisterDefOperandIdx(AArch64::NZCV, true);
    // When NZCV is live bail out.
    if (Cmp_NZCV == -1)
      return false;
    unsigned NewOpc = convertFlagSettingOpcode(&Root);
    // When the opcode can't change bail out.
    // CHECKME: do we miss any cases for opcode conversion?
    if (NewOpc == Opc)
      return false;
    Opc = NewOpc;
  }
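  // E.g. an ADDSWrr whose NZCV def is dead is treated as ADDWrr from here
  // on, so the MUL-accumulate patterns below still apply (assuming
  // convertFlagSettingOpcode maps each flag-setting form to its plain one).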

  switch (Opc) {
  default:
    break;
  case AArch64::ADDWrr:
    assert(Root.getOperand(1).isReg() && Root.getOperand(2).isReg() &&
           "ADDWrr does not have register operands");
    if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDWrrr,
                          AArch64::WZR)) {
      Pattern.push_back(MachineCombinerPattern::MC_MULADDW_OP1);
      Found = true;
    }
    if (canCombineWithMUL(MBB, Root.getOperand(2), AArch64::MADDWrrr,
                          AArch64::WZR)) {
      Pattern.push_back(MachineCombinerPattern::MC_MULADDW_OP2);
      Found = true;
    }
    break;
  case AArch64::ADDXrr:
    if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDXrrr,
                          AArch64::XZR)) {
      Pattern.push_back(MachineCombinerPattern::MC_MULADDX_OP1);
      Found = true;
    }
    if (canCombineWithMUL(MBB, Root.getOperand(2), AArch64::MADDXrrr,
                          AArch64::XZR)) {
      Pattern.push_back(MachineCombinerPattern::MC_MULADDX_OP2);
      Found = true;
    }
    break;
  case AArch64::SUBWrr:
    if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDWrrr,
                          AArch64::WZR)) {
      Pattern.push_back(MachineCombinerPattern::MC_MULSUBW_OP1);
      Found = true;
    }
    if (canCombineWithMUL(MBB, Root.getOperand(2), AArch64::MADDWrrr,
                          AArch64::WZR)) {
      Pattern.push_back(MachineCombinerPattern::MC_MULSUBW_OP2);
      Found = true;
    }
    break;
  case AArch64::SUBXrr:
    if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDXrrr,
                          AArch64::XZR)) {
      Pattern.push_back(MachineCombinerPattern::MC_MULSUBX_OP1);
      Found = true;
    }
    if (canCombineWithMUL(MBB, Root.getOperand(2), AArch64::MADDXrrr,
                          AArch64::XZR)) {
      Pattern.push_back(MachineCombinerPattern::MC_MULSUBX_OP2);
      Found = true;
    }
    break;
  case AArch64::ADDWri:
    if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDWrrr,
                          AArch64::WZR)) {
      Pattern.push_back(MachineCombinerPattern::MC_MULADDWI_OP1);
      Found = true;
    }
    break;
  case AArch64::ADDXri:
    if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDXrrr,
                          AArch64::XZR)) {
      Pattern.push_back(MachineCombinerPattern::MC_MULADDXI_OP1);
      Found = true;
    }
    break;
  case AArch64::SUBWri:
    if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDWrrr,
                          AArch64::WZR)) {
      Pattern.push_back(MachineCombinerPattern::MC_MULSUBWI_OP1);
      Found = true;
    }
    break;
  case AArch64::SUBXri:
    if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDXrrr,
                          AArch64::XZR)) {
      Pattern.push_back(MachineCombinerPattern::MC_MULSUBXI_OP1);
      Found = true;
    }
    break;
  }
  return Found;
}

/// genMadd - Generate madd instruction and combine mul and add.
/// Example:
///   MUL I=A,B,0
///   ADD R,I,C
///   ==> MADD R,A,B,C
/// \param Root is the ADD instruction
/// \param [out] InsInstrs is a vector of machine instructions and will
/// contain the generated madd instruction
/// \param IdxMulOpd is index of operand in Root that is the result of
/// the MUL. In the example above IdxMulOpd is 1.
/// \param MaddOpc the opcode of the madd instruction
static MachineInstr *genMadd(MachineFunction &MF, MachineRegisterInfo &MRI,
                             const TargetInstrInfo *TII, MachineInstr &Root,
                             SmallVectorImpl<MachineInstr *> &InsInstrs,
                             unsigned IdxMulOpd, unsigned MaddOpc,
                             const TargetRegisterClass *RC) {
  assert(IdxMulOpd == 1 || IdxMulOpd == 2);

  unsigned IdxOtherOpd = IdxMulOpd == 1 ? 2 : 1;
  MachineInstr *MUL = MRI.getUniqueVRegDef(Root.getOperand(IdxMulOpd).getReg());
  unsigned ResultReg = Root.getOperand(0).getReg();
  unsigned SrcReg0 = MUL->getOperand(1).getReg();
  bool Src0IsKill = MUL->getOperand(1).isKill();
  unsigned SrcReg1 = MUL->getOperand(2).getReg();
  bool Src1IsKill = MUL->getOperand(2).isKill();
  unsigned SrcReg2 = Root.getOperand(IdxOtherOpd).getReg();
  bool Src2IsKill = Root.getOperand(IdxOtherOpd).isKill();

  if (TargetRegisterInfo::isVirtualRegister(ResultReg))
    MRI.constrainRegClass(ResultReg, RC);
  if (TargetRegisterInfo::isVirtualRegister(SrcReg0))
    MRI.constrainRegClass(SrcReg0, RC);
  if (TargetRegisterInfo::isVirtualRegister(SrcReg1))
    MRI.constrainRegClass(SrcReg1, RC);
  if (TargetRegisterInfo::isVirtualRegister(SrcReg2))
    MRI.constrainRegClass(SrcReg2, RC);

  MachineInstrBuilder MIB = BuildMI(MF, Root.getDebugLoc(), TII->get(MaddOpc),
                                    ResultReg)
                                .addReg(SrcReg0, getKillRegState(Src0IsKill))
                                .addReg(SrcReg1, getKillRegState(Src1IsKill))
                                .addReg(SrcReg2, getKillRegState(Src2IsKill));
  // Insert the MADD
  InsInstrs.push_back(MIB);
  return MUL;
}

/// genMaddR - Generate madd instruction and combine mul and add using
/// an extra virtual register
/// Example - an ADD intermediate needs to be stored in a register:
///   MUL I=A,B,0
///   ADD R,I,Imm
///   ==> ORR  V, ZR, Imm
///   ==> MADD R,A,B,V
/// \param Root is the ADD instruction
/// \param [out] InsInstrs is a vector of machine instructions and will
/// contain the generated madd instruction
/// \param IdxMulOpd is index of operand in Root that is the result of
/// the MUL. In the example above IdxMulOpd is 1.
/// \param MaddOpc the opcode of the madd instruction
/// \param VR is a virtual register that holds the value of an ADD operand
/// (V in the example above).
static MachineInstr *genMaddR(MachineFunction &MF, MachineRegisterInfo &MRI,
                              const TargetInstrInfo *TII, MachineInstr &Root,
                              SmallVectorImpl<MachineInstr *> &InsInstrs,
                              unsigned IdxMulOpd, unsigned MaddOpc,
                              unsigned VR, const TargetRegisterClass *RC) {
  assert(IdxMulOpd == 1 || IdxMulOpd == 2);

  MachineInstr *MUL = MRI.getUniqueVRegDef(Root.getOperand(IdxMulOpd).getReg());
  unsigned ResultReg = Root.getOperand(0).getReg();
  unsigned SrcReg0 = MUL->getOperand(1).getReg();
  bool Src0IsKill = MUL->getOperand(1).isKill();
  unsigned SrcReg1 = MUL->getOperand(2).getReg();
  bool Src1IsKill = MUL->getOperand(2).isKill();

  if (TargetRegisterInfo::isVirtualRegister(ResultReg))
    MRI.constrainRegClass(ResultReg, RC);
  if (TargetRegisterInfo::isVirtualRegister(SrcReg0))
    MRI.constrainRegClass(SrcReg0, RC);
  if (TargetRegisterInfo::isVirtualRegister(SrcReg1))
    MRI.constrainRegClass(SrcReg1, RC);
  if (TargetRegisterInfo::isVirtualRegister(VR))
    MRI.constrainRegClass(VR, RC);

  MachineInstrBuilder MIB = BuildMI(MF, Root.getDebugLoc(), TII->get(MaddOpc),
                                    ResultReg)
                                .addReg(SrcReg0, getKillRegState(Src0IsKill))
                                .addReg(SrcReg1, getKillRegState(Src1IsKill))
                                .addReg(VR);
  // Insert the MADD
  InsInstrs.push_back(MIB);
  return MUL;
}

/// genAlternativeCodeSequence - when hasPattern() finds a pattern,
/// this function generates the instructions that could replace the
/// original code sequence.
void AArch64InstrInfo::genAlternativeCodeSequence(
    MachineInstr &Root, MachineCombinerPattern::MC_PATTERN Pattern,
    SmallVectorImpl<MachineInstr *> &InsInstrs,
    SmallVectorImpl<MachineInstr *> &DelInstrs,
    DenseMap<unsigned, unsigned> &InstrIdxForVirtReg) const {
  MachineBasicBlock &MBB = *Root.getParent();
  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
  MachineFunction &MF = *MBB.getParent();
  const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();

  MachineInstr *MUL;
  const TargetRegisterClass *RC;
  unsigned Opc;
  switch (Pattern) {
  default:
    // signal error.
    break;
  case MachineCombinerPattern::MC_MULADDW_OP1:
  case MachineCombinerPattern::MC_MULADDX_OP1:
    // MUL I=A,B,0
    // ADD R,I,C
    // ==> MADD R,A,B,C
    // --- Create(MADD);
    if (Pattern == MachineCombinerPattern::MC_MULADDW_OP1) {
      Opc = AArch64::MADDWrrr;
      RC = &AArch64::GPR32RegClass;
    } else {
      Opc = AArch64::MADDXrrr;
      RC = &AArch64::GPR64RegClass;
    }
    MUL = genMadd(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC);
    break;
  case MachineCombinerPattern::MC_MULADDW_OP2:
  case MachineCombinerPattern::MC_MULADDX_OP2:
    // MUL I=A,B,0
    // ADD R,C,I
    // ==> MADD R,A,B,C
    // --- Create(MADD);
    if (Pattern == MachineCombinerPattern::MC_MULADDW_OP2) {
      Opc = AArch64::MADDWrrr;
      RC = &AArch64::GPR32RegClass;
    } else {
      Opc = AArch64::MADDXrrr;
      RC = &AArch64::GPR64RegClass;
    }
    MUL = genMadd(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC);
    break;
  case MachineCombinerPattern::MC_MULADDWI_OP1:
  case MachineCombinerPattern::MC_MULADDXI_OP1: {
    // MUL I=A,B,0
    // ADD R,I,Imm
    // ==> ORR  V, ZR, Imm
    // ==> MADD R,A,B,V
    // --- Create(MADD);
    const TargetRegisterClass *OrrRC;
    unsigned BitSize, OrrOpc, ZeroReg;
    if (Pattern == MachineCombinerPattern::MC_MULADDWI_OP1) {
      OrrOpc = AArch64::ORRWri;
      OrrRC = &AArch64::GPR32spRegClass;
      BitSize = 32;
      ZeroReg = AArch64::WZR;
      Opc = AArch64::MADDWrrr;
      RC = &AArch64::GPR32RegClass;
    } else {
      OrrOpc = AArch64::ORRXri;
      OrrRC = &AArch64::GPR64spRegClass;
      BitSize = 64;
      ZeroReg = AArch64::XZR;
      Opc = AArch64::MADDXrrr;
      RC = &AArch64::GPR64RegClass;
    }
    unsigned NewVR = MRI.createVirtualRegister(OrrRC);
    uint64_t Imm = Root.getOperand(2).getImm();

    if (Root.getOperand(3).isImm()) {
      unsigned Val = Root.getOperand(3).getImm();
      Imm = Imm << Val;
    }
    uint64_t UImm = Imm << (64 - BitSize) >> (64 - BitSize);
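    // Worked example: with BitSize == 32 the shift pair keeps only the low
    // 32 bits, so Imm == 0x100000003 truncates to UImm == 0x3.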
    uint64_t Encoding;
    if (AArch64_AM::processLogicalImmediate(UImm, BitSize, Encoding)) {
      MachineInstrBuilder MIB1 =
          BuildMI(MF, Root.getDebugLoc(), TII->get(OrrOpc), NewVR)
              .addReg(ZeroReg)
              .addImm(Encoding);
      InsInstrs.push_back(MIB1);
      InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0));
      MUL = genMaddR(MF, MRI, TII, Root, InsInstrs, 1, Opc, NewVR, RC);
    }
    break;
  }
  case MachineCombinerPattern::MC_MULSUBW_OP1:
  case MachineCombinerPattern::MC_MULSUBX_OP1: {
    // MUL I=A,B,0
    // SUB R,I, C
    // ==> SUB  V, 0, C
    // ==> MADD R,A,B,V // = -C + A*B
    // --- Create(MADD);
    const TargetRegisterClass *SubRC;
    unsigned SubOpc, ZeroReg;
    if (Pattern == MachineCombinerPattern::MC_MULSUBW_OP1) {
      SubOpc = AArch64::SUBWrr;
      SubRC = &AArch64::GPR32spRegClass;
      ZeroReg = AArch64::WZR;
      Opc = AArch64::MADDWrrr;
      RC = &AArch64::GPR32RegClass;
    } else {
      SubOpc = AArch64::SUBXrr;
      SubRC = &AArch64::GPR64spRegClass;
      ZeroReg = AArch64::XZR;
      Opc = AArch64::MADDXrrr;
      RC = &AArch64::GPR64RegClass;
    }
    unsigned NewVR = MRI.createVirtualRegister(SubRC);
    // SUB NewVR, 0, C
    MachineInstrBuilder MIB1 =
        BuildMI(MF, Root.getDebugLoc(), TII->get(SubOpc), NewVR)
            .addReg(ZeroReg)
            .addOperand(Root.getOperand(2));
    InsInstrs.push_back(MIB1);
    InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0));
    MUL = genMaddR(MF, MRI, TII, Root, InsInstrs, 1, Opc, NewVR, RC);
    break;
  }
  case MachineCombinerPattern::MC_MULSUBW_OP2:
  case MachineCombinerPattern::MC_MULSUBX_OP2:
    // MUL I=A,B,0
    // SUB R,C,I
    // ==> MSUB R,A,B,C (computes C - A*B)
    // --- Create(MSUB);
    if (Pattern == MachineCombinerPattern::MC_MULSUBW_OP2) {
      Opc = AArch64::MSUBWrrr;
      RC = &AArch64::GPR32RegClass;
    } else {
      Opc = AArch64::MSUBXrrr;
      RC = &AArch64::GPR64RegClass;
    }
    MUL = genMadd(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC);
    break;
  case MachineCombinerPattern::MC_MULSUBWI_OP1:
  case MachineCombinerPattern::MC_MULSUBXI_OP1: {
    // MUL I=A,B,0
    // SUB R,I, Imm
    // ==> ORR  V, ZR, -Imm
    // ==> MADD R,A,B,V // = -Imm + A*B
    // --- Create(MADD);
    const TargetRegisterClass *OrrRC;
    unsigned BitSize, OrrOpc, ZeroReg;
    if (Pattern == MachineCombinerPattern::MC_MULSUBWI_OP1) {
      OrrOpc = AArch64::ORRWri;
      OrrRC = &AArch64::GPR32spRegClass;
      BitSize = 32;
      ZeroReg = AArch64::WZR;
      Opc = AArch64::MADDWrrr;
      RC = &AArch64::GPR32RegClass;
    } else {
      OrrOpc = AArch64::ORRXri;
      OrrRC = &AArch64::GPR64spRegClass;
      BitSize = 64;
      ZeroReg = AArch64::XZR;
      Opc = AArch64::MADDXrrr;
      RC = &AArch64::GPR64RegClass;
    }
    unsigned NewVR = MRI.createVirtualRegister(OrrRC);
    int Imm = Root.getOperand(2).getImm();
    if (Root.getOperand(3).isImm()) {
      unsigned Val = Root.getOperand(3).getImm();
      Imm = Imm << Val;
    }
    uint64_t UImm = uint64_t(-Imm) << (64 - BitSize) >> (64 - BitSize);
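    // Worked example: Imm == 1, BitSize == 32 gives UImm == 0xffffffff
    // (-1 truncated to 32 bits); the explicit cast keeps the shift in
    // 64-bit arithmetic instead of shifting a 32-bit int by 32.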
    uint64_t Encoding;
    if (AArch64_AM::processLogicalImmediate(UImm, BitSize, Encoding)) {
      MachineInstrBuilder MIB1 =
          BuildMI(MF, Root.getDebugLoc(), TII->get(OrrOpc), NewVR)
              .addReg(ZeroReg)
              .addImm(Encoding);
      InsInstrs.push_back(MIB1);
      InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0));
      MUL = genMaddR(MF, MRI, TII, Root, InsInstrs, 1, Opc, NewVR, RC);
    }
    break;
  }
  } // end switch (Pattern)
  // Record MUL and ADD/SUB for deletion
  DelInstrs.push_back(MUL);
  DelInstrs.push_back(&Root);

  return;
}

/// \brief Replace a csinc-branch sequence by a simple conditional branch
///
/// Examples:
/// 1.
///   csinc w9, wzr, wzr, <condition code>
///   tbnz  w9, #0, 0x44
/// to
///   b.<inverted condition code>
///
/// 2.
///   csinc w9, wzr, wzr, <condition code>
///   tbz   w9, #0, 0x44
/// to
///   b.<condition code>
///
/// \param MI Conditional Branch
/// \return True when the simple conditional branch is generated
///
bool AArch64InstrInfo::optimizeCondBranch(MachineInstr *MI) const {
  bool IsNegativeBranch = false;
  bool IsTestAndBranch = false;
  unsigned TargetBBInMI = 0;
  switch (MI->getOpcode()) {
  default:
    llvm_unreachable("Unknown branch instruction?");
  case AArch64::Bcc:
    return false;
  case AArch64::CBZW:
  case AArch64::CBZX:
    TargetBBInMI = 1;
    break;
  case AArch64::CBNZW:
  case AArch64::CBNZX:
    TargetBBInMI = 1;
    IsNegativeBranch = true;
    break;
  case AArch64::TBZW:
  case AArch64::TBZX:
    TargetBBInMI = 2;
    IsTestAndBranch = true;
    break;
  case AArch64::TBNZW:
  case AArch64::TBNZX:
    TargetBBInMI = 2;
    IsNegativeBranch = true;
    IsTestAndBranch = true;
    break;
  }
  // So we increment a zero register and test for bits other
  // than bit 0? Conservatively bail out in case the verifier
  // missed this case.
  if (IsTestAndBranch && MI->getOperand(1).getImm())
    return false;

  // Find Definition.
  assert(MI->getParent() && "Incomplete machine instruction\n");
  MachineBasicBlock *MBB = MI->getParent();
  MachineFunction *MF = MBB->getParent();
  MachineRegisterInfo *MRI = &MF->getRegInfo();
  unsigned VReg = MI->getOperand(0).getReg();
  if (!TargetRegisterInfo::isVirtualRegister(VReg))
    return false;

  MachineInstr *DefMI = MRI->getVRegDef(VReg);

  // Look for CSINC
  if (!(DefMI->getOpcode() == AArch64::CSINCWr &&
        DefMI->getOperand(1).getReg() == AArch64::WZR &&
        DefMI->getOperand(2).getReg() == AArch64::WZR) &&
      !(DefMI->getOpcode() == AArch64::CSINCXr &&
        DefMI->getOperand(1).getReg() == AArch64::XZR &&
        DefMI->getOperand(2).getReg() == AArch64::XZR))
    return false;

  if (DefMI->findRegisterDefOperandIdx(AArch64::NZCV, true) != -1)
    return false;

  AArch64CC::CondCode CC = (AArch64CC::CondCode)DefMI->getOperand(3).getImm();
  bool CheckOnlyCCWrites = true;
  // Convert only when the condition code is not modified between
  // the CSINC and the branch. The CC may be used by other
  // instructions in between.
  if (modifiesConditionCode(DefMI, MI, CheckOnlyCCWrites, &getRegisterInfo()))
    return false;
  MachineBasicBlock &RefToMBB = *MBB;
  MachineBasicBlock *TBB = MI->getOperand(TargetBBInMI).getMBB();
  DebugLoc DL = MI->getDebugLoc();
  if (IsNegativeBranch)
    CC = AArch64CC::getInvertedCondCode(CC);
  BuildMI(RefToMBB, MI, DL, get(AArch64::Bcc)).addImm(CC).addMBB(TBB);
  MI->eraseFromParent();
  return true;
}
