//===-- ARMBaseInstrInfo.cpp - ARM Instruction Information ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the Base ARM implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#include "ARMBaseInstrInfo.h"
#include "ARMBaseRegisterInfo.h"
#include "ARMConstantPoolValue.h"
#include "ARMFeatures.h"
#include "ARMHazardRecognizer.h"
#include "ARMMachineFunctionInfo.h"
#include "ARMSubtarget.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "MCTargetDesc/ARMBaseInfo.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Triple.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/ScoreboardHazardRecognizer.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSchedule.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCInstrItineraries.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <new>
#include <utility>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "arm-instrinfo"

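// Pull in the TableGen-erated instruction information; with
// GET_INSTRINFO_CTOR_DTOR defined this also provides the ARMGenInstrInfo
// constructor and destructor definitions.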
#define GET_INSTRINFO_CTOR_DTOR
#include "ARMGenInstrInfo.inc"

static cl::opt<bool>
EnableARM3Addr("enable-arm-3-addr-conv", cl::Hidden,
               cl::desc("Enable ARM 2-addr to 3-addr conv"));

/// ARM_MLxEntry - Record information about MLA / MLS instructions.
struct ARM_MLxEntry {
  uint16_t MLxOpc;    // MLA / MLS opcode
  uint16_t MulOpc;    // Expanded multiplication opcode
  uint16_t AddSubOpc; // Expanded add / sub opcode
  bool NegAcc;        // True if the acc is negated before the add / sub.
  bool HasLane;       // True if instruction has an extra "lane" operand.
};

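// For example, the first scalar entry below records that a fused
// VMLAS Sd, Sn, Sm (Sd += Sn * Sm) can be expanded into
//   VMULS St, Sn, Sm  followed by  VADDS Sd, Sd, St
// while entries with NegAcc set negate the accumulated value, so their
// expansion ends in a subtract instead of an add.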
static const ARM_MLxEntry ARM_MLxTable[] = {
  // MLxOpc,          MulOpc,           AddSubOpc,   NegAcc, HasLane
  // fp scalar ops
  { ARM::VMLAS,       ARM::VMULS,       ARM::VADDS,  false,  false },
  { ARM::VMLSS,       ARM::VMULS,       ARM::VSUBS,  false,  false },
  { ARM::VMLAD,       ARM::VMULD,       ARM::VADDD,  false,  false },
  { ARM::VMLSD,       ARM::VMULD,       ARM::VSUBD,  false,  false },
  { ARM::VNMLAS,      ARM::VNMULS,      ARM::VSUBS,  true,   false },
  { ARM::VNMLSS,      ARM::VMULS,       ARM::VSUBS,  true,   false },
  { ARM::VNMLAD,      ARM::VNMULD,      ARM::VSUBD,  true,   false },
  { ARM::VNMLSD,      ARM::VMULD,       ARM::VSUBD,  true,   false },

  // fp SIMD ops
  { ARM::VMLAfd,      ARM::VMULfd,      ARM::VADDfd, false,  false },
  { ARM::VMLSfd,      ARM::VMULfd,      ARM::VSUBfd, false,  false },
  { ARM::VMLAfq,      ARM::VMULfq,      ARM::VADDfq, false,  false },
  { ARM::VMLSfq,      ARM::VMULfq,      ARM::VSUBfq, false,  false },
  { ARM::VMLAslfd,    ARM::VMULslfd,    ARM::VADDfd, false,  true  },
  { ARM::VMLSslfd,    ARM::VMULslfd,    ARM::VSUBfd, false,  true  },
  { ARM::VMLAslfq,    ARM::VMULslfq,    ARM::VADDfq, false,  true  },
  { ARM::VMLSslfq,    ARM::VMULslfq,    ARM::VSUBfq, false,  true  },
};

ARMBaseInstrInfo::ARMBaseInstrInfo(const ARMSubtarget& STI)
    : ARMGenInstrInfo(ARM::ADJCALLSTACKDOWN, ARM::ADJCALLSTACKUP),
      Subtarget(STI) {
  for (unsigned i = 0, e = array_lengthof(ARM_MLxTable); i != e; ++i) {
    if (!MLxEntryMap.insert(std::make_pair(ARM_MLxTable[i].MLxOpc, i)).second)
      llvm_unreachable("Duplicated entries?");
    MLxHazardOpcodes.insert(ARM_MLxTable[i].AddSubOpc);
    MLxHazardOpcodes.insert(ARM_MLxTable[i].MulOpc);
  }
}

// Use a ScoreboardHazardRecognizer for prepass ARM scheduling. TargetInstrInfo
// currently defaults to no prepass hazard recognizer.
ScheduleHazardRecognizer *
ARMBaseInstrInfo::CreateTargetHazardRecognizer(const TargetSubtargetInfo *STI,
                                               const ScheduleDAG *DAG) const {
  if (usePreRAHazardRecognizer()) {
    const InstrItineraryData *II =
        static_cast<const ARMSubtarget *>(STI)->getInstrItineraryData();
    return new ScoreboardHazardRecognizer(II, DAG, "pre-RA-sched");
  }
  return TargetInstrInfo::CreateTargetHazardRecognizer(STI, DAG);
}

ScheduleHazardRecognizer *ARMBaseInstrInfo::
CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II,
                                   const ScheduleDAG *DAG) const {
  if (Subtarget.isThumb2() || Subtarget.hasVFP2Base())
    return new ARMHazardRecognizer(II, DAG);
  return TargetInstrInfo::CreateTargetPostRAHazardRecognizer(II, DAG);
}

MachineInstr *ARMBaseInstrInfo::convertToThreeAddress(
    MachineFunction::iterator &MFI, MachineInstr &MI, LiveVariables *LV) const {
  // FIXME: Thumb2 support.

  if (!EnableARM3Addr)
    return nullptr;

  MachineFunction &MF = *MI.getParent()->getParent();
  uint64_t TSFlags = MI.getDesc().TSFlags;
  bool isPre = false;
  switch ((TSFlags & ARMII::IndexModeMask) >> ARMII::IndexModeShift) {
  default: return nullptr;
  case ARMII::IndexModePre:
    isPre = true;
    break;
  case ARMII::IndexModePost:
    break;
  }

  // Try splitting an indexed load/store to an un-indexed one plus an add/sub
  // operation.
  unsigned MemOpc = getUnindexedOpcode(MI.getOpcode());
  if (MemOpc == 0)
    return nullptr;

  MachineInstr *UpdateMI = nullptr;
  MachineInstr *MemMI = nullptr;
  unsigned AddrMode = (TSFlags & ARMII::AddrModeMask);
  const MCInstrDesc &MCID = MI.getDesc();
  unsigned NumOps = MCID.getNumOperands();
  bool isLoad = !MI.mayStore();
  const MachineOperand &WB = isLoad ? MI.getOperand(1) : MI.getOperand(0);
  const MachineOperand &Base = MI.getOperand(2);
  const MachineOperand &Offset = MI.getOperand(NumOps - 3);
  Register WBReg = WB.getReg();
  Register BaseReg = Base.getReg();
  Register OffReg = Offset.getReg();
  unsigned OffImm = MI.getOperand(NumOps - 2).getImm();
  ARMCC::CondCodes Pred = (ARMCC::CondCodes)MI.getOperand(NumOps - 1).getImm();
  switch (AddrMode) {
  default: llvm_unreachable("Unknown indexed op!");
  case ARMII::AddrMode2: {
    bool isSub = ARM_AM::getAM2Op(OffImm) == ARM_AM::sub;
    unsigned Amt = ARM_AM::getAM2Offset(OffImm);
    if (OffReg == 0) {
      if (ARM_AM::getSOImmVal(Amt) == -1)
        // Can't encode it in a so_imm operand. This transformation will
        // add more than 1 instruction. Abandon!
        return nullptr;
      UpdateMI = BuildMI(MF, MI.getDebugLoc(),
                         get(isSub ? ARM::SUBri : ARM::ADDri), WBReg)
                     .addReg(BaseReg)
                     .addImm(Amt)
                     .add(predOps(Pred))
                     .add(condCodeOp());
    } else if (Amt != 0) {
      ARM_AM::ShiftOpc ShOpc = ARM_AM::getAM2ShiftOpc(OffImm);
      unsigned SOOpc = ARM_AM::getSORegOpc(ShOpc, Amt);
      UpdateMI = BuildMI(MF, MI.getDebugLoc(),
                         get(isSub ? ARM::SUBrsi : ARM::ADDrsi), WBReg)
                     .addReg(BaseReg)
                     .addReg(OffReg)
                     .addReg(0)
                     .addImm(SOOpc)
                     .add(predOps(Pred))
                     .add(condCodeOp());
    } else
      UpdateMI = BuildMI(MF, MI.getDebugLoc(),
                         get(isSub ? ARM::SUBrr : ARM::ADDrr), WBReg)
                     .addReg(BaseReg)
                     .addReg(OffReg)
                     .add(predOps(Pred))
                     .add(condCodeOp());
    break;
  }
  case ARMII::AddrMode3: {
    bool isSub = ARM_AM::getAM3Op(OffImm) == ARM_AM::sub;
    unsigned Amt = ARM_AM::getAM3Offset(OffImm);
    if (OffReg == 0)
      // Immediate is 8-bits. It's guaranteed to fit in a so_imm operand.
      UpdateMI = BuildMI(MF, MI.getDebugLoc(),
                         get(isSub ? ARM::SUBri : ARM::ADDri), WBReg)
                     .addReg(BaseReg)
                     .addImm(Amt)
                     .add(predOps(Pred))
                     .add(condCodeOp());
    else
      UpdateMI = BuildMI(MF, MI.getDebugLoc(),
                         get(isSub ? ARM::SUBrr : ARM::ADDrr), WBReg)
                     .addReg(BaseReg)
                     .addReg(OffReg)
                     .add(predOps(Pred))
                     .add(condCodeOp());
    break;
  }
  }

  std::vector<MachineInstr*> NewMIs;
  if (isPre) {
    if (isLoad)
      MemMI =
          BuildMI(MF, MI.getDebugLoc(), get(MemOpc), MI.getOperand(0).getReg())
              .addReg(WBReg)
              .addImm(0)
              .addImm(Pred);
    else
      MemMI = BuildMI(MF, MI.getDebugLoc(), get(MemOpc))
                  .addReg(MI.getOperand(1).getReg())
                  .addReg(WBReg)
                  .addReg(0)
                  .addImm(0)
                  .addImm(Pred);
    NewMIs.push_back(MemMI);
    NewMIs.push_back(UpdateMI);
  } else {
    if (isLoad)
      MemMI =
          BuildMI(MF, MI.getDebugLoc(), get(MemOpc), MI.getOperand(0).getReg())
              .addReg(BaseReg)
              .addImm(0)
              .addImm(Pred);
    else
      MemMI = BuildMI(MF, MI.getDebugLoc(), get(MemOpc))
                  .addReg(MI.getOperand(1).getReg())
                  .addReg(BaseReg)
                  .addReg(0)
                  .addImm(0)
                  .addImm(Pred);
    if (WB.isDead())
      UpdateMI->getOperand(0).setIsDead();
    NewMIs.push_back(UpdateMI);
    NewMIs.push_back(MemMI);
  }

  // Transfer LiveVariables states, kill / dead info.
  if (LV) {
    for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI.getOperand(i);
      if (MO.isReg() && Register::isVirtualRegister(MO.getReg())) {
        Register Reg = MO.getReg();

        LiveVariables::VarInfo &VI = LV->getVarInfo(Reg);
        if (MO.isDef()) {
          MachineInstr *NewMI = (Reg == WBReg) ? UpdateMI : MemMI;
          if (MO.isDead())
            LV->addVirtualRegisterDead(Reg, *NewMI);
        }
        if (MO.isUse() && MO.isKill()) {
          for (unsigned j = 0; j < 2; ++j) {
            // Look at the two new MI's in reverse order.
            MachineInstr *NewMI = NewMIs[j];
            if (!NewMI->readsRegister(Reg))
              continue;
            LV->addVirtualRegisterKilled(Reg, *NewMI);
            if (VI.removeKill(MI))
              VI.Kills.push_back(NewMI);
            break;
          }
        }
      }
    }
  }

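  // NewMIs is held in reverse program order (the kill-transfer loop above
  // relies on this), so inserting NewMIs[1] and then NewMIs[0] before the
  // original instruction emits, for pre-indexed forms, the address update
  // followed by the memory access, and for post-indexed forms the access
  // followed by the update.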
  MachineBasicBlock::iterator MBBI = MI.getIterator();
  MFI->insert(MBBI, NewMIs[1]);
  MFI->insert(MBBI, NewMIs[0]);
  return NewMIs[0];
}

// Branch analysis.
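// On success, the results follow the usual TargetInstrInfo conventions: a
// lone unconditional branch sets only TBB; a conditional branch sets TBB and
// leaves Cond holding its two predicate operands (condition code and CPSR);
// a conditional branch followed by an unconditional one additionally sets
// FBB to the unconditional target.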
bool ARMBaseInstrInfo::analyzeBranch(MachineBasicBlock &MBB,
                                     MachineBasicBlock *&TBB,
                                     MachineBasicBlock *&FBB,
                                     SmallVectorImpl<MachineOperand> &Cond,
                                     bool AllowModify) const {
  TBB = nullptr;
  FBB = nullptr;

  MachineBasicBlock::iterator I = MBB.end();
  if (I == MBB.begin())
    return false; // Empty blocks are easy.
  --I;

  // Walk backwards from the end of the basic block until the branch is
  // analyzed or we give up.
  while (isPredicated(*I) || I->isTerminator() || I->isDebugValue()) {
    // Flag to be raised on unanalyzable instructions. This is useful in cases
    // where we want to clean up the end of the basic block before we bail
    // out.
    bool CantAnalyze = false;

    // Skip over DEBUG values and predicated nonterminators.
    while (I->isDebugInstr() || !I->isTerminator()) {
      if (I == MBB.begin())
        return false;
      --I;
    }

    if (isIndirectBranchOpcode(I->getOpcode()) ||
        isJumpTableBranchOpcode(I->getOpcode())) {
      // Indirect branches and jump tables can't be analyzed, but we still want
      // to clean up any instructions at the tail of the basic block.
      CantAnalyze = true;
    } else if (isUncondBranchOpcode(I->getOpcode())) {
      TBB = I->getOperand(0).getMBB();
    } else if (isCondBranchOpcode(I->getOpcode())) {
      // Bail out if we encounter multiple conditional branches.
      if (!Cond.empty())
        return true;

      assert(!FBB && "FBB should have been null.");
      FBB = TBB;
      TBB = I->getOperand(0).getMBB();
      Cond.push_back(I->getOperand(1));
      Cond.push_back(I->getOperand(2));
    } else if (I->isReturn()) {
      // Returns can't be analyzed, but we should run cleanup.
      CantAnalyze = !isPredicated(*I);
    } else {
      // We encountered some other unrecognized terminator. Bail out
      // immediately.
      return true;
    }

    // Cleanup code - to be run for unpredicated unconditional branches and
    // returns.
    if (!isPredicated(*I) &&
        (isUncondBranchOpcode(I->getOpcode()) ||
         isIndirectBranchOpcode(I->getOpcode()) ||
         isJumpTableBranchOpcode(I->getOpcode()) ||
         I->isReturn())) {
      // Forget any previous conditional branch information - it no longer
      // applies.
      Cond.clear();
      FBB = nullptr;

      // If we can modify the function, delete everything below this
      // unconditional branch.
      if (AllowModify) {
        MachineBasicBlock::iterator DI = std::next(I);
        while (DI != MBB.end()) {
          MachineInstr &InstToDelete = *DI;
          ++DI;
          InstToDelete.eraseFromParent();
        }
      }
    }

    if (CantAnalyze)
      return true;

    if (I == MBB.begin())
      return false;

    --I;
  }

  // We made it past the terminators without bailing out - we must have
  // analyzed this branch successfully.
  return false;
}

unsigned ARMBaseInstrInfo::removeBranch(MachineBasicBlock &MBB,
                                        int *BytesRemoved) const {
  assert(!BytesRemoved && "code size not handled");

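  // A well-formed block ends with at most two branches: an optional
  // conditional branch followed by an optional unconditional one, so this
  // erases at most two instructions and returns how many were removed.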
  MachineBasicBlock::iterator I = MBB.getLastNonDebugInstr();
  if (I == MBB.end())
    return 0;

  if (!isUncondBranchOpcode(I->getOpcode()) &&
      !isCondBranchOpcode(I->getOpcode()))
    return 0;

  // Remove the branch.
  I->eraseFromParent();

  I = MBB.end();

  if (I == MBB.begin()) return 1;
  --I;
  if (!isCondBranchOpcode(I->getOpcode()))
    return 1;

  // Remove the branch.
  I->eraseFromParent();
  return 2;
}

unsigned ARMBaseInstrInfo::insertBranch(MachineBasicBlock &MBB,
                                        MachineBasicBlock *TBB,
                                        MachineBasicBlock *FBB,
                                        ArrayRef<MachineOperand> Cond,
                                        const DebugLoc &DL,
                                        int *BytesAdded) const {
  assert(!BytesAdded && "code size not handled");
  ARMFunctionInfo *AFI = MBB.getParent()->getInfo<ARMFunctionInfo>();
  int BOpc = !AFI->isThumbFunction()
      ? ARM::B : (AFI->isThumb2Function() ? ARM::t2B : ARM::tB);
  int BccOpc = !AFI->isThumbFunction()
      ? ARM::Bcc : (AFI->isThumb2Function() ? ARM::t2Bcc : ARM::tBcc);
  bool isThumb = AFI->isThumbFunction() || AFI->isThumb2Function();

  // Shouldn't be a fall through.
  assert(TBB && "insertBranch must not be told to insert a fallthrough");
  assert((Cond.size() == 2 || Cond.size() == 0) &&
         "ARM branch conditions have two components!");

  // For conditional branches, we use addOperand to preserve CPSR flags.

  if (!FBB) {
    if (Cond.empty()) { // Unconditional branch?
      if (isThumb)
        BuildMI(&MBB, DL, get(BOpc)).addMBB(TBB).add(predOps(ARMCC::AL));
      else
        BuildMI(&MBB, DL, get(BOpc)).addMBB(TBB);
    } else
      BuildMI(&MBB, DL, get(BccOpc))
          .addMBB(TBB)
          .addImm(Cond[0].getImm())
          .add(Cond[1]);
    return 1;
  }

  // Two-way conditional branch.
  BuildMI(&MBB, DL, get(BccOpc))
      .addMBB(TBB)
      .addImm(Cond[0].getImm())
      .add(Cond[1]);
  if (isThumb)
    BuildMI(&MBB, DL, get(BOpc)).addMBB(FBB).add(predOps(ARMCC::AL));
  else
    BuildMI(&MBB, DL, get(BOpc)).addMBB(FBB);
  return 2;
}

bool ARMBaseInstrInfo::
reverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
  ARMCC::CondCodes CC = (ARMCC::CondCodes)(int)Cond[0].getImm();
  Cond[0].setImm(ARMCC::getOppositeCondition(CC));
  return false;
}

bool ARMBaseInstrInfo::isPredicated(const MachineInstr &MI) const {
  if (MI.isBundle()) {
    MachineBasicBlock::const_instr_iterator I = MI.getIterator();
    MachineBasicBlock::const_instr_iterator E = MI.getParent()->instr_end();
    while (++I != E && I->isInsideBundle()) {
      int PIdx = I->findFirstPredOperandIdx();
      if (PIdx != -1 && I->getOperand(PIdx).getImm() != ARMCC::AL)
        return true;
    }
    return false;
  }

  int PIdx = MI.findFirstPredOperandIdx();
  return PIdx != -1 && MI.getOperand(PIdx).getImm() != ARMCC::AL;
}

std::string ARMBaseInstrInfo::createMIROperandComment(
    const MachineInstr &MI, const MachineOperand &Op, unsigned OpIdx,
    const TargetRegisterInfo *TRI) const {

  // First, let's see if there is a generic comment for this operand
  std::string GenericComment =
      TargetInstrInfo::createMIROperandComment(MI, Op, OpIdx, TRI);
  if (!GenericComment.empty())
    return GenericComment;

  // If not, check if we have an immediate operand.
  if (Op.getType() != MachineOperand::MO_Immediate)
    return std::string();

  // And print its corresponding condition code if the immediate is a
  // predicate.
  int FirstPredOp = MI.findFirstPredOperandIdx();
  if (FirstPredOp != (int) OpIdx)
    return std::string();

  std::string CC = "CC::";
  CC += ARMCondCodeToString((ARMCC::CondCodes)Op.getImm());
  return CC;
}

bool ARMBaseInstrInfo::PredicateInstruction(
    MachineInstr &MI, ArrayRef<MachineOperand> Pred) const {
  unsigned Opc = MI.getOpcode();
  if (isUncondBranchOpcode(Opc)) {
    MI.setDesc(get(getMatchingCondBranchOpcode(Opc)));
    MachineInstrBuilder(*MI.getParent()->getParent(), MI)
        .addImm(Pred[0].getImm())
        .addReg(Pred[1].getReg());
    return true;
  }

  int PIdx = MI.findFirstPredOperandIdx();
  if (PIdx != -1) {
    MachineOperand &PMO = MI.getOperand(PIdx);
    PMO.setImm(Pred[0].getImm());
    MI.getOperand(PIdx+1).setReg(Pred[1].getReg());
    return true;
  }
  return false;
}

bool ARMBaseInstrInfo::SubsumesPredicate(ArrayRef<MachineOperand> Pred1,
                                         ArrayRef<MachineOperand> Pred2) const {
  if (Pred1.size() > 2 || Pred2.size() > 2)
    return false;

  ARMCC::CondCodes CC1 = (ARMCC::CondCodes)Pred1[0].getImm();
  ARMCC::CondCodes CC2 = (ARMCC::CondCodes)Pred2[0].getImm();
  if (CC1 == CC2)
    return true;

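  // Otherwise CC1 subsumes CC2 only if every flags state satisfying CC2 also
  // satisfies CC1: e.g. HS (unsigned >=) holds whenever HI (unsigned >)
  // does, and LS (unsigned <=) holds whenever LO (unsigned <) or EQ does.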
  switch (CC1) {
  default:
    return false;
  case ARMCC::AL:
    return true;
  case ARMCC::HS:
    return CC2 == ARMCC::HI;
  case ARMCC::LS:
    return CC2 == ARMCC::LO || CC2 == ARMCC::EQ;
  case ARMCC::GE:
    return CC2 == ARMCC::GT;
  case ARMCC::LE:
    return CC2 == ARMCC::LT;
  }
}

bool ARMBaseInstrInfo::DefinesPredicate(
    MachineInstr &MI, std::vector<MachineOperand> &Pred) const {
  bool Found = false;
  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI.getOperand(i);
    if ((MO.isRegMask() && MO.clobbersPhysReg(ARM::CPSR)) ||
        (MO.isReg() && MO.isDef() && MO.getReg() == ARM::CPSR)) {
      Pred.push_back(MO);
      Found = true;
    }
  }

  return Found;
}

bool ARMBaseInstrInfo::isCPSRDefined(const MachineInstr &MI) {
  for (const auto &MO : MI.operands())
    if (MO.isReg() && MO.getReg() == ARM::CPSR && MO.isDef() && !MO.isDead())
      return true;
  return false;
}

bool ARMBaseInstrInfo::isAddrMode3OpImm(const MachineInstr &MI,
                                        unsigned Op) const {
  const MachineOperand &Offset = MI.getOperand(Op + 1);
  return Offset.getReg() != 0;
}

// A load with a negative register offset requires an additional cycle and an
// extra I unit on Cortex-A57.
bool ARMBaseInstrInfo::isAddrMode3OpMinusReg(const MachineInstr &MI,
                                             unsigned Op) const {
  const MachineOperand &Offset = MI.getOperand(Op + 1);
  const MachineOperand &Opc = MI.getOperand(Op + 2);
  assert(Opc.isImm());
  assert(Offset.isReg());
  int64_t OpcImm = Opc.getImm();

  bool isSub = ARM_AM::getAM3Op(OpcImm) == ARM_AM::sub;
  return (isSub && Offset.getReg() != 0);
}

bool ARMBaseInstrInfo::isLdstScaledReg(const MachineInstr &MI,
                                       unsigned Op) const {
  const MachineOperand &Opc = MI.getOperand(Op + 2);
  unsigned OffImm = Opc.getImm();
  return ARM_AM::getAM2ShiftOpc(OffImm) != ARM_AM::no_shift;
}

// Load, scaled register offset, not plus LSL2
bool ARMBaseInstrInfo::isLdstScaledRegNotPlusLsl2(const MachineInstr &MI,
                                                  unsigned Op) const {
  const MachineOperand &Opc = MI.getOperand(Op + 2);
  unsigned OffImm = Opc.getImm();

  bool isAdd = ARM_AM::getAM2Op(OffImm) == ARM_AM::add;
  unsigned Amt = ARM_AM::getAM2Offset(OffImm);
  ARM_AM::ShiftOpc ShiftOpc = ARM_AM::getAM2ShiftOpc(OffImm);
  if (ShiftOpc == ARM_AM::no_shift) return false; // not scaled
  bool SimpleScaled = (isAdd && ShiftOpc == ARM_AM::lsl && Amt == 2);
  return !SimpleScaled;
}

// Minus reg for ldstso addr mode
bool ARMBaseInstrInfo::isLdstSoMinusReg(const MachineInstr &MI,
                                        unsigned Op) const {
  unsigned OffImm = MI.getOperand(Op + 2).getImm();
  return ARM_AM::getAM2Op(OffImm) == ARM_AM::sub;
}

// Load, scaled register offset
bool ARMBaseInstrInfo::isAm2ScaledReg(const MachineInstr &MI,
                                      unsigned Op) const {
  unsigned OffImm = MI.getOperand(Op + 2).getImm();
  return ARM_AM::getAM2ShiftOpc(OffImm) != ARM_AM::no_shift;
}

static bool isEligibleForITBlock(const MachineInstr *MI) {
  switch (MI->getOpcode()) {
  default: return true;
  case ARM::tADC:   // ADC (register) T1
  case ARM::tADDi3: // ADD (immediate) T1
  case ARM::tADDi8: // ADD (immediate) T2
  case ARM::tADDrr: // ADD (register) T1
  case ARM::tAND:   // AND (register) T1
  case ARM::tASRri: // ASR (immediate) T1
  case ARM::tASRrr: // ASR (register) T1
  case ARM::tBIC:   // BIC (register) T1
  case ARM::tEOR:   // EOR (register) T1
  case ARM::tLSLri: // LSL (immediate) T1
  case ARM::tLSLrr: // LSL (register) T1
  case ARM::tLSRri: // LSR (immediate) T1
  case ARM::tLSRrr: // LSR (register) T1
  case ARM::tMUL:   // MUL T1
  case ARM::tMVN:   // MVN (register) T1
  case ARM::tORR:   // ORR (register) T1
  case ARM::tROR:   // ROR (register) T1
  case ARM::tRSB:   // RSB (immediate) T1
  case ARM::tSBC:   // SBC (register) T1
  case ARM::tSUBi3: // SUB (immediate) T1
  case ARM::tSUBi8: // SUB (immediate) T2
  case ARM::tSUBrr: // SUB (register) T1
    return !ARMBaseInstrInfo::isCPSRDefined(*MI);
  }
}

/// isPredicable - Return true if the specified instruction can be predicated.
/// By default, this returns true for every instruction with a
/// PredicateOperand.
bool ARMBaseInstrInfo::isPredicable(const MachineInstr &MI) const {
  if (!MI.isPredicable())
    return false;

  if (MI.isBundle())
    return false;

  if (!isEligibleForITBlock(&MI))
    return false;

  const ARMFunctionInfo *AFI =
      MI.getParent()->getParent()->getInfo<ARMFunctionInfo>();

  // Neon instructions in Thumb2 IT blocks are deprecated, see ARMARM.
  // In their ARM encoding, they can't be encoded in a conditional form.
  if ((MI.getDesc().TSFlags & ARMII::DomainMask) == ARMII::DomainNEON)
    return false;

  if (AFI->isThumb2Function()) {
    if (getSubtarget().restrictIT())
      return isV8EligibleForIT(&MI);
  }

  return true;
}

namespace llvm {

template <> bool IsCPSRDead<MachineInstr>(const MachineInstr *MI) {
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || MO.isUndef() || MO.isUse())
      continue;
    if (MO.getReg() != ARM::CPSR)
      continue;
    if (!MO.isDead())
      return false;
  }
  // all definitions of CPSR are dead
  return true;
}

} // end namespace llvm

/// GetInstSize - Return the size of the specified MachineInstr.
///
unsigned ARMBaseInstrInfo::getInstSizeInBytes(const MachineInstr &MI) const {
  const MachineBasicBlock &MBB = *MI.getParent();
  const MachineFunction *MF = MBB.getParent();
  const MCAsmInfo *MAI = MF->getTarget().getMCAsmInfo();

  const MCInstrDesc &MCID = MI.getDesc();
  if (MCID.getSize())
    return MCID.getSize();

  switch (MI.getOpcode()) {
  default:
    // pseudo-instruction sizes are zero.
    return 0;
  case TargetOpcode::BUNDLE:
    return getInstBundleLength(MI);
  case ARM::MOVi16_ga_pcrel:
  case ARM::MOVTi16_ga_pcrel:
  case ARM::t2MOVi16_ga_pcrel:
  case ARM::t2MOVTi16_ga_pcrel:
    return 4;
  case ARM::MOVi32imm:
  case ARM::t2MOVi32imm:
    return 8;
  case ARM::CONSTPOOL_ENTRY:
  case ARM::JUMPTABLE_INSTS:
  case ARM::JUMPTABLE_ADDRS:
  case ARM::JUMPTABLE_TBB:
  case ARM::JUMPTABLE_TBH:
    // If this machine instr is a constant pool entry, its size is recorded as
    // operand #2.
    return MI.getOperand(2).getImm();
  case ARM::Int_eh_sjlj_longjmp:
    return 16;
  case ARM::tInt_eh_sjlj_longjmp:
    return 10;
  case ARM::tInt_WIN_eh_sjlj_longjmp:
    return 12;
  case ARM::Int_eh_sjlj_setjmp:
  case ARM::Int_eh_sjlj_setjmp_nofp:
    return 20;
  case ARM::tInt_eh_sjlj_setjmp:
  case ARM::t2Int_eh_sjlj_setjmp:
  case ARM::t2Int_eh_sjlj_setjmp_nofp:
    return 12;
  case ARM::SPACE:
    return MI.getOperand(1).getImm();
  case ARM::INLINEASM:
  case ARM::INLINEASM_BR: {
    // If this machine instr is an inline asm, measure it.
    unsigned Size = getInlineAsmLength(MI.getOperand(0).getSymbolName(), *MAI);
    if (!MF->getInfo<ARMFunctionInfo>()->isThumbFunction())
      Size = alignTo(Size, 4);
    return Size;
  }
  }
}

unsigned ARMBaseInstrInfo::getInstBundleLength(const MachineInstr &MI) const {
  unsigned Size = 0;
  MachineBasicBlock::const_instr_iterator I = MI.getIterator();
  MachineBasicBlock::const_instr_iterator E = MI.getParent()->instr_end();
  while (++I != E && I->isInsideBundle()) {
    assert(!I->isBundle() && "No nested bundle!");
    Size += getInstSizeInBytes(*I);
  }
  return Size;
}

void ARMBaseInstrInfo::copyFromCPSR(MachineBasicBlock &MBB,
                                    MachineBasicBlock::iterator I,
                                    unsigned DestReg, bool KillSrc,
                                    const ARMSubtarget &Subtarget) const {
  unsigned Opc = Subtarget.isThumb()
                     ? (Subtarget.isMClass() ? ARM::t2MRS_M : ARM::t2MRS_AR)
                     : ARM::MRS;

  MachineInstrBuilder MIB =
      BuildMI(MBB, I, I->getDebugLoc(), get(Opc), DestReg);

  // There is only 1 A/R class MRS instruction, and it always refers to
  // APSR. However, there are lots of other possibilities on M-class cores.
  if (Subtarget.isMClass())
    MIB.addImm(0x800);

  MIB.add(predOps(ARMCC::AL))
      .addReg(ARM::CPSR, RegState::Implicit | getKillRegState(KillSrc));
}

void ARMBaseInstrInfo::copyToCPSR(MachineBasicBlock &MBB,
                                  MachineBasicBlock::iterator I,
                                  unsigned SrcReg, bool KillSrc,
                                  const ARMSubtarget &Subtarget) const {
  unsigned Opc = Subtarget.isThumb()
                     ? (Subtarget.isMClass() ? ARM::t2MSR_M : ARM::t2MSR_AR)
                     : ARM::MSR;

  MachineInstrBuilder MIB = BuildMI(MBB, I, I->getDebugLoc(), get(Opc));

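  // 0x800 is the t2MSR_M operand encoding that selects APSR with the NZCVQ
  // bits; the A/R-class MSR instead takes a four-bit field mask, where 8
  // selects the flags field.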
  if (Subtarget.isMClass())
    MIB.addImm(0x800);
  else
    MIB.addImm(8);

  MIB.addReg(SrcReg, getKillRegState(KillSrc))
      .add(predOps(ARMCC::AL))
      .addReg(ARM::CPSR, RegState::Implicit | RegState::Define);
}

void llvm::addUnpredicatedMveVpredNOp(MachineInstrBuilder &MIB) {
  MIB.addImm(ARMVCC::None);
  MIB.addReg(0);
}

void llvm::addUnpredicatedMveVpredROp(MachineInstrBuilder &MIB,
                                      Register DestReg) {
  addUnpredicatedMveVpredNOp(MIB);
  MIB.addReg(DestReg, RegState::Undef);
}

void llvm::addPredicatedMveVpredNOp(MachineInstrBuilder &MIB, unsigned Cond) {
  MIB.addImm(Cond);
  MIB.addReg(ARM::VPR, RegState::Implicit);
}

void llvm::addPredicatedMveVpredROp(MachineInstrBuilder &MIB,
                                    unsigned Cond, unsigned Inactive) {
  addPredicatedMveVpredNOp(MIB, Cond);
  MIB.addReg(Inactive);
}

void ARMBaseInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                                   MachineBasicBlock::iterator I,
                                   const DebugLoc &DL, MCRegister DestReg,
                                   MCRegister SrcReg, bool KillSrc) const {
  bool GPRDest = ARM::GPRRegClass.contains(DestReg);
  bool GPRSrc = ARM::GPRRegClass.contains(SrcReg);

  if (GPRDest && GPRSrc) {
    BuildMI(MBB, I, DL, get(ARM::MOVr), DestReg)
        .addReg(SrcReg, getKillRegState(KillSrc))
        .add(predOps(ARMCC::AL))
        .add(condCodeOp());
    return;
  }

  bool SPRDest = ARM::SPRRegClass.contains(DestReg);
  bool SPRSrc = ARM::SPRRegClass.contains(SrcReg);

  unsigned Opc = 0;
  if (SPRDest && SPRSrc)
    Opc = ARM::VMOVS;
  else if (GPRDest && SPRSrc)
    Opc = ARM::VMOVRS;
  else if (SPRDest && GPRSrc)
    Opc = ARM::VMOVSR;
  else if (ARM::DPRRegClass.contains(DestReg, SrcReg) && Subtarget.hasFP64())
    Opc = ARM::VMOVD;
  else if (ARM::QPRRegClass.contains(DestReg, SrcReg))
    Opc = Subtarget.hasNEON() ? ARM::VORRq : ARM::MVE_VORR;

  if (Opc) {
    MachineInstrBuilder MIB = BuildMI(MBB, I, DL, get(Opc), DestReg);
    MIB.addReg(SrcReg, getKillRegState(KillSrc));
    if (Opc == ARM::VORRq || Opc == ARM::MVE_VORR)
      MIB.addReg(SrcReg, getKillRegState(KillSrc));
    if (Opc == ARM::MVE_VORR)
      addUnpredicatedMveVpredROp(MIB, DestReg);
    else
      MIB.add(predOps(ARMCC::AL));
    return;
  }

  // Handle register classes that require multiple instructions.
  unsigned BeginIdx = 0;
  unsigned SubRegs = 0;
  int Spacing = 1;

  // Use VORRq when possible.
  if (ARM::QQPRRegClass.contains(DestReg, SrcReg)) {
    Opc = Subtarget.hasNEON() ? ARM::VORRq : ARM::MVE_VORR;
    BeginIdx = ARM::qsub_0;
    SubRegs = 2;
  } else if (ARM::QQQQPRRegClass.contains(DestReg, SrcReg)) {
    Opc = Subtarget.hasNEON() ? ARM::VORRq : ARM::MVE_VORR;
    BeginIdx = ARM::qsub_0;
    SubRegs = 4;
  // Fall back to VMOVD.
  } else if (ARM::DPairRegClass.contains(DestReg, SrcReg)) {
    Opc = ARM::VMOVD;
    BeginIdx = ARM::dsub_0;
    SubRegs = 2;
  } else if (ARM::DTripleRegClass.contains(DestReg, SrcReg)) {
    Opc = ARM::VMOVD;
    BeginIdx = ARM::dsub_0;
    SubRegs = 3;
  } else if (ARM::DQuadRegClass.contains(DestReg, SrcReg)) {
    Opc = ARM::VMOVD;
    BeginIdx = ARM::dsub_0;
    SubRegs = 4;
  } else if (ARM::GPRPairRegClass.contains(DestReg, SrcReg)) {
    Opc = Subtarget.isThumb2() ? ARM::tMOVr : ARM::MOVr;
    BeginIdx = ARM::gsub_0;
    SubRegs = 2;
  } else if (ARM::DPairSpcRegClass.contains(DestReg, SrcReg)) {
    Opc = ARM::VMOVD;
    BeginIdx = ARM::dsub_0;
    SubRegs = 2;
    Spacing = 2;
  } else if (ARM::DTripleSpcRegClass.contains(DestReg, SrcReg)) {
    Opc = ARM::VMOVD;
    BeginIdx = ARM::dsub_0;
    SubRegs = 3;
    Spacing = 2;
  } else if (ARM::DQuadSpcRegClass.contains(DestReg, SrcReg)) {
    Opc = ARM::VMOVD;
    BeginIdx = ARM::dsub_0;
    SubRegs = 4;
    Spacing = 2;
  } else if (ARM::DPRRegClass.contains(DestReg, SrcReg) &&
             !Subtarget.hasFP64()) {
    Opc = ARM::VMOVS;
    BeginIdx = ARM::ssub_0;
    SubRegs = 2;
  } else if (SrcReg == ARM::CPSR) {
    copyFromCPSR(MBB, I, DestReg, KillSrc, Subtarget);
    return;
  } else if (DestReg == ARM::CPSR) {
    copyToCPSR(MBB, I, SrcReg, KillSrc, Subtarget);
    return;
  } else if (DestReg == ARM::VPR) {
    assert(ARM::GPRRegClass.contains(SrcReg));
    BuildMI(MBB, I, I->getDebugLoc(), get(ARM::VMSR_P0), DestReg)
        .addReg(SrcReg, getKillRegState(KillSrc))
        .add(predOps(ARMCC::AL));
    return;
  } else if (SrcReg == ARM::VPR) {
    assert(ARM::GPRRegClass.contains(DestReg));
    BuildMI(MBB, I, I->getDebugLoc(), get(ARM::VMRS_P0), DestReg)
        .addReg(SrcReg, getKillRegState(KillSrc))
        .add(predOps(ARMCC::AL));
    return;
  } else if (DestReg == ARM::FPSCR_NZCV) {
    assert(ARM::GPRRegClass.contains(SrcReg));
    BuildMI(MBB, I, I->getDebugLoc(), get(ARM::VMSR_FPSCR_NZCVQC), DestReg)
        .addReg(SrcReg, getKillRegState(KillSrc))
        .add(predOps(ARMCC::AL));
    return;
  } else if (SrcReg == ARM::FPSCR_NZCV) {
    assert(ARM::GPRRegClass.contains(DestReg));
    BuildMI(MBB, I, I->getDebugLoc(), get(ARM::VMRS_FPSCR_NZCVQC), DestReg)
        .addReg(SrcReg, getKillRegState(KillSrc))
        .add(predOps(ARMCC::AL));
    return;
  }

  assert(Opc && "Impossible reg-to-reg copy");

  const TargetRegisterInfo *TRI = &getRegisterInfo();
  MachineInstrBuilder Mov;

  // Copy register tuples backward when the first Dest reg overlaps with
  // SrcReg.
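  // e.g. when the destination tuple starts one register into the source
  // tuple, copying sub-register 0 first would clobber a source lane that is
  // still needed, so the sub-copies are emitted highest-first (note the
  // negated Spacing below).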
  if (TRI->regsOverlap(SrcReg, TRI->getSubReg(DestReg, BeginIdx))) {
    BeginIdx = BeginIdx + ((SubRegs - 1) * Spacing);
    Spacing = -Spacing;
  }
#ifndef NDEBUG
  SmallSet<unsigned, 4> DstRegs;
#endif
  for (unsigned i = 0; i != SubRegs; ++i) {
    Register Dst = TRI->getSubReg(DestReg, BeginIdx + i * Spacing);
    Register Src = TRI->getSubReg(SrcReg, BeginIdx + i * Spacing);
    assert(Dst && Src && "Bad sub-register");
#ifndef NDEBUG
    assert(!DstRegs.count(Src) && "destructive vector copy");
    DstRegs.insert(Dst);
#endif
    Mov = BuildMI(MBB, I, I->getDebugLoc(), get(Opc), Dst).addReg(Src);
    // VORR (NEON or MVE) takes two source operands.
    if (Opc == ARM::VORRq || Opc == ARM::MVE_VORR) {
      Mov.addReg(Src);
    }
    // MVE VORR takes predicate operands in place of an ordinary condition.
    if (Opc == ARM::MVE_VORR)
      addUnpredicatedMveVpredROp(Mov, Dst);
    else
      Mov = Mov.add(predOps(ARMCC::AL));
    // MOVr can set CC.
    if (Opc == ARM::MOVr)
      Mov = Mov.add(condCodeOp());
  }
  // Add implicit super-register defs and kills to the last instruction.
  Mov->addRegisterDefined(DestReg, TRI);
  if (KillSrc)
    Mov->addRegisterKilled(SrcReg, TRI);
}

Optional<DestSourcePair>
ARMBaseInstrInfo::isCopyInstrImpl(const MachineInstr &MI) const {
  // VMOVRRD is also a copy instruction, but it is a more complex form of
  // copy that requires special handling, so we do not consider it here.
  // Such instructions can be recognized via the isExtractSubregLike MI
  // interface function.
  // VORRq is considered a move only if its two inputs are the same register.
  if (!MI.isMoveReg() ||
      (MI.getOpcode() == ARM::VORRq &&
       MI.getOperand(1).getReg() != MI.getOperand(2).getReg()))
    return None;
  return DestSourcePair{MI.getOperand(0), MI.getOperand(1)};
}

Optional<ParamLoadedValue>
ARMBaseInstrInfo::describeLoadedValue(const MachineInstr &MI,
                                      Register Reg) const {
  if (auto DstSrcPair = isCopyInstrImpl(MI)) {
    Register DstReg = DstSrcPair->Destination->getReg();

    // TODO: We don't handle cases where the forwarding reg is narrower/wider
    // than the copy registers. Consider for example:
    //
    //   s16 = VMOVS s0
    //   s17 = VMOVS s1
    //   call @callee(d0)
    //
    // We'd like to describe the call site value of d0 as d8, but this requires
    // gathering and merging the descriptions for the two VMOVS instructions.
    //
    // We also don't handle the reverse situation, where the forwarding reg is
    // narrower than the copy destination:
    //
    //   d8 = VMOVD d0
    //   call @callee(s1)
    //
    // We need to produce a fragment description (the call site value of s1 is
    // /not/ just d8).
    if (DstReg != Reg)
      return None;
  }
  return TargetInstrInfo::describeLoadedValue(MI, Reg);
}

const MachineInstrBuilder &
ARMBaseInstrInfo::AddDReg(MachineInstrBuilder &MIB, unsigned Reg,
                          unsigned SubIdx, unsigned State,
                          const TargetRegisterInfo *TRI) const {
  if (!SubIdx)
    return MIB.addReg(Reg, State);

  if (Register::isPhysicalRegister(Reg))
    return MIB.addReg(TRI->getSubReg(Reg, SubIdx), State);
  return MIB.addReg(Reg, State, SubIdx);
}

void ARMBaseInstrInfo::
storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
                    Register SrcReg, bool isKill, int FI,
                    const TargetRegisterClass *RC,
                    const TargetRegisterInfo *TRI) const {
  MachineFunction &MF = *MBB.getParent();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  Align Alignment = MFI.getObjectAlign(FI);

  MachineMemOperand *MMO = MF.getMachineMemOperand(
      MachinePointerInfo::getFixedStack(MF, FI), MachineMemOperand::MOStore,
      MFI.getObjectSize(FI), Alignment);

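  // Dispatch on the spill size of the register class; each case emits a
  // store (or multi-register store sequence) of matching width.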
1093 switch (TRI->getSpillSize(*RC)) {
1094 case 2:
1095 if (ARM::HPRRegClass.hasSubClassEq(RC)) {
1096 BuildMI(MBB, I, DebugLoc(), get(ARM::VSTRH))
1097 .addReg(SrcReg, getKillRegState(isKill))
1098 .addFrameIndex(FI)
1099 .addImm(0)
1100 .addMemOperand(MMO)
1101 .add(predOps(ARMCC::AL));
1102 } else
1103 llvm_unreachable("Unknown reg class!");
1104 break;
1105 case 4:
1106 if (ARM::GPRRegClass.hasSubClassEq(RC)) {
1107 BuildMI(MBB, I, DebugLoc(), get(ARM::STRi12))
1108 .addReg(SrcReg, getKillRegState(isKill))
1109 .addFrameIndex(FI)
1110 .addImm(0)
1111 .addMemOperand(MMO)
1112 .add(predOps(ARMCC::AL));
1113 } else if (ARM::SPRRegClass.hasSubClassEq(RC)) {
1114 BuildMI(MBB, I, DebugLoc(), get(ARM::VSTRS))
1115 .addReg(SrcReg, getKillRegState(isKill))
1116 .addFrameIndex(FI)
1117 .addImm(0)
1118 .addMemOperand(MMO)
1119 .add(predOps(ARMCC::AL));
1120 } else if (ARM::VCCRRegClass.hasSubClassEq(RC)) {
1121 BuildMI(MBB, I, DebugLoc(), get(ARM::VSTR_P0_off))
1122 .addReg(SrcReg, getKillRegState(isKill))
1123 .addFrameIndex(FI)
1124 .addImm(0)
1125 .addMemOperand(MMO)
1126 .add(predOps(ARMCC::AL));
1127 } else
1128 llvm_unreachable("Unknown reg class!");
1129 break;
1130 case 8:
1131 if (ARM::DPRRegClass.hasSubClassEq(RC)) {
1132 BuildMI(MBB, I, DebugLoc(), get(ARM::VSTRD))
1133 .addReg(SrcReg, getKillRegState(isKill))
1134 .addFrameIndex(FI)
1135 .addImm(0)
1136 .addMemOperand(MMO)
1137 .add(predOps(ARMCC::AL));
1138 } else if (ARM::GPRPairRegClass.hasSubClassEq(RC)) {
1139 if (Subtarget.hasV5TEOps()) {
1140 MachineInstrBuilder MIB = BuildMI(MBB, I, DebugLoc(), get(ARM::STRD));
1141 AddDReg(MIB, SrcReg, ARM::gsub_0, getKillRegState(isKill), TRI);
1142 AddDReg(MIB, SrcReg, ARM::gsub_1, 0, TRI);
1143 MIB.addFrameIndex(FI).addReg(0).addImm(0).addMemOperand(MMO)
1144 .add(predOps(ARMCC::AL));
1145 } else {
1146 // Fallback to STM instruction, which has existed since the dawn of
1147 // time.
1148 MachineInstrBuilder MIB = BuildMI(MBB, I, DebugLoc(), get(ARM::STMIA))
1149 .addFrameIndex(FI)
1150 .addMemOperand(MMO)
1151 .add(predOps(ARMCC::AL));
1152 AddDReg(MIB, SrcReg, ARM::gsub_0, getKillRegState(isKill), TRI);
1153 AddDReg(MIB, SrcReg, ARM::gsub_1, 0, TRI);
1154 }
1155 } else
1156 llvm_unreachable("Unknown reg class!");
1157 break;
1158 case 16:
1159 if (ARM::DPairRegClass.hasSubClassEq(RC) && Subtarget.hasNEON()) {
1160 // Use aligned spills if the stack can be realigned.
1161 if (Alignment >= 16 && getRegisterInfo().canRealignStack(MF)) {
1162 BuildMI(MBB, I, DebugLoc(), get(ARM::VST1q64))
1163 .addFrameIndex(FI)
1164 .addImm(16)
1165 .addReg(SrcReg, getKillRegState(isKill))
1166 .addMemOperand(MMO)
1167 .add(predOps(ARMCC::AL));
1168 } else {
1169 BuildMI(MBB, I, DebugLoc(), get(ARM::VSTMQIA))
1170 .addReg(SrcReg, getKillRegState(isKill))
1171 .addFrameIndex(FI)
1172 .addMemOperand(MMO)
1173 .add(predOps(ARMCC::AL));
1174 }
1175 } else if (ARM::QPRRegClass.hasSubClassEq(RC) &&
1176 Subtarget.hasMVEIntegerOps()) {
1177 auto MIB = BuildMI(MBB, I, DebugLoc(), get(ARM::MVE_VSTRWU32));
1178 MIB.addReg(SrcReg, getKillRegState(isKill))
1179 .addFrameIndex(FI)
1180 .addImm(0)
1181 .addMemOperand(MMO);
1182 addUnpredicatedMveVpredNOp(MIB);
1183 } else
1184 llvm_unreachable("Unknown reg class!");
1185 break;
1186 case 24:
1187 if (ARM::DTripleRegClass.hasSubClassEq(RC)) {
1188 // Use aligned spills if the stack can be realigned.
1189 if (Alignment >= 16 && getRegisterInfo().canRealignStack(MF) &&
1190 Subtarget.hasNEON()) {
1191 BuildMI(MBB, I, DebugLoc(), get(ARM::VST1d64TPseudo))
1192 .addFrameIndex(FI)
1193 .addImm(16)
1194 .addReg(SrcReg, getKillRegState(isKill))
1195 .addMemOperand(MMO)
1196 .add(predOps(ARMCC::AL));
1197 } else {
1198 MachineInstrBuilder MIB = BuildMI(MBB, I, DebugLoc(),
1199 get(ARM::VSTMDIA))
1200 .addFrameIndex(FI)
1201 .add(predOps(ARMCC::AL))
1202 .addMemOperand(MMO);
1203 MIB = AddDReg(MIB, SrcReg, ARM::dsub_0, getKillRegState(isKill), TRI);
1204 MIB = AddDReg(MIB, SrcReg, ARM::dsub_1, 0, TRI);
1205 AddDReg(MIB, SrcReg, ARM::dsub_2, 0, TRI);
1206 }
1207 } else
1208 llvm_unreachable("Unknown reg class!");
1209 break;
1210 case 32:
1211 if (ARM::QQPRRegClass.hasSubClassEq(RC) || ARM::DQuadRegClass.hasSubClassEq(RC)) {
1212 if (Alignment >= 16 && getRegisterInfo().canRealignStack(MF) &&
1213 Subtarget.hasNEON()) {
1214 // FIXME: It's possible to only store part of the QQ register if the
1215 // spilled def has a sub-register index.
1216 BuildMI(MBB, I, DebugLoc(), get(ARM::VST1d64QPseudo))
1217 .addFrameIndex(FI)
1218 .addImm(16)
1219 .addReg(SrcReg, getKillRegState(isKill))
1220 .addMemOperand(MMO)
1221 .add(predOps(ARMCC::AL));
1222 } else {
1223 MachineInstrBuilder MIB = BuildMI(MBB, I, DebugLoc(),
1224 get(ARM::VSTMDIA))
1225 .addFrameIndex(FI)
1226 .add(predOps(ARMCC::AL))
1227 .addMemOperand(MMO);
1228 MIB = AddDReg(MIB, SrcReg, ARM::dsub_0, getKillRegState(isKill), TRI);
1229 MIB = AddDReg(MIB, SrcReg, ARM::dsub_1, 0, TRI);
1230 MIB = AddDReg(MIB, SrcReg, ARM::dsub_2, 0, TRI);
1231 AddDReg(MIB, SrcReg, ARM::dsub_3, 0, TRI);
1232 }
1233 } else
1234 llvm_unreachable("Unknown reg class!");
1235 break;
1236 case 64:
1237 if (ARM::QQQQPRRegClass.hasSubClassEq(RC)) {
1238 MachineInstrBuilder MIB = BuildMI(MBB, I, DebugLoc(), get(ARM::VSTMDIA))
1239 .addFrameIndex(FI)
1240 .add(predOps(ARMCC::AL))
1241 .addMemOperand(MMO);
1242 MIB = AddDReg(MIB, SrcReg, ARM::dsub_0, getKillRegState(isKill), TRI);
1243 MIB = AddDReg(MIB, SrcReg, ARM::dsub_1, 0, TRI);
1244 MIB = AddDReg(MIB, SrcReg, ARM::dsub_2, 0, TRI);
1245 MIB = AddDReg(MIB, SrcReg, ARM::dsub_3, 0, TRI);
1246 MIB = AddDReg(MIB, SrcReg, ARM::dsub_4, 0, TRI);
1247 MIB = AddDReg(MIB, SrcReg, ARM::dsub_5, 0, TRI);
1248 MIB = AddDReg(MIB, SrcReg, ARM::dsub_6, 0, TRI);
1249 AddDReg(MIB, SrcReg, ARM::dsub_7, 0, TRI);
1250 } else
1251 llvm_unreachable("Unknown reg class!");
1252 break;
1253 default:
1254 llvm_unreachable("Unknown reg class!");
1255 }
1256 }
1257
isStoreToStackSlot(const MachineInstr & MI,int & FrameIndex) const1258 unsigned ARMBaseInstrInfo::isStoreToStackSlot(const MachineInstr &MI,
1259 int &FrameIndex) const {
1260 switch (MI.getOpcode()) {
1261 default: break;
1262 case ARM::STRrs:
1263 case ARM::t2STRs: // FIXME: don't use t2STRs to access frame.
1264 if (MI.getOperand(1).isFI() && MI.getOperand(2).isReg() &&
1265 MI.getOperand(3).isImm() && MI.getOperand(2).getReg() == 0 &&
1266 MI.getOperand(3).getImm() == 0) {
1267 FrameIndex = MI.getOperand(1).getIndex();
1268 return MI.getOperand(0).getReg();
1269 }
1270 break;
1271 case ARM::STRi12:
1272 case ARM::t2STRi12:
1273 case ARM::tSTRspi:
1274 case ARM::VSTRD:
1275 case ARM::VSTRS:
1276 if (MI.getOperand(1).isFI() && MI.getOperand(2).isImm() &&
1277 MI.getOperand(2).getImm() == 0) {
1278 FrameIndex = MI.getOperand(1).getIndex();
1279 return MI.getOperand(0).getReg();
1280 }
1281 break;
1282 case ARM::VSTR_P0_off:
1283 if (MI.getOperand(0).isFI() && MI.getOperand(1).isImm() &&
1284 MI.getOperand(1).getImm() == 0) {
1285 FrameIndex = MI.getOperand(0).getIndex();
1286 return ARM::P0;
1287 }
1288 break;
1289 case ARM::VST1q64:
1290 case ARM::VST1d64TPseudo:
1291 case ARM::VST1d64QPseudo:
1292 if (MI.getOperand(0).isFI() && MI.getOperand(2).getSubReg() == 0) {
1293 FrameIndex = MI.getOperand(0).getIndex();
1294 return MI.getOperand(2).getReg();
1295 }
1296 break;
1297 case ARM::VSTMQIA:
1298 if (MI.getOperand(1).isFI() && MI.getOperand(0).getSubReg() == 0) {
1299 FrameIndex = MI.getOperand(1).getIndex();
1300 return MI.getOperand(0).getReg();
1301 }
1302 break;
1303 }
1304
1305 return 0;
1306 }
1307
isStoreToStackSlotPostFE(const MachineInstr & MI,int & FrameIndex) const1308 unsigned ARMBaseInstrInfo::isStoreToStackSlotPostFE(const MachineInstr &MI,
1309 int &FrameIndex) const {
1310 SmallVector<const MachineMemOperand *, 1> Accesses;
1311 if (MI.mayStore() && hasStoreToStackSlot(MI, Accesses) &&
1312 Accesses.size() == 1) {
1313 FrameIndex =
1314 cast<FixedStackPseudoSourceValue>(Accesses.front()->getPseudoValue())
1315 ->getFrameIndex();
1316 return true;
1317 }
1318 return false;
1319 }
1320
1321 void ARMBaseInstrInfo::
loadRegFromStackSlot(MachineBasicBlock & MBB,MachineBasicBlock::iterator I,Register DestReg,int FI,const TargetRegisterClass * RC,const TargetRegisterInfo * TRI) const1322 loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
1323 Register DestReg, int FI,
1324 const TargetRegisterClass *RC,
1325 const TargetRegisterInfo *TRI) const {
1326 DebugLoc DL;
1327 if (I != MBB.end()) DL = I->getDebugLoc();
1328 MachineFunction &MF = *MBB.getParent();
1329 MachineFrameInfo &MFI = MF.getFrameInfo();
1330 const Align Alignment = MFI.getObjectAlign(FI);
1331 MachineMemOperand *MMO = MF.getMachineMemOperand(
1332 MachinePointerInfo::getFixedStack(MF, FI), MachineMemOperand::MOLoad,
1333 MFI.getObjectSize(FI), Alignment);
1334
1335 switch (TRI->getSpillSize(*RC)) {
1336 case 2:
1337 if (ARM::HPRRegClass.hasSubClassEq(RC)) {
1338 BuildMI(MBB, I, DL, get(ARM::VLDRH), DestReg)
1339 .addFrameIndex(FI)
1340 .addImm(0)
1341 .addMemOperand(MMO)
1342 .add(predOps(ARMCC::AL));
1343 } else
1344 llvm_unreachable("Unknown reg class!");
1345 break;
1346 case 4:
1347 if (ARM::GPRRegClass.hasSubClassEq(RC)) {
1348 BuildMI(MBB, I, DL, get(ARM::LDRi12), DestReg)
1349 .addFrameIndex(FI)
1350 .addImm(0)
1351 .addMemOperand(MMO)
1352 .add(predOps(ARMCC::AL));
1353 } else if (ARM::SPRRegClass.hasSubClassEq(RC)) {
1354 BuildMI(MBB, I, DL, get(ARM::VLDRS), DestReg)
1355 .addFrameIndex(FI)
1356 .addImm(0)
1357 .addMemOperand(MMO)
1358 .add(predOps(ARMCC::AL));
1359 } else if (ARM::VCCRRegClass.hasSubClassEq(RC)) {
1360 BuildMI(MBB, I, DL, get(ARM::VLDR_P0_off), DestReg)
1361 .addFrameIndex(FI)
1362 .addImm(0)
1363 .addMemOperand(MMO)
1364 .add(predOps(ARMCC::AL));
1365 } else
1366 llvm_unreachable("Unknown reg class!");
1367 break;
1368 case 8:
1369 if (ARM::DPRRegClass.hasSubClassEq(RC)) {
1370 BuildMI(MBB, I, DL, get(ARM::VLDRD), DestReg)
1371 .addFrameIndex(FI)
1372 .addImm(0)
1373 .addMemOperand(MMO)
1374 .add(predOps(ARMCC::AL));
1375 } else if (ARM::GPRPairRegClass.hasSubClassEq(RC)) {
1376 MachineInstrBuilder MIB;
1377
1378 if (Subtarget.hasV5TEOps()) {
1379 MIB = BuildMI(MBB, I, DL, get(ARM::LDRD));
1380 AddDReg(MIB, DestReg, ARM::gsub_0, RegState::DefineNoRead, TRI);
1381 AddDReg(MIB, DestReg, ARM::gsub_1, RegState::DefineNoRead, TRI);
1382 MIB.addFrameIndex(FI).addReg(0).addImm(0).addMemOperand(MMO)
1383 .add(predOps(ARMCC::AL));
1384 } else {
1385 // Fallback to LDM instruction, which has existed since the dawn of
1386 // time.
1387 MIB = BuildMI(MBB, I, DL, get(ARM::LDMIA))
1388 .addFrameIndex(FI)
1389 .addMemOperand(MMO)
1390 .add(predOps(ARMCC::AL));
1391 MIB = AddDReg(MIB, DestReg, ARM::gsub_0, RegState::DefineNoRead, TRI);
1392 MIB = AddDReg(MIB, DestReg, ARM::gsub_1, RegState::DefineNoRead, TRI);
1393 }
1394
1395 if (Register::isPhysicalRegister(DestReg))
1396 MIB.addReg(DestReg, RegState::ImplicitDefine);
1397 } else
1398 llvm_unreachable("Unknown reg class!");
1399 break;
1400 case 16:
1401 if (ARM::DPairRegClass.hasSubClassEq(RC) && Subtarget.hasNEON()) {
1402 if (Alignment >= 16 && getRegisterInfo().canRealignStack(MF)) {
1403 BuildMI(MBB, I, DL, get(ARM::VLD1q64), DestReg)
1404 .addFrameIndex(FI)
1405 .addImm(16)
1406 .addMemOperand(MMO)
1407 .add(predOps(ARMCC::AL));
1408 } else {
1409 BuildMI(MBB, I, DL, get(ARM::VLDMQIA), DestReg)
1410 .addFrameIndex(FI)
1411 .addMemOperand(MMO)
1412 .add(predOps(ARMCC::AL));
1413 }
1414 } else if (ARM::QPRRegClass.hasSubClassEq(RC) &&
1415 Subtarget.hasMVEIntegerOps()) {
1416 auto MIB = BuildMI(MBB, I, DL, get(ARM::MVE_VLDRWU32), DestReg);
1417 MIB.addFrameIndex(FI)
1418 .addImm(0)
1419 .addMemOperand(MMO);
1420 addUnpredicatedMveVpredNOp(MIB);
1421 } else
1422 llvm_unreachable("Unknown reg class!");
1423 break;
1424 case 24:
1425 if (ARM::DTripleRegClass.hasSubClassEq(RC)) {
1426 if (Alignment >= 16 && getRegisterInfo().canRealignStack(MF) &&
1427 Subtarget.hasNEON()) {
1428 BuildMI(MBB, I, DL, get(ARM::VLD1d64TPseudo), DestReg)
1429 .addFrameIndex(FI)
1430 .addImm(16)
1431 .addMemOperand(MMO)
1432 .add(predOps(ARMCC::AL));
1433 } else {
1434 MachineInstrBuilder MIB = BuildMI(MBB, I, DL, get(ARM::VLDMDIA))
1435 .addFrameIndex(FI)
1436 .addMemOperand(MMO)
1437 .add(predOps(ARMCC::AL));
1438 MIB = AddDReg(MIB, DestReg, ARM::dsub_0, RegState::DefineNoRead, TRI);
1439 MIB = AddDReg(MIB, DestReg, ARM::dsub_1, RegState::DefineNoRead, TRI);
1440 MIB = AddDReg(MIB, DestReg, ARM::dsub_2, RegState::DefineNoRead, TRI);
1441 if (Register::isPhysicalRegister(DestReg))
1442 MIB.addReg(DestReg, RegState::ImplicitDefine);
1443 }
1444 } else
1445 llvm_unreachable("Unknown reg class!");
1446 break;
1447 case 32:
1448 if (ARM::QQPRRegClass.hasSubClassEq(RC) || ARM::DQuadRegClass.hasSubClassEq(RC)) {
1449 if (Alignment >= 16 && getRegisterInfo().canRealignStack(MF) &&
1450 Subtarget.hasNEON()) {
1451 BuildMI(MBB, I, DL, get(ARM::VLD1d64QPseudo), DestReg)
1452 .addFrameIndex(FI)
1453 .addImm(16)
1454 .addMemOperand(MMO)
1455 .add(predOps(ARMCC::AL));
1456 } else {
1457 MachineInstrBuilder MIB = BuildMI(MBB, I, DL, get(ARM::VLDMDIA))
1458 .addFrameIndex(FI)
1459 .add(predOps(ARMCC::AL))
1460 .addMemOperand(MMO);
1461 MIB = AddDReg(MIB, DestReg, ARM::dsub_0, RegState::DefineNoRead, TRI);
1462 MIB = AddDReg(MIB, DestReg, ARM::dsub_1, RegState::DefineNoRead, TRI);
1463 MIB = AddDReg(MIB, DestReg, ARM::dsub_2, RegState::DefineNoRead, TRI);
1464 MIB = AddDReg(MIB, DestReg, ARM::dsub_3, RegState::DefineNoRead, TRI);
1465 if (Register::isPhysicalRegister(DestReg))
1466 MIB.addReg(DestReg, RegState::ImplicitDefine);
1467 }
1468 } else
1469 llvm_unreachable("Unknown reg class!");
1470 break;
1471 case 64:
1472 if (ARM::QQQQPRRegClass.hasSubClassEq(RC)) {
1473 MachineInstrBuilder MIB = BuildMI(MBB, I, DL, get(ARM::VLDMDIA))
1474 .addFrameIndex(FI)
1475 .add(predOps(ARMCC::AL))
1476 .addMemOperand(MMO);
1477 MIB = AddDReg(MIB, DestReg, ARM::dsub_0, RegState::DefineNoRead, TRI);
1478 MIB = AddDReg(MIB, DestReg, ARM::dsub_1, RegState::DefineNoRead, TRI);
1479 MIB = AddDReg(MIB, DestReg, ARM::dsub_2, RegState::DefineNoRead, TRI);
1480 MIB = AddDReg(MIB, DestReg, ARM::dsub_3, RegState::DefineNoRead, TRI);
1481 MIB = AddDReg(MIB, DestReg, ARM::dsub_4, RegState::DefineNoRead, TRI);
1482 MIB = AddDReg(MIB, DestReg, ARM::dsub_5, RegState::DefineNoRead, TRI);
1483 MIB = AddDReg(MIB, DestReg, ARM::dsub_6, RegState::DefineNoRead, TRI);
1484 MIB = AddDReg(MIB, DestReg, ARM::dsub_7, RegState::DefineNoRead, TRI);
1485 if (Register::isPhysicalRegister(DestReg))
1486 MIB.addReg(DestReg, RegState::ImplicitDefine);
1487 } else
1488 llvm_unreachable("Unknown reg class!");
1489 break;
1490 default:
1491 llvm_unreachable("Unknown regclass!");
1492 }
1493 }
1494
isLoadFromStackSlot(const MachineInstr & MI,int & FrameIndex) const1495 unsigned ARMBaseInstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
1496 int &FrameIndex) const {
1497 switch (MI.getOpcode()) {
1498 default: break;
1499 case ARM::LDRrs:
1500 case ARM::t2LDRs: // FIXME: don't use t2LDRs to access frame.
1501 if (MI.getOperand(1).isFI() && MI.getOperand(2).isReg() &&
1502 MI.getOperand(3).isImm() && MI.getOperand(2).getReg() == 0 &&
1503 MI.getOperand(3).getImm() == 0) {
1504 FrameIndex = MI.getOperand(1).getIndex();
1505 return MI.getOperand(0).getReg();
1506 }
1507 break;
1508 case ARM::LDRi12:
1509 case ARM::t2LDRi12:
1510 case ARM::tLDRspi:
1511 case ARM::VLDRD:
1512 case ARM::VLDRS:
1513 if (MI.getOperand(1).isFI() && MI.getOperand(2).isImm() &&
1514 MI.getOperand(2).getImm() == 0) {
1515 FrameIndex = MI.getOperand(1).getIndex();
1516 return MI.getOperand(0).getReg();
1517 }
1518 break;
1519 case ARM::VLDR_P0_off:
1520 if (MI.getOperand(0).isFI() && MI.getOperand(1).isImm() &&
1521 MI.getOperand(1).getImm() == 0) {
1522 FrameIndex = MI.getOperand(0).getIndex();
1523 return ARM::P0;
1524 }
1525 break;
1526 case ARM::VLD1q64:
1527 case ARM::VLD1d8TPseudo:
1528 case ARM::VLD1d16TPseudo:
1529 case ARM::VLD1d32TPseudo:
1530 case ARM::VLD1d64TPseudo:
1531 case ARM::VLD1d8QPseudo:
1532 case ARM::VLD1d16QPseudo:
1533 case ARM::VLD1d32QPseudo:
1534 case ARM::VLD1d64QPseudo:
1535 if (MI.getOperand(1).isFI() && MI.getOperand(0).getSubReg() == 0) {
1536 FrameIndex = MI.getOperand(1).getIndex();
1537 return MI.getOperand(0).getReg();
1538 }
1539 break;
1540 case ARM::VLDMQIA:
1541 if (MI.getOperand(1).isFI() && MI.getOperand(0).getSubReg() == 0) {
1542 FrameIndex = MI.getOperand(1).getIndex();
1543 return MI.getOperand(0).getReg();
1544 }
1545 break;
1546 }
1547
1548 return 0;
1549 }
1550
isLoadFromStackSlotPostFE(const MachineInstr & MI,int & FrameIndex) const1551 unsigned ARMBaseInstrInfo::isLoadFromStackSlotPostFE(const MachineInstr &MI,
1552 int &FrameIndex) const {
1553 SmallVector<const MachineMemOperand *, 1> Accesses;
1554 if (MI.mayLoad() && hasLoadFromStackSlot(MI, Accesses) &&
1555 Accesses.size() == 1) {
1556 FrameIndex =
1557 cast<FixedStackPseudoSourceValue>(Accesses.front()->getPseudoValue())
1558 ->getFrameIndex();
1559 return true;
1560 }
1561 return false;
1562 }
1563
1564 /// Expands MEMCPY to either LDMIA/STMIA or LDMIA_UPD/STMID_UPD
1565 /// depending on whether the result is used.
1566 void ARMBaseInstrInfo::expandMEMCPY(MachineBasicBlock::iterator MI) const {
1567 bool isThumb1 = Subtarget.isThumb1Only();
1568 bool isThumb2 = Subtarget.isThumb2();
1569 const ARMBaseInstrInfo *TII = Subtarget.getInstrInfo();
1570
1571 DebugLoc dl = MI->getDebugLoc();
1572 MachineBasicBlock *BB = MI->getParent();
1573
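  // MEMCPY pseudo operands, as implied by the uses below: 0 = store
  // write-back def, 1 = load write-back def, 2 = destination base,
  // 3 = source base, 4 = register-count immediate, 5.. = scratch registers.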
1574 MachineInstrBuilder LDM, STM;
1575 if (isThumb1 || !MI->getOperand(1).isDead()) {
1576 MachineOperand LDWb(MI->getOperand(1));
1577 LDM = BuildMI(*BB, MI, dl, TII->get(isThumb2 ? ARM::t2LDMIA_UPD
1578 : isThumb1 ? ARM::tLDMIA_UPD
1579 : ARM::LDMIA_UPD))
1580 .add(LDWb);
1581 } else {
1582 LDM = BuildMI(*BB, MI, dl, TII->get(isThumb2 ? ARM::t2LDMIA : ARM::LDMIA));
1583 }
1584
1585 if (isThumb1 || !MI->getOperand(0).isDead()) {
1586 MachineOperand STWb(MI->getOperand(0));
1587 STM = BuildMI(*BB, MI, dl, TII->get(isThumb2 ? ARM::t2STMIA_UPD
1588 : isThumb1 ? ARM::tSTMIA_UPD
1589 : ARM::STMIA_UPD))
1590 .add(STWb);
1591 } else {
1592 STM = BuildMI(*BB, MI, dl, TII->get(isThumb2 ? ARM::t2STMIA : ARM::STMIA));
1593 }
1594
1595 MachineOperand LDBase(MI->getOperand(3));
1596 LDM.add(LDBase).add(predOps(ARMCC::AL));
1597
1598 MachineOperand STBase(MI->getOperand(2));
1599 STM.add(STBase).add(predOps(ARMCC::AL));
1600
1601 // Sort the scratch registers into ascending order.
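  // LDM/STM register lists are encoded (and transferred) in ascending order
  // of register encoding, so the operands must be added in that order.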
1602 const TargetRegisterInfo &TRI = getRegisterInfo();
1603 SmallVector<unsigned, 6> ScratchRegs;
1604   for (unsigned I = 5; I < MI->getNumOperands(); ++I)
1605 ScratchRegs.push_back(MI->getOperand(I).getReg());
1606 llvm::sort(ScratchRegs,
1607 [&TRI](const unsigned &Reg1, const unsigned &Reg2) -> bool {
1608 return TRI.getEncodingValue(Reg1) <
1609 TRI.getEncodingValue(Reg2);
1610 });
1611
1612 for (const auto &Reg : ScratchRegs) {
1613 LDM.addReg(Reg, RegState::Define);
1614 STM.addReg(Reg, RegState::Kill);
1615 }
1616
1617 BB->erase(MI);
1618 }
1619
1620 bool ARMBaseInstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
1621 if (MI.getOpcode() == TargetOpcode::LOAD_STACK_GUARD) {
1622 assert(getSubtarget().getTargetTriple().isOSBinFormatMachO() &&
1623 "LOAD_STACK_GUARD currently supported only for MachO.");
1624 expandLoadStackGuard(MI);
1625 MI.getParent()->erase(MI);
1626 return true;
1627 }
1628
1629 if (MI.getOpcode() == ARM::MEMCPY) {
1630 expandMEMCPY(MI);
1631 return true;
1632 }
1633
1634 // This hook gets to expand COPY instructions before they become
1635 // copyPhysReg() calls. Look for VMOVS instructions that can legally be
1636 // widened to VMOVD. We prefer the VMOVD when possible because it may be
1637 // changed into a VORR that can go down the NEON pipeline.
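  // For example, $s0 = COPY $s2 can become $d0 = VMOVD $d1 (with an implicit
  // use of $s2), since $s0 and $s2 are the ssub_0 halves of $d0 and $d1.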
1638 if (!MI.isCopy() || Subtarget.dontWidenVMOVS() || !Subtarget.hasFP64())
1639 return false;
1640
1641 // Look for a copy between even S-registers. That is where we keep floats
1642 // when using NEON v2f32 instructions for f32 arithmetic.
1643 Register DstRegS = MI.getOperand(0).getReg();
1644 Register SrcRegS = MI.getOperand(1).getReg();
1645 if (!ARM::SPRRegClass.contains(DstRegS, SrcRegS))
1646 return false;
1647
1648 const TargetRegisterInfo *TRI = &getRegisterInfo();
1649 unsigned DstRegD = TRI->getMatchingSuperReg(DstRegS, ARM::ssub_0,
1650 &ARM::DPRRegClass);
1651 unsigned SrcRegD = TRI->getMatchingSuperReg(SrcRegS, ARM::ssub_0,
1652 &ARM::DPRRegClass);
1653 if (!DstRegD || !SrcRegD)
1654 return false;
1655
1656 // We want to widen this into a DstRegD = VMOVD SrcRegD copy. This is only
1657 // legal if the COPY already defines the full DstRegD, and it isn't a
1658 // sub-register insertion.
1659 if (!MI.definesRegister(DstRegD, TRI) || MI.readsRegister(DstRegD, TRI))
1660 return false;
1661
1662 // A dead copy shouldn't show up here, but reject it just in case.
1663 if (MI.getOperand(0).isDead())
1664 return false;
1665
1666 // All clear, widen the COPY.
1667 LLVM_DEBUG(dbgs() << "widening: " << MI);
1668 MachineInstrBuilder MIB(*MI.getParent()->getParent(), MI);
1669
1670 // Get rid of the old implicit-def of DstRegD. Leave it if it defines a Q-reg
1671 // or some other super-register.
1672 int ImpDefIdx = MI.findRegisterDefOperandIdx(DstRegD);
1673 if (ImpDefIdx != -1)
1674 MI.RemoveOperand(ImpDefIdx);
1675
1676 // Change the opcode and operands.
1677 MI.setDesc(get(ARM::VMOVD));
1678 MI.getOperand(0).setReg(DstRegD);
1679 MI.getOperand(1).setReg(SrcRegD);
1680 MIB.add(predOps(ARMCC::AL));
1681
1682 // We are now reading SrcRegD instead of SrcRegS. This may upset the
1683 // register scavenger and machine verifier, so we need to indicate that we
1684 // are reading an undefined value from SrcRegD, but a proper value from
1685 // SrcRegS.
1686 MI.getOperand(1).setIsUndef();
1687 MIB.addReg(SrcRegS, RegState::Implicit);
1688
1689 // SrcRegD may actually contain an unrelated value in the ssub_1
1690 // sub-register. Don't kill it. Only kill the ssub_0 sub-register.
1691 if (MI.getOperand(1).isKill()) {
1692 MI.getOperand(1).setIsKill(false);
1693 MI.addRegisterKilled(SrcRegS, TRI, true);
1694 }
1695
1696 LLVM_DEBUG(dbgs() << "replaced by: " << MI);
1697 return true;
1698 }
1699
1700 /// Create a copy of a const pool value. Update CPI to the new index and return
1701 /// the label UID.
1702 static unsigned duplicateCPV(MachineFunction &MF, unsigned &CPI) {
1703 MachineConstantPool *MCP = MF.getConstantPool();
1704 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
1705
1706 const MachineConstantPoolEntry &MCPE = MCP->getConstants()[CPI];
1707 assert(MCPE.isMachineConstantPoolEntry() &&
1708 "Expecting a machine constantpool entry!");
1709 ARMConstantPoolValue *ACPV =
1710 static_cast<ARMConstantPoolValue*>(MCPE.Val.MachineCPVal);
1711
1712 unsigned PCLabelId = AFI->createPICLabelUId();
1713 ARMConstantPoolValue *NewCPV = nullptr;
1714
1715 // FIXME: The below assumes PIC relocation model and that the function
1716 // is Thumb mode (t1 or t2). PCAdjustment would be 8 for ARM mode PIC, and
1717   // zero for non-PIC in ARM or Thumb. The callers are all Thumb LDR
1718 // instructions, so that's probably OK, but is PIC always correct when
1719 // we get here?
1720 if (ACPV->isGlobalValue())
1721 NewCPV = ARMConstantPoolConstant::Create(
1722 cast<ARMConstantPoolConstant>(ACPV)->getGV(), PCLabelId, ARMCP::CPValue,
1723 4, ACPV->getModifier(), ACPV->mustAddCurrentAddress());
1724 else if (ACPV->isExtSymbol())
1725 NewCPV = ARMConstantPoolSymbol::
1726 Create(MF.getFunction().getContext(),
1727 cast<ARMConstantPoolSymbol>(ACPV)->getSymbol(), PCLabelId, 4);
1728 else if (ACPV->isBlockAddress())
1729 NewCPV = ARMConstantPoolConstant::
1730 Create(cast<ARMConstantPoolConstant>(ACPV)->getBlockAddress(), PCLabelId,
1731 ARMCP::CPBlockAddress, 4);
1732 else if (ACPV->isLSDA())
1733 NewCPV = ARMConstantPoolConstant::Create(&MF.getFunction(), PCLabelId,
1734 ARMCP::CPLSDA, 4);
1735 else if (ACPV->isMachineBasicBlock())
1736 NewCPV = ARMConstantPoolMBB::
1737 Create(MF.getFunction().getContext(),
1738 cast<ARMConstantPoolMBB>(ACPV)->getMBB(), PCLabelId, 4);
1739 else
1740 llvm_unreachable("Unexpected ARM constantpool value type!!");
1741 CPI = MCP->getConstantPoolIndex(NewCPV, MCPE.getAlign());
1742 return PCLabelId;
1743 }
1744
1745 void ARMBaseInstrInfo::reMaterialize(MachineBasicBlock &MBB,
1746 MachineBasicBlock::iterator I,
1747 Register DestReg, unsigned SubIdx,
1748 const MachineInstr &Orig,
1749 const TargetRegisterInfo &TRI) const {
1750 unsigned Opcode = Orig.getOpcode();
1751 switch (Opcode) {
1752 default: {
1753 MachineInstr *MI = MBB.getParent()->CloneMachineInstr(&Orig);
1754 MI->substituteRegister(Orig.getOperand(0).getReg(), DestReg, SubIdx, TRI);
1755 MBB.insert(I, MI);
1756 break;
1757 }
1758 case ARM::tLDRpci_pic:
1759 case ARM::t2LDRpci_pic: {
1760 MachineFunction &MF = *MBB.getParent();
1761 unsigned CPI = Orig.getOperand(1).getIndex();
1762 unsigned PCLabelId = duplicateCPV(MF, CPI);
1763 BuildMI(MBB, I, Orig.getDebugLoc(), get(Opcode), DestReg)
1764 .addConstantPoolIndex(CPI)
1765 .addImm(PCLabelId)
1766 .cloneMemRefs(Orig);
1767 break;
1768 }
1769 }
1770 }
1771
1772 MachineInstr &
1773 ARMBaseInstrInfo::duplicate(MachineBasicBlock &MBB,
1774 MachineBasicBlock::iterator InsertBefore,
1775 const MachineInstr &Orig) const {
1776 MachineInstr &Cloned = TargetInstrInfo::duplicate(MBB, InsertBefore, Orig);
1777 MachineBasicBlock::instr_iterator I = Cloned.getIterator();
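  // If the cloned instruction starts a bundle, walk the whole bundle: every
  // PIC constant-pool load inside it needs its own constant-pool copy and a
  // fresh PC label.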
1778 for (;;) {
1779 switch (I->getOpcode()) {
1780 case ARM::tLDRpci_pic:
1781 case ARM::t2LDRpci_pic: {
1782 MachineFunction &MF = *MBB.getParent();
1783 unsigned CPI = I->getOperand(1).getIndex();
1784 unsigned PCLabelId = duplicateCPV(MF, CPI);
1785 I->getOperand(1).setIndex(CPI);
1786 I->getOperand(2).setImm(PCLabelId);
1787 break;
1788 }
1789 }
1790 if (!I->isBundledWithSucc())
1791 break;
1792 ++I;
1793 }
1794 return Cloned;
1795 }
1796
1797 bool ARMBaseInstrInfo::produceSameValue(const MachineInstr &MI0,
1798 const MachineInstr &MI1,
1799 const MachineRegisterInfo *MRI) const {
1800 unsigned Opcode = MI0.getOpcode();
1801 if (Opcode == ARM::t2LDRpci ||
1802 Opcode == ARM::t2LDRpci_pic ||
1803 Opcode == ARM::tLDRpci ||
1804 Opcode == ARM::tLDRpci_pic ||
1805 Opcode == ARM::LDRLIT_ga_pcrel ||
1806 Opcode == ARM::LDRLIT_ga_pcrel_ldr ||
1807 Opcode == ARM::tLDRLIT_ga_pcrel ||
1808 Opcode == ARM::MOV_ga_pcrel ||
1809 Opcode == ARM::MOV_ga_pcrel_ldr ||
1810 Opcode == ARM::t2MOV_ga_pcrel) {
1811 if (MI1.getOpcode() != Opcode)
1812 return false;
1813 if (MI0.getNumOperands() != MI1.getNumOperands())
1814 return false;
1815
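    // For all of these opcodes, operand 1 holds the constant-pool index or
    // the global being loaded.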
1816 const MachineOperand &MO0 = MI0.getOperand(1);
1817 const MachineOperand &MO1 = MI1.getOperand(1);
1818 if (MO0.getOffset() != MO1.getOffset())
1819 return false;
1820
1821 if (Opcode == ARM::LDRLIT_ga_pcrel ||
1822 Opcode == ARM::LDRLIT_ga_pcrel_ldr ||
1823 Opcode == ARM::tLDRLIT_ga_pcrel ||
1824 Opcode == ARM::MOV_ga_pcrel ||
1825 Opcode == ARM::MOV_ga_pcrel_ldr ||
1826 Opcode == ARM::t2MOV_ga_pcrel)
1827 // Ignore the PC labels.
1828 return MO0.getGlobal() == MO1.getGlobal();
1829
1830 const MachineFunction *MF = MI0.getParent()->getParent();
1831 const MachineConstantPool *MCP = MF->getConstantPool();
1832 int CPI0 = MO0.getIndex();
1833 int CPI1 = MO1.getIndex();
1834 const MachineConstantPoolEntry &MCPE0 = MCP->getConstants()[CPI0];
1835 const MachineConstantPoolEntry &MCPE1 = MCP->getConstants()[CPI1];
1836 bool isARMCP0 = MCPE0.isMachineConstantPoolEntry();
1837 bool isARMCP1 = MCPE1.isMachineConstantPoolEntry();
1838 if (isARMCP0 && isARMCP1) {
1839 ARMConstantPoolValue *ACPV0 =
1840 static_cast<ARMConstantPoolValue*>(MCPE0.Val.MachineCPVal);
1841 ARMConstantPoolValue *ACPV1 =
1842 static_cast<ARMConstantPoolValue*>(MCPE1.Val.MachineCPVal);
1843 return ACPV0->hasSameValue(ACPV1);
1844 } else if (!isARMCP0 && !isARMCP1) {
1845 return MCPE0.Val.ConstVal == MCPE1.Val.ConstVal;
1846 }
1847 return false;
1848 } else if (Opcode == ARM::PICLDR) {
1849 if (MI1.getOpcode() != Opcode)
1850 return false;
1851 if (MI0.getNumOperands() != MI1.getNumOperands())
1852 return false;
1853
1854 Register Addr0 = MI0.getOperand(1).getReg();
1855 Register Addr1 = MI1.getOperand(1).getReg();
1856 if (Addr0 != Addr1) {
1857 if (!MRI || !Register::isVirtualRegister(Addr0) ||
1858 !Register::isVirtualRegister(Addr1))
1859 return false;
1860
1861 // This assumes SSA form.
1862 MachineInstr *Def0 = MRI->getVRegDef(Addr0);
1863 MachineInstr *Def1 = MRI->getVRegDef(Addr1);
1864       // Check if the loaded values, e.g. a constantpool entry or a global
1865       // address, are the same.
1866 if (!produceSameValue(*Def0, *Def1, MRI))
1867 return false;
1868 }
1869
1870 for (unsigned i = 3, e = MI0.getNumOperands(); i != e; ++i) {
1871 // %12 = PICLDR %11, 0, 14, %noreg
1872 const MachineOperand &MO0 = MI0.getOperand(i);
1873 const MachineOperand &MO1 = MI1.getOperand(i);
1874 if (!MO0.isIdenticalTo(MO1))
1875 return false;
1876 }
1877 return true;
1878 }
1879
1880 return MI0.isIdenticalTo(MI1, MachineInstr::IgnoreVRegDefs);
1881 }
1882
1883 /// areLoadsFromSameBasePtr - This is used by the pre-regalloc scheduler to
1884 /// determine if two loads are loading from the same base address. It should
1885 /// only return true if the base pointers are the same and the only differences
1886 /// between the two addresses is the offset. It also returns the offsets by
1887 /// reference.
1888 ///
1889 /// FIXME: remove this in favor of the MachineInstr interface once pre-RA-sched
1890 /// is permanently disabled.
1891 bool ARMBaseInstrInfo::areLoadsFromSameBasePtr(SDNode *Load1, SDNode *Load2,
1892 int64_t &Offset1,
1893 int64_t &Offset2) const {
1894 // Don't worry about Thumb: just ARM and Thumb2.
1895 if (Subtarget.isThumb1Only()) return false;
1896
1897 if (!Load1->isMachineOpcode() || !Load2->isMachineOpcode())
1898 return false;
1899
1900 switch (Load1->getMachineOpcode()) {
1901 default:
1902 return false;
1903 case ARM::LDRi12:
1904 case ARM::LDRBi12:
1905 case ARM::LDRD:
1906 case ARM::LDRH:
1907 case ARM::LDRSB:
1908 case ARM::LDRSH:
1909 case ARM::VLDRD:
1910 case ARM::VLDRS:
1911 case ARM::t2LDRi8:
1912 case ARM::t2LDRBi8:
1913 case ARM::t2LDRDi8:
1914 case ARM::t2LDRSHi8:
1915 case ARM::t2LDRi12:
1916 case ARM::t2LDRBi12:
1917 case ARM::t2LDRSHi12:
1918 break;
1919 }
1920
1921 switch (Load2->getMachineOpcode()) {
1922 default:
1923 return false;
1924 case ARM::LDRi12:
1925 case ARM::LDRBi12:
1926 case ARM::LDRD:
1927 case ARM::LDRH:
1928 case ARM::LDRSB:
1929 case ARM::LDRSH:
1930 case ARM::VLDRD:
1931 case ARM::VLDRS:
1932 case ARM::t2LDRi8:
1933 case ARM::t2LDRBi8:
1934 case ARM::t2LDRSHi8:
1935 case ARM::t2LDRi12:
1936 case ARM::t2LDRBi12:
1937 case ARM::t2LDRSHi12:
1938 break;
1939 }
1940
1941 // Check if base addresses and chain operands match.
1942 if (Load1->getOperand(0) != Load2->getOperand(0) ||
1943 Load1->getOperand(4) != Load2->getOperand(4))
1944 return false;
1945
1946 // Index should be Reg0.
1947 if (Load1->getOperand(3) != Load2->getOperand(3))
1948 return false;
1949
1950 // Determine the offsets.
1951 if (isa<ConstantSDNode>(Load1->getOperand(1)) &&
1952 isa<ConstantSDNode>(Load2->getOperand(1))) {
1953 Offset1 = cast<ConstantSDNode>(Load1->getOperand(1))->getSExtValue();
1954 Offset2 = cast<ConstantSDNode>(Load2->getOperand(1))->getSExtValue();
1955 return true;
1956 }
1957
1958 return false;
1959 }
1960
1961 /// shouldScheduleLoadsNear - This is used by the pre-regalloc scheduler to
1962 /// determine (in conjunction with areLoadsFromSameBasePtr) if two loads should
1963 /// be scheduled together. On some targets if two loads are loading from
1964 /// addresses in the same cache line, it's better if they are scheduled
1965 /// together. This function takes two integers that represent the load offsets
1966 /// from the common base address. It returns true if it decides it's desirable
1967 /// to schedule the two loads together. "NumLoads" is the number of loads that
1968 /// have already been scheduled after Load1.
1969 ///
1970 /// FIXME: remove this in favor of the MachineInstr interface once pre-RA-sched
1971 /// is permanently disabled.
1972 bool ARMBaseInstrInfo::shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2,
1973 int64_t Offset1, int64_t Offset2,
1974 unsigned NumLoads) const {
1975 // Don't worry about Thumb: just ARM and Thumb2.
1976 if (Subtarget.isThumb1Only()) return false;
1977
1978 assert(Offset2 > Offset1);
1979
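  // Loads that are roughly more than 512 bytes apart are unlikely to share a
  // cache line, so don't bother scheduling them together.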
1980 if ((Offset2 - Offset1) / 8 > 64)
1981 return false;
1982
1983   // Check if the machine opcodes are different. If they are, we consider the
1984   // loads not to share a base address, EXCEPT in the case of Thumb2 byte
1985   // loads where one is LDRBi8 and the other LDRBi12. Those are considered to
1986   // be the same because they are merely different encoding forms of the same
1987   // basic instruction.
1988 if ((Load1->getMachineOpcode() != Load2->getMachineOpcode()) &&
1989 !((Load1->getMachineOpcode() == ARM::t2LDRBi8 &&
1990 Load2->getMachineOpcode() == ARM::t2LDRBi12) ||
1991 (Load1->getMachineOpcode() == ARM::t2LDRBi12 &&
1992 Load2->getMachineOpcode() == ARM::t2LDRBi8)))
1993 return false; // FIXME: overly conservative?
1994
1995 // Four loads in a row should be sufficient.
1996 if (NumLoads >= 3)
1997 return false;
1998
1999 return true;
2000 }
2001
2002 bool ARMBaseInstrInfo::isSchedulingBoundary(const MachineInstr &MI,
2003 const MachineBasicBlock *MBB,
2004 const MachineFunction &MF) const {
2005 // Debug info is never a scheduling boundary. It's necessary to be explicit
2006 // due to the special treatment of IT instructions below, otherwise a
2007 // dbg_value followed by an IT will result in the IT instruction being
2008 // considered a scheduling hazard, which is wrong. It should be the actual
2009 // instruction preceding the dbg_value instruction(s), just like it is
2010 // when debug info is not present.
2011 if (MI.isDebugInstr())
2012 return false;
2013
2014 // Terminators and labels can't be scheduled around.
2015 if (MI.isTerminator() || MI.isPosition())
2016 return true;
2017
2018 // INLINEASM_BR can jump to another block
2019 if (MI.getOpcode() == TargetOpcode::INLINEASM_BR)
2020 return true;
2021
2022 // Treat the start of the IT block as a scheduling boundary, but schedule
2023 // t2IT along with all instructions following it.
2024 // FIXME: This is a big hammer. But the alternative is to add all potential
2025 // true and anti dependencies to IT block instructions as implicit operands
2026 // to the t2IT instruction. The added compile time and complexity does not
2027 // seem worth it.
2028 MachineBasicBlock::const_iterator I = MI;
2029 // Make sure to skip any debug instructions
2030 while (++I != MBB->end() && I->isDebugInstr())
2031 ;
2032 if (I != MBB->end() && I->getOpcode() == ARM::t2IT)
2033 return true;
2034
2035 // Don't attempt to schedule around any instruction that defines
2036 // a stack-oriented pointer, as it's unlikely to be profitable. This
2037 // saves compile time, because it doesn't require every single
2038 // stack slot reference to depend on the instruction that does the
2039 // modification.
2040 // Calls don't actually change the stack pointer, even if they have imp-defs.
2041 // No ARM calling conventions change the stack pointer. (X86 calling
2042 // conventions sometimes do).
2043 if (!MI.isCall() && MI.definesRegister(ARM::SP))
2044 return true;
2045
2046 return false;
2047 }
2048
2049 bool ARMBaseInstrInfo::
2050 isProfitableToIfCvt(MachineBasicBlock &MBB,
2051 unsigned NumCycles, unsigned ExtraPredCycles,
2052 BranchProbability Probability) const {
2053 if (!NumCycles)
2054 return false;
2055
2056 // If we are optimizing for size, see if the branch in the predecessor can be
2057 // lowered to cbn?z by the constant island lowering pass, and return false if
2058 // so. This results in a shorter instruction sequence.
2059 if (MBB.getParent()->getFunction().hasOptSize()) {
2060 MachineBasicBlock *Pred = *MBB.pred_begin();
2061 if (!Pred->empty()) {
2062 MachineInstr *LastMI = &*Pred->rbegin();
2063 if (LastMI->getOpcode() == ARM::t2Bcc) {
2064 const TargetRegisterInfo *TRI = &getRegisterInfo();
2065 MachineInstr *CmpMI = findCMPToFoldIntoCBZ(LastMI, TRI);
2066 if (CmpMI)
2067 return false;
2068 }
2069 }
2070 }
2071 return isProfitableToIfCvt(MBB, NumCycles, ExtraPredCycles,
2072 MBB, 0, 0, Probability);
2073 }
2074
2075 bool ARMBaseInstrInfo::
2076 isProfitableToIfCvt(MachineBasicBlock &TBB,
2077 unsigned TCycles, unsigned TExtra,
2078 MachineBasicBlock &FBB,
2079 unsigned FCycles, unsigned FExtra,
2080 BranchProbability Probability) const {
2081 if (!TCycles)
2082 return false;
2083
2084   // In Thumb code we often end up trading one branch for an IT block, and
2085   // if we are cloning, the duplicated instructions can increase code size.
2086   // Prevent blocks with multiple predecessors from being ifcvted to avoid
2087   // this cloning.
2088 if (Subtarget.isThumb2() && TBB.getParent()->getFunction().hasMinSize()) {
2089 if (TBB.pred_size() != 1 || FBB.pred_size() != 1)
2090 return false;
2091 }
2092
2093 // Attempt to estimate the relative costs of predication versus branching.
2094   // Here we scale up each component of UnpredCost to avoid precision issues when
2095 // scaling TCycles/FCycles by Probability.
2096 const unsigned ScalingUpFactor = 1024;
2097
2098 unsigned PredCost = (TCycles + FCycles + TExtra + FExtra) * ScalingUpFactor;
2099 unsigned UnpredCost;
2100 if (!Subtarget.hasBranchPredictor()) {
2101 // When we don't have a branch predictor it's always cheaper to not take a
2102 // branch than take it, so we have to take that into account.
2103 unsigned NotTakenBranchCost = 1;
2104 unsigned TakenBranchCost = Subtarget.getMispredictionPenalty();
2105 unsigned TUnpredCycles, FUnpredCycles;
2106 if (!FCycles) {
2107 // Triangle: TBB is the fallthrough
2108 TUnpredCycles = TCycles + NotTakenBranchCost;
2109 FUnpredCycles = TakenBranchCost;
2110 } else {
2111 // Diamond: TBB is the block that is branched to, FBB is the fallthrough
2112 TUnpredCycles = TCycles + TakenBranchCost;
2113 FUnpredCycles = FCycles + NotTakenBranchCost;
2114 // The branch at the end of FBB will disappear when it's predicated, so
2115 // discount it from PredCost.
2116 PredCost -= 1 * ScalingUpFactor;
2117 }
2118     // The total cost is the cost of each path scaled by their probabilities.
2119 unsigned TUnpredCost = Probability.scale(TUnpredCycles * ScalingUpFactor);
2120 unsigned FUnpredCost = Probability.getCompl().scale(FUnpredCycles * ScalingUpFactor);
2121 UnpredCost = TUnpredCost + FUnpredCost;
2122     // When predicating, assume that the first IT can be folded away but later
2123     // ones cost one cycle each.
2124 if (Subtarget.isThumb2() && TCycles + FCycles > 4) {
2125 PredCost += ((TCycles + FCycles - 4) / 4) * ScalingUpFactor;
2126 }
2127 } else {
2128 unsigned TUnpredCost = Probability.scale(TCycles * ScalingUpFactor);
2129 unsigned FUnpredCost =
2130 Probability.getCompl().scale(FCycles * ScalingUpFactor);
2131 UnpredCost = TUnpredCost + FUnpredCost;
2132 UnpredCost += 1 * ScalingUpFactor; // The branch itself
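    // Adding a tenth of the misprediction penalty roughly models a ~10%
    // chance of mispredicting the branch.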
2133 UnpredCost += Subtarget.getMispredictionPenalty() * ScalingUpFactor / 10;
2134 }
2135
2136 return PredCost <= UnpredCost;
2137 }
2138
2139 unsigned
2140 ARMBaseInstrInfo::extraSizeToPredicateInstructions(const MachineFunction &MF,
2141 unsigned NumInsts) const {
2142 // Thumb2 needs a 2-byte IT instruction to predicate up to 4 instructions.
2143 // ARM has a condition code field in every predicable instruction, using it
2144 // doesn't change code size.
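  // For example, predicating 5 Thumb-2 instructions requires two IT
  // instructions, i.e. divideCeil(5, 4) * 2 = 4 extra bytes.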
2145 return Subtarget.isThumb2() ? divideCeil(NumInsts, 4) * 2 : 0;
2146 }
2147
2148 unsigned
2149 ARMBaseInstrInfo::predictBranchSizeForIfCvt(MachineInstr &MI) const {
2150 // If this branch is likely to be folded into the comparison to form a
2151 // CB(N)Z, then removing it won't reduce code size at all, because that will
2152 // just replace the CB(N)Z with a CMP.
2153 if (MI.getOpcode() == ARM::t2Bcc &&
2154 findCMPToFoldIntoCBZ(&MI, &getRegisterInfo()))
2155 return 0;
2156
2157 unsigned Size = getInstSizeInBytes(MI);
2158
2159 // For Thumb2, all branches are 32-bit instructions during the if conversion
2160 // pass, but may be replaced with 16-bit instructions during size reduction.
2161 // Since the branches considered by if conversion tend to be forward branches
2162 // over small basic blocks, they are very likely to be in range for the
2163 // narrow instructions, so we assume the final code size will be half what it
2164 // currently is.
2165 if (Subtarget.isThumb2())
2166 Size /= 2;
2167
2168 return Size;
2169 }
2170
2171 bool
2172 ARMBaseInstrInfo::isProfitableToUnpredicate(MachineBasicBlock &TMBB,
2173 MachineBasicBlock &FMBB) const {
2174 // Reduce false anti-dependencies to let the target's out-of-order execution
2175 // engine do its thing.
2176 return Subtarget.isProfitableToUnpredicate();
2177 }
2178
2179 /// getInstrPredicate - If instruction is predicated, returns its predicate
2180 /// condition, otherwise returns AL. It also returns the condition code
2181 /// register by reference.
2182 ARMCC::CondCodes llvm::getInstrPredicate(const MachineInstr &MI,
2183 Register &PredReg) {
2184 int PIdx = MI.findFirstPredOperandIdx();
2185 if (PIdx == -1) {
2186 PredReg = 0;
2187 return ARMCC::AL;
2188 }
2189
2190 PredReg = MI.getOperand(PIdx+1).getReg();
2191 return (ARMCC::CondCodes)MI.getOperand(PIdx).getImm();
2192 }
2193
2194 unsigned llvm::getMatchingCondBranchOpcode(unsigned Opc) {
2195 if (Opc == ARM::B)
2196 return ARM::Bcc;
2197 if (Opc == ARM::tB)
2198 return ARM::tBcc;
2199 if (Opc == ARM::t2B)
2200 return ARM::t2Bcc;
2201
2202 llvm_unreachable("Unknown unconditional branch opcode!");
2203 }
2204
2205 MachineInstr *ARMBaseInstrInfo::commuteInstructionImpl(MachineInstr &MI,
2206 bool NewMI,
2207 unsigned OpIdx1,
2208 unsigned OpIdx2) const {
2209 switch (MI.getOpcode()) {
2210 case ARM::MOVCCr:
2211 case ARM::t2MOVCCr: {
2212 // MOVCC can be commuted by inverting the condition.
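    // e.g. %d = MOVCCr %a, %b, EQ, %cpsr becomes %d = MOVCCr %b, %a, NE, %cpsr.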
2213 Register PredReg;
2214 ARMCC::CondCodes CC = getInstrPredicate(MI, PredReg);
2215 // MOVCC AL can't be inverted. Shouldn't happen.
2216 if (CC == ARMCC::AL || PredReg != ARM::CPSR)
2217 return nullptr;
2218 MachineInstr *CommutedMI =
2219 TargetInstrInfo::commuteInstructionImpl(MI, NewMI, OpIdx1, OpIdx2);
2220 if (!CommutedMI)
2221 return nullptr;
2222 // After swapping the MOVCC operands, also invert the condition.
2223 CommutedMI->getOperand(CommutedMI->findFirstPredOperandIdx())
2224 .setImm(ARMCC::getOppositeCondition(CC));
2225 return CommutedMI;
2226 }
2227 }
2228 return TargetInstrInfo::commuteInstructionImpl(MI, NewMI, OpIdx1, OpIdx2);
2229 }
2230
2231 /// Identify instructions that can be folded into a MOVCC instruction, and
2232 /// return the defining instruction.
2233 MachineInstr *
2234 ARMBaseInstrInfo::canFoldIntoMOVCC(Register Reg, const MachineRegisterInfo &MRI,
2235 const TargetInstrInfo *TII) const {
2236 if (!Reg.isVirtual())
2237 return nullptr;
2238 if (!MRI.hasOneNonDBGUse(Reg))
2239 return nullptr;
2240 MachineInstr *MI = MRI.getVRegDef(Reg);
2241 if (!MI)
2242 return nullptr;
2243 // Check if MI can be predicated and folded into the MOVCC.
2244 if (!isPredicable(*MI))
2245 return nullptr;
2246 // Check if MI has any non-dead defs or physreg uses. This also detects
2247 // predicated instructions which will be reading CPSR.
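  // Operand 0 is the def we are folding, so start scanning at operand 1.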
2248 for (unsigned i = 1, e = MI->getNumOperands(); i != e; ++i) {
2249 const MachineOperand &MO = MI->getOperand(i);
2250 // Reject frame index operands, PEI can't handle the predicated pseudos.
2251 if (MO.isFI() || MO.isCPI() || MO.isJTI())
2252 return nullptr;
2253 if (!MO.isReg())
2254 continue;
2255 // MI can't have any tied operands, that would conflict with predication.
2256 if (MO.isTied())
2257 return nullptr;
2258 if (Register::isPhysicalRegister(MO.getReg()))
2259 return nullptr;
2260 if (MO.isDef() && !MO.isDead())
2261 return nullptr;
2262 }
2263 bool DontMoveAcrossStores = true;
2264 if (!MI->isSafeToMove(/* AliasAnalysis = */ nullptr, DontMoveAcrossStores))
2265 return nullptr;
2266 return MI;
2267 }
2268
2269 bool ARMBaseInstrInfo::analyzeSelect(const MachineInstr &MI,
2270 SmallVectorImpl<MachineOperand> &Cond,
2271 unsigned &TrueOp, unsigned &FalseOp,
2272 bool &Optimizable) const {
2273 assert((MI.getOpcode() == ARM::MOVCCr || MI.getOpcode() == ARM::t2MOVCCr) &&
2274 "Unknown select instruction");
2275 // MOVCC operands:
2276 // 0: Def.
2277 // 1: True use.
2278 // 2: False use.
2279 // 3: Condition code.
2280 // 4: CPSR use.
2281 TrueOp = 1;
2282 FalseOp = 2;
2283 Cond.push_back(MI.getOperand(3));
2284 Cond.push_back(MI.getOperand(4));
2285 // We can always fold a def.
2286 Optimizable = true;
2287 return false;
2288 }
2289
2290 MachineInstr *
2291 ARMBaseInstrInfo::optimizeSelect(MachineInstr &MI,
2292 SmallPtrSetImpl<MachineInstr *> &SeenMIs,
2293 bool PreferFalse) const {
2294 assert((MI.getOpcode() == ARM::MOVCCr || MI.getOpcode() == ARM::t2MOVCCr) &&
2295 "Unknown select instruction");
2296 MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
2297 MachineInstr *DefMI = canFoldIntoMOVCC(MI.getOperand(2).getReg(), MRI, this);
2298 bool Invert = !DefMI;
2299 if (!DefMI)
2300 DefMI = canFoldIntoMOVCC(MI.getOperand(1).getReg(), MRI, this);
2301 if (!DefMI)
2302 return nullptr;
2303
2304 // Find new register class to use.
2305 MachineOperand FalseReg = MI.getOperand(Invert ? 2 : 1);
2306 Register DestReg = MI.getOperand(0).getReg();
2307 const TargetRegisterClass *PreviousClass = MRI.getRegClass(FalseReg.getReg());
2308 if (!MRI.constrainRegClass(DestReg, PreviousClass))
2309 return nullptr;
2310
2311 // Create a new predicated version of DefMI.
2312 // Rfalse is the first use.
2313 MachineInstrBuilder NewMI =
2314 BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), DefMI->getDesc(), DestReg);
2315
2316 // Copy all the DefMI operands, excluding its (null) predicate.
2317 const MCInstrDesc &DefDesc = DefMI->getDesc();
2318 for (unsigned i = 1, e = DefDesc.getNumOperands();
2319 i != e && !DefDesc.OpInfo[i].isPredicate(); ++i)
2320 NewMI.add(DefMI->getOperand(i));
2321
2322 unsigned CondCode = MI.getOperand(3).getImm();
2323 if (Invert)
2324 NewMI.addImm(ARMCC::getOppositeCondition(ARMCC::CondCodes(CondCode)));
2325 else
2326 NewMI.addImm(CondCode);
2327 NewMI.add(MI.getOperand(4));
2328
2329 // DefMI is not the -S version that sets CPSR, so add an optional %noreg.
2330 if (NewMI->hasOptionalDef())
2331 NewMI.add(condCodeOp());
2332
2333 // The output register value when the predicate is false is an implicit
2334 // register operand tied to the first def.
2335 // The tie makes the register allocator ensure the FalseReg is allocated the
2336 // same register as operand 0.
2337 FalseReg.setImplicit();
2338 NewMI.add(FalseReg);
2339 NewMI->tieOperands(0, NewMI->getNumOperands() - 1);
2340
2341 // Update SeenMIs set: register newly created MI and erase removed DefMI.
2342 SeenMIs.insert(NewMI);
2343 SeenMIs.erase(DefMI);
2344
2345 // If MI is inside a loop, and DefMI is outside the loop, then kill flags on
2346   // DefMI would be invalid when transferred inside the loop. Checking for a
2347 // loop is expensive, but at least remove kill flags if they are in different
2348 // BBs.
2349 if (DefMI->getParent() != MI.getParent())
2350 NewMI->clearKillInfo();
2351
2352 // The caller will erase MI, but not DefMI.
2353 DefMI->eraseFromParent();
2354 return NewMI;
2355 }
2356
2357 /// Map pseudo instructions that imply an 'S' bit onto real opcodes. Whether the
2358 /// instruction is encoded with an 'S' bit is determined by the optional CPSR
2359 /// def operand.
2360 ///
2361 /// This will go away once we can teach tblgen how to set the optional CPSR def
2362 /// operand itself.
2363 struct AddSubFlagsOpcodePair {
2364 uint16_t PseudoOpc;
2365 uint16_t MachineOpc;
2366 };
2367
2368 static const AddSubFlagsOpcodePair AddSubFlagsOpcodeMap[] = {
2369 {ARM::ADDSri, ARM::ADDri},
2370 {ARM::ADDSrr, ARM::ADDrr},
2371 {ARM::ADDSrsi, ARM::ADDrsi},
2372 {ARM::ADDSrsr, ARM::ADDrsr},
2373
2374 {ARM::SUBSri, ARM::SUBri},
2375 {ARM::SUBSrr, ARM::SUBrr},
2376 {ARM::SUBSrsi, ARM::SUBrsi},
2377 {ARM::SUBSrsr, ARM::SUBrsr},
2378
2379 {ARM::RSBSri, ARM::RSBri},
2380 {ARM::RSBSrsi, ARM::RSBrsi},
2381 {ARM::RSBSrsr, ARM::RSBrsr},
2382
2383 {ARM::tADDSi3, ARM::tADDi3},
2384 {ARM::tADDSi8, ARM::tADDi8},
2385 {ARM::tADDSrr, ARM::tADDrr},
2386 {ARM::tADCS, ARM::tADC},
2387
2388 {ARM::tSUBSi3, ARM::tSUBi3},
2389 {ARM::tSUBSi8, ARM::tSUBi8},
2390 {ARM::tSUBSrr, ARM::tSUBrr},
2391 {ARM::tSBCS, ARM::tSBC},
2392 {ARM::tRSBS, ARM::tRSB},
2393 {ARM::tLSLSri, ARM::tLSLri},
2394
2395 {ARM::t2ADDSri, ARM::t2ADDri},
2396 {ARM::t2ADDSrr, ARM::t2ADDrr},
2397 {ARM::t2ADDSrs, ARM::t2ADDrs},
2398
2399 {ARM::t2SUBSri, ARM::t2SUBri},
2400 {ARM::t2SUBSrr, ARM::t2SUBrr},
2401 {ARM::t2SUBSrs, ARM::t2SUBrs},
2402
2403 {ARM::t2RSBSri, ARM::t2RSBri},
2404 {ARM::t2RSBSrs, ARM::t2RSBrs},
2405 };
2406
2407 unsigned llvm::convertAddSubFlagsOpcode(unsigned OldOpc) {
2408 for (unsigned i = 0, e = array_lengthof(AddSubFlagsOpcodeMap); i != e; ++i)
2409 if (OldOpc == AddSubFlagsOpcodeMap[i].PseudoOpc)
2410 return AddSubFlagsOpcodeMap[i].MachineOpc;
2411 return 0;
2412 }
2413
2414 void llvm::emitARMRegPlusImmediate(MachineBasicBlock &MBB,
2415 MachineBasicBlock::iterator &MBBI,
2416 const DebugLoc &dl, Register DestReg,
2417 Register BaseReg, int NumBytes,
2418 ARMCC::CondCodes Pred, Register PredReg,
2419 const ARMBaseInstrInfo &TII,
2420 unsigned MIFlags) {
2421 if (NumBytes == 0 && DestReg != BaseReg) {
2422 BuildMI(MBB, MBBI, dl, TII.get(ARM::MOVr), DestReg)
2423 .addReg(BaseReg, RegState::Kill)
2424 .add(predOps(Pred, PredReg))
2425 .add(condCodeOp())
2426 .setMIFlags(MIFlags);
2427 return;
2428 }
2429
2430 bool isSub = NumBytes < 0;
2431 if (isSub) NumBytes = -NumBytes;
2432
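  // Each ARM so_imm is an 8-bit value rotated right by an even amount, so a
  // large offset may need several ADDs/SUBs; e.g. 0x1004 is emitted as
  // ADDri #0x4 followed by ADDri #0x1000.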
2433 while (NumBytes) {
2434 unsigned RotAmt = ARM_AM::getSOImmValRotate(NumBytes);
2435 unsigned ThisVal = NumBytes & ARM_AM::rotr32(0xFF, RotAmt);
2436 assert(ThisVal && "Didn't extract field correctly");
2437
2438 // We will handle these bits from offset, clear them.
2439 NumBytes &= ~ThisVal;
2440
2441 assert(ARM_AM::getSOImmVal(ThisVal) != -1 && "Bit extraction didn't work?");
2442
2443 // Build the new ADD / SUB.
2444 unsigned Opc = isSub ? ARM::SUBri : ARM::ADDri;
2445 BuildMI(MBB, MBBI, dl, TII.get(Opc), DestReg)
2446 .addReg(BaseReg, RegState::Kill)
2447 .addImm(ThisVal)
2448 .add(predOps(Pred, PredReg))
2449 .add(condCodeOp())
2450 .setMIFlags(MIFlags);
2451 BaseReg = DestReg;
2452 }
2453 }
2454
2455 bool llvm::tryFoldSPUpdateIntoPushPop(const ARMSubtarget &Subtarget,
2456 MachineFunction &MF, MachineInstr *MI,
2457 unsigned NumBytes) {
2458 // This optimisation potentially adds lots of load and store
2459   // micro-operations, so it's really only a benefit for code size.
2460 if (!Subtarget.hasMinSize())
2461 return false;
2462
2463 // If only one register is pushed/popped, LLVM can use an LDR/STR
2464 // instead. We can't modify those so make sure we're dealing with an
2465 // instruction we understand.
2466 bool IsPop = isPopOpcode(MI->getOpcode());
2467 bool IsPush = isPushOpcode(MI->getOpcode());
2468 if (!IsPush && !IsPop)
2469 return false;
2470
2471 bool IsVFPPushPop = MI->getOpcode() == ARM::VSTMDDB_UPD ||
2472 MI->getOpcode() == ARM::VLDMDIA_UPD;
2473 bool IsT1PushPop = MI->getOpcode() == ARM::tPUSH ||
2474 MI->getOpcode() == ARM::tPOP ||
2475 MI->getOpcode() == ARM::tPOP_RET;
2476
2477 assert((IsT1PushPop || (MI->getOperand(0).getReg() == ARM::SP &&
2478 MI->getOperand(1).getReg() == ARM::SP)) &&
2479 "trying to fold sp update into non-sp-updating push/pop");
2480
2481   // The VFP push & pop act on D-registers, so we can only correctly fold an
2482   // adjustment that is a multiple of 8 bytes. Similarly, each rN is 4 bytes.
2483   // Don't try if this is violated.
2484 if (NumBytes % (IsVFPPushPop ? 8 : 4) != 0)
2485 return false;
2486
2487 // ARM and Thumb2 push/pop insts have explicit "sp, sp" operands (+
2488 // pred) so the list starts at 4. Thumb1 starts after the predicate.
2489 int RegListIdx = IsT1PushPop ? 2 : 4;
2490
2491 // Calculate the space we'll need in terms of registers.
2492 unsigned RegsNeeded;
2493 const TargetRegisterClass *RegClass;
2494 if (IsVFPPushPop) {
2495 RegsNeeded = NumBytes / 8;
2496 RegClass = &ARM::DPRRegClass;
2497 } else {
2498 RegsNeeded = NumBytes / 4;
2499 RegClass = &ARM::GPRRegClass;
2500 }
2501
2502 // We're going to have to strip all list operands off before
2503 // re-adding them since the order matters, so save the existing ones
2504 // for later.
2505 SmallVector<MachineOperand, 4> RegList;
2506
2507 // We're also going to need the first register transferred by this
2508 // instruction, which won't necessarily be the first register in the list.
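  // FirstRegEnc starts at -1 (== UINT_MAX) so any real encoding value found
  // below compares smaller and replaces it.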
2509 unsigned FirstRegEnc = -1;
2510
2511 const TargetRegisterInfo *TRI = MF.getRegInfo().getTargetRegisterInfo();
2512 for (int i = MI->getNumOperands() - 1; i >= RegListIdx; --i) {
2513 MachineOperand &MO = MI->getOperand(i);
2514 RegList.push_back(MO);
2515
2516 if (MO.isReg() && !MO.isImplicit() &&
2517 TRI->getEncodingValue(MO.getReg()) < FirstRegEnc)
2518 FirstRegEnc = TRI->getEncodingValue(MO.getReg());
2519 }
2520
2521 const MCPhysReg *CSRegs = TRI->getCalleeSavedRegs(&MF);
2522
2523 // Now try to find enough space in the reglist to allocate NumBytes.
2524 for (int CurRegEnc = FirstRegEnc - 1; CurRegEnc >= 0 && RegsNeeded;
2525 --CurRegEnc) {
2526 unsigned CurReg = RegClass->getRegister(CurRegEnc);
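    // Thumb1 PUSH/POP register lists can only encode r0-r7 (plus lr/pc), so
    // skip higher registers.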
2527 if (IsT1PushPop && CurRegEnc > TRI->getEncodingValue(ARM::R7))
2528 continue;
2529 if (!IsPop) {
2530 // Pushing any register is completely harmless, mark the register involved
2531 // as undef since we don't care about its value and must not restore it
2532 // during stack unwinding.
2533 RegList.push_back(MachineOperand::CreateReg(CurReg, false, false,
2534 false, false, true));
2535 --RegsNeeded;
2536 continue;
2537 }
2538
2539 // However, we can only pop an extra register if it's not live. For
2540 // registers live within the function we might clobber a return value
2541 // register; the other way a register can be live here is if it's
2542 // callee-saved.
2543 if (isCalleeSavedRegister(CurReg, CSRegs) ||
2544 MI->getParent()->computeRegisterLiveness(TRI, CurReg, MI) !=
2545 MachineBasicBlock::LQR_Dead) {
2546 // VFP pops don't allow holes in the register list, so any skip is fatal
2547 // for our transformation. GPR pops do, so we should just keep looking.
2548 if (IsVFPPushPop)
2549 return false;
2550 else
2551 continue;
2552 }
2553
2554 // Mark the unimportant registers as <def,dead> in the POP.
2555 RegList.push_back(MachineOperand::CreateReg(CurReg, true, false, false,
2556 true));
2557 --RegsNeeded;
2558 }
2559
2560 if (RegsNeeded > 0)
2561 return false;
2562
2563 // Finally we know we can profitably perform the optimisation so go
2564 // ahead: strip all existing registers off and add them back again
2565 // in the right order.
2566 for (int i = MI->getNumOperands() - 1; i >= RegListIdx; --i)
2567 MI->RemoveOperand(i);
2568
2569 // Add the complete list back in.
2570 MachineInstrBuilder MIB(MF, &*MI);
2571 for (int i = RegList.size() - 1; i >= 0; --i)
2572 MIB.add(RegList[i]);
2573
2574 return true;
2575 }
2576
2577 bool llvm::rewriteARMFrameIndex(MachineInstr &MI, unsigned FrameRegIdx,
2578 Register FrameReg, int &Offset,
2579 const ARMBaseInstrInfo &TII) {
2580 unsigned Opcode = MI.getOpcode();
2581 const MCInstrDesc &Desc = MI.getDesc();
2582 unsigned AddrMode = (Desc.TSFlags & ARMII::AddrModeMask);
2583 bool isSub = false;
2584
2585 // Memory operands in inline assembly always use AddrMode2.
2586 if (Opcode == ARM::INLINEASM || Opcode == ARM::INLINEASM_BR)
2587 AddrMode = ARMII::AddrMode2;
2588
2589 if (Opcode == ARM::ADDri) {
2590 Offset += MI.getOperand(FrameRegIdx+1).getImm();
2591 if (Offset == 0) {
2592 // Turn it into a move.
2593 MI.setDesc(TII.get(ARM::MOVr));
2594 MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
2595 MI.RemoveOperand(FrameRegIdx+1);
2596 Offset = 0;
2597 return true;
2598 } else if (Offset < 0) {
2599 Offset = -Offset;
2600 isSub = true;
2601 MI.setDesc(TII.get(ARM::SUBri));
2602 }
2603
2604 // Common case: small offset, fits into instruction.
2605 if (ARM_AM::getSOImmVal(Offset) != -1) {
2606 // Replace the FrameIndex with sp / fp
2607 MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
2608 MI.getOperand(FrameRegIdx+1).ChangeToImmediate(Offset);
2609 Offset = 0;
2610 return true;
2611 }
2612
2613     // Otherwise, pull as much of the immediate into this ADDri/SUBri
2614 // as possible.
2615 unsigned RotAmt = ARM_AM::getSOImmValRotate(Offset);
2616 unsigned ThisImmVal = Offset & ARM_AM::rotr32(0xFF, RotAmt);
2617
2618 // We will handle these bits from offset, clear them.
2619 Offset &= ~ThisImmVal;
2620
2621 // Get the properly encoded SOImmVal field.
2622 assert(ARM_AM::getSOImmVal(ThisImmVal) != -1 &&
2623 "Bit extraction didn't work?");
2624 MI.getOperand(FrameRegIdx+1).ChangeToImmediate(ThisImmVal);
2625 } else {
2626 unsigned ImmIdx = 0;
2627 int InstrOffs = 0;
2628 unsigned NumBits = 0;
2629 unsigned Scale = 1;
2630 switch (AddrMode) {
2631 case ARMII::AddrMode_i12:
2632 ImmIdx = FrameRegIdx + 1;
2633 InstrOffs = MI.getOperand(ImmIdx).getImm();
2634 NumBits = 12;
2635 break;
2636 case ARMII::AddrMode2:
2637 ImmIdx = FrameRegIdx+2;
2638 InstrOffs = ARM_AM::getAM2Offset(MI.getOperand(ImmIdx).getImm());
2639 if (ARM_AM::getAM2Op(MI.getOperand(ImmIdx).getImm()) == ARM_AM::sub)
2640 InstrOffs *= -1;
2641 NumBits = 12;
2642 break;
2643 case ARMII::AddrMode3:
2644 ImmIdx = FrameRegIdx+2;
2645 InstrOffs = ARM_AM::getAM3Offset(MI.getOperand(ImmIdx).getImm());
2646 if (ARM_AM::getAM3Op(MI.getOperand(ImmIdx).getImm()) == ARM_AM::sub)
2647 InstrOffs *= -1;
2648 NumBits = 8;
2649 break;
2650 case ARMII::AddrMode4:
2651 case ARMII::AddrMode6:
2652 // Can't fold any offset even if it's zero.
2653 return false;
2654 case ARMII::AddrMode5:
2655 ImmIdx = FrameRegIdx+1;
2656 InstrOffs = ARM_AM::getAM5Offset(MI.getOperand(ImmIdx).getImm());
2657 if (ARM_AM::getAM5Op(MI.getOperand(ImmIdx).getImm()) == ARM_AM::sub)
2658 InstrOffs *= -1;
2659 NumBits = 8;
2660 Scale = 4;
2661 break;
2662 case ARMII::AddrMode5FP16:
2663 ImmIdx = FrameRegIdx+1;
2664 InstrOffs = ARM_AM::getAM5Offset(MI.getOperand(ImmIdx).getImm());
2665 if (ARM_AM::getAM5Op(MI.getOperand(ImmIdx).getImm()) == ARM_AM::sub)
2666 InstrOffs *= -1;
2667 NumBits = 8;
2668 Scale = 2;
2669 break;
2670 case ARMII::AddrModeT2_i7:
2671 case ARMII::AddrModeT2_i7s2:
2672 case ARMII::AddrModeT2_i7s4:
2673 ImmIdx = FrameRegIdx+1;
2674 InstrOffs = MI.getOperand(ImmIdx).getImm();
2675 NumBits = 7;
2676 Scale = (AddrMode == ARMII::AddrModeT2_i7s2 ? 2 :
2677 AddrMode == ARMII::AddrModeT2_i7s4 ? 4 : 1);
2678 break;
2679 default:
2680 llvm_unreachable("Unsupported addressing mode!");
2681 }
2682
2683 Offset += InstrOffs * Scale;
2684 assert((Offset & (Scale-1)) == 0 && "Can't encode this offset!");
2685 if (Offset < 0) {
2686 Offset = -Offset;
2687 isSub = true;
2688 }
2689
2690     // Attempt to fold the address computation if the opcode has offset bits.
2691 if (NumBits > 0) {
2692 // Common case: small offset, fits into instruction.
2693 MachineOperand &ImmOp = MI.getOperand(ImmIdx);
2694 int ImmedOffset = Offset / Scale;
2695 unsigned Mask = (1 << NumBits) - 1;
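      // e.g. for AddrMode_i12, NumBits == 12, so Mask == 4095 and offsets up
      // to +/-4095 bytes can be encoded directly.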
2696 if ((unsigned)Offset <= Mask * Scale) {
2697 // Replace the FrameIndex with sp
2698 MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
2699 // FIXME: When addrmode2 goes away, this will simplify (like the
2700 // T2 version), as the LDR.i12 versions don't need the encoding
2701 // tricks for the offset value.
2702 if (isSub) {
2703 if (AddrMode == ARMII::AddrMode_i12)
2704 ImmedOffset = -ImmedOffset;
2705 else
2706 ImmedOffset |= 1 << NumBits;
2707 }
2708 ImmOp.ChangeToImmediate(ImmedOffset);
2709 Offset = 0;
2710 return true;
2711 }
2712
2713 // Otherwise, it didn't fit. Pull in what we can to simplify the immed.
2714 ImmedOffset = ImmedOffset & Mask;
2715 if (isSub) {
2716 if (AddrMode == ARMII::AddrMode_i12)
2717 ImmedOffset = -ImmedOffset;
2718 else
2719 ImmedOffset |= 1 << NumBits;
2720 }
2721 ImmOp.ChangeToImmediate(ImmedOffset);
2722 Offset &= ~(Mask*Scale);
2723 }
2724 }
2725
2726 Offset = (isSub) ? -Offset : Offset;
2727 return Offset == 0;
2728 }
2729
2730 /// analyzeCompare - For a comparison instruction, return the source registers
2731 /// in SrcReg and SrcReg2 if having two register operands, and the value it
2732 /// compares against in CmpValue. Return true if the comparison instruction
2733 /// can be analyzed.
2734 bool ARMBaseInstrInfo::analyzeCompare(const MachineInstr &MI, Register &SrcReg,
2735 Register &SrcReg2, int &CmpMask,
2736 int &CmpValue) const {
2737 switch (MI.getOpcode()) {
2738 default: break;
2739 case ARM::CMPri:
2740 case ARM::t2CMPri:
2741 case ARM::tCMPi8:
2742 SrcReg = MI.getOperand(0).getReg();
2743 SrcReg2 = 0;
2744 CmpMask = ~0;
2745 CmpValue = MI.getOperand(1).getImm();
2746 return true;
2747 case ARM::CMPrr:
2748 case ARM::t2CMPrr:
2749 case ARM::tCMPr:
2750 SrcReg = MI.getOperand(0).getReg();
2751 SrcReg2 = MI.getOperand(1).getReg();
2752 CmpMask = ~0;
2753 CmpValue = 0;
2754 return true;
2755 case ARM::TSTri:
2756 case ARM::t2TSTri:
2757 SrcReg = MI.getOperand(0).getReg();
2758 SrcReg2 = 0;
2759 CmpMask = MI.getOperand(1).getImm();
2760 CmpValue = 0;
2761 return true;
2762 }
2763
2764 return false;
2765 }
2766
2767 /// isSuitableForMask - Identify a suitable 'and' instruction that
2768 /// operates on the given source register and applies the same mask
2769 /// as a 'tst' instruction. Provide a limited look-through for copies.
2770 /// When successful, MI will hold the found instruction.
2771 static bool isSuitableForMask(MachineInstr *&MI, Register SrcReg,
2772 int CmpMask, bool CommonUse) {
2773 switch (MI->getOpcode()) {
2774 case ARM::ANDri:
2775 case ARM::t2ANDri:
2776 if (CmpMask != MI->getOperand(2).getImm())
2777 return false;
2778 if (SrcReg == MI->getOperand(CommonUse ? 1 : 0).getReg())
2779 return true;
2780 break;
2781 }
2782
2783 return false;
2784 }
2785
2786 /// getCmpToAddCondition - Assume the flags are set by CMP(a,b); return the
2787 /// condition code to use if we instead modify the instructions so that the
2788 /// flags are set by ADD(a,b,X).
2789 inline static ARMCC::CondCodes getCmpToAddCondition(ARMCC::CondCodes CC) {
2790 switch (CC) {
2791 default: return ARMCC::AL;
2792 case ARMCC::HS: return ARMCC::LO;
2793 case ARMCC::LO: return ARMCC::HS;
2794 case ARMCC::VS: return ARMCC::VS;
2795 case ARMCC::VC: return ARMCC::VC;
2796 }
2797 }
2798
2799 /// isRedundantFlagInstr - check whether the first instruction, whose only
2800 /// purpose is to update flags, can be made redundant.
2801 /// CMPrr can be made redundant by SUBrr if the operands are the same.
2802 /// CMPri can be made redundant by SUBri if the operands are the same.
2803 /// CMPrr(r0, r1) can be made redundant by ADDr[ri](r0, r1, X).
2804 /// This function can be extended later on.
2805 inline static bool isRedundantFlagInstr(const MachineInstr *CmpI,
2806 Register SrcReg, Register SrcReg2,
2807 int ImmValue, const MachineInstr *OI,
2808 bool &IsThumb1) {
2809 if ((CmpI->getOpcode() == ARM::CMPrr || CmpI->getOpcode() == ARM::t2CMPrr) &&
2810 (OI->getOpcode() == ARM::SUBrr || OI->getOpcode() == ARM::t2SUBrr) &&
2811 ((OI->getOperand(1).getReg() == SrcReg &&
2812 OI->getOperand(2).getReg() == SrcReg2) ||
2813 (OI->getOperand(1).getReg() == SrcReg2 &&
2814 OI->getOperand(2).getReg() == SrcReg))) {
2815 IsThumb1 = false;
2816 return true;
2817 }
2818
2819 if (CmpI->getOpcode() == ARM::tCMPr && OI->getOpcode() == ARM::tSUBrr &&
2820 ((OI->getOperand(2).getReg() == SrcReg &&
2821 OI->getOperand(3).getReg() == SrcReg2) ||
2822 (OI->getOperand(2).getReg() == SrcReg2 &&
2823 OI->getOperand(3).getReg() == SrcReg))) {
2824 IsThumb1 = true;
2825 return true;
2826 }
2827
2828 if ((CmpI->getOpcode() == ARM::CMPri || CmpI->getOpcode() == ARM::t2CMPri) &&
2829 (OI->getOpcode() == ARM::SUBri || OI->getOpcode() == ARM::t2SUBri) &&
2830 OI->getOperand(1).getReg() == SrcReg &&
2831 OI->getOperand(2).getImm() == ImmValue) {
2832 IsThumb1 = false;
2833 return true;
2834 }
2835
2836 if (CmpI->getOpcode() == ARM::tCMPi8 &&
2837 (OI->getOpcode() == ARM::tSUBi8 || OI->getOpcode() == ARM::tSUBi3) &&
2838 OI->getOperand(2).getReg() == SrcReg &&
2839 OI->getOperand(3).getImm() == ImmValue) {
2840 IsThumb1 = true;
2841 return true;
2842 }
2843
2844 if ((CmpI->getOpcode() == ARM::CMPrr || CmpI->getOpcode() == ARM::t2CMPrr) &&
2845 (OI->getOpcode() == ARM::ADDrr || OI->getOpcode() == ARM::t2ADDrr ||
2846 OI->getOpcode() == ARM::ADDri || OI->getOpcode() == ARM::t2ADDri) &&
2847 OI->getOperand(0).isReg() && OI->getOperand(1).isReg() &&
2848 OI->getOperand(0).getReg() == SrcReg &&
2849 OI->getOperand(1).getReg() == SrcReg2) {
2850 IsThumb1 = false;
2851 return true;
2852 }
2853
2854 if (CmpI->getOpcode() == ARM::tCMPr &&
2855 (OI->getOpcode() == ARM::tADDi3 || OI->getOpcode() == ARM::tADDi8 ||
2856 OI->getOpcode() == ARM::tADDrr) &&
2857 OI->getOperand(0).getReg() == SrcReg &&
2858 OI->getOperand(2).getReg() == SrcReg2) {
2859 IsThumb1 = true;
2860 return true;
2861 }
2862
2863 return false;
2864 }
2865
2866 static bool isOptimizeCompareCandidate(MachineInstr *MI, bool &IsThumb1) {
2867 switch (MI->getOpcode()) {
2868 default: return false;
2869 case ARM::tLSLri:
2870 case ARM::tLSRri:
2871 case ARM::tLSLrr:
2872 case ARM::tLSRrr:
2873 case ARM::tSUBrr:
2874 case ARM::tADDrr:
2875 case ARM::tADDi3:
2876 case ARM::tADDi8:
2877 case ARM::tSUBi3:
2878 case ARM::tSUBi8:
2879 case ARM::tMUL:
2880 case ARM::tADC:
2881 case ARM::tSBC:
2882 case ARM::tRSB:
2883 case ARM::tAND:
2884 case ARM::tORR:
2885 case ARM::tEOR:
2886 case ARM::tBIC:
2887 case ARM::tMVN:
2888 case ARM::tASRri:
2889 case ARM::tASRrr:
2890 case ARM::tROR:
2891 IsThumb1 = true;
2892 LLVM_FALLTHROUGH;
2893 case ARM::RSBrr:
2894 case ARM::RSBri:
2895 case ARM::RSCrr:
2896 case ARM::RSCri:
2897 case ARM::ADDrr:
2898 case ARM::ADDri:
2899 case ARM::ADCrr:
2900 case ARM::ADCri:
2901 case ARM::SUBrr:
2902 case ARM::SUBri:
2903 case ARM::SBCrr:
2904 case ARM::SBCri:
2905 case ARM::t2RSBri:
2906 case ARM::t2ADDrr:
2907 case ARM::t2ADDri:
2908 case ARM::t2ADCrr:
2909 case ARM::t2ADCri:
2910 case ARM::t2SUBrr:
2911 case ARM::t2SUBri:
2912 case ARM::t2SBCrr:
2913 case ARM::t2SBCri:
2914 case ARM::ANDrr:
2915 case ARM::ANDri:
2916 case ARM::t2ANDrr:
2917 case ARM::t2ANDri:
2918 case ARM::ORRrr:
2919 case ARM::ORRri:
2920 case ARM::t2ORRrr:
2921 case ARM::t2ORRri:
2922 case ARM::EORrr:
2923 case ARM::EORri:
2924 case ARM::t2EORrr:
2925 case ARM::t2EORri:
2926 case ARM::t2LSRri:
2927 case ARM::t2LSRrr:
2928 case ARM::t2LSLri:
2929 case ARM::t2LSLrr:
2930 return true;
2931 }
2932 }
2933
2934 /// optimizeCompareInstr - Convert the instruction supplying the argument to the
2935 /// comparison into one that sets the zero bit in the flags register;
2936 /// remove a redundant Compare instruction if an earlier instruction can set the
2937 /// flags in the same way as Compare.
2938 /// E.g. SUBrr(r1,r2) and CMPrr(r1,r2). We also handle the case where two
2939 /// operands are swapped: SUBrr(r1,r2) and CMPrr(r2,r1), by updating the
2940 /// condition code of instructions which use the flags.
2941 bool ARMBaseInstrInfo::optimizeCompareInstr(
2942 MachineInstr &CmpInstr, Register SrcReg, Register SrcReg2, int CmpMask,
2943 int CmpValue, const MachineRegisterInfo *MRI) const {
2944 // Get the unique definition of SrcReg.
2945 MachineInstr *MI = MRI->getUniqueVRegDef(SrcReg);
2946 if (!MI) return false;
2947
2948 // Masked compares sometimes use the same register as the corresponding 'and'.
2949 if (CmpMask != ~0) {
2950 if (!isSuitableForMask(MI, SrcReg, CmpMask, false) || isPredicated(*MI)) {
2951 MI = nullptr;
2952 for (MachineRegisterInfo::use_instr_iterator
2953 UI = MRI->use_instr_begin(SrcReg), UE = MRI->use_instr_end();
2954 UI != UE; ++UI) {
2955 if (UI->getParent() != CmpInstr.getParent())
2956 continue;
2957 MachineInstr *PotentialAND = &*UI;
2958 if (!isSuitableForMask(PotentialAND, SrcReg, CmpMask, true) ||
2959 isPredicated(*PotentialAND))
2960 continue;
2961 MI = PotentialAND;
2962 break;
2963 }
2964 if (!MI) return false;
2965 }
2966 }
2967
2968 // Get ready to iterate backward from CmpInstr.
2969 MachineBasicBlock::iterator I = CmpInstr, E = MI,
2970 B = CmpInstr.getParent()->begin();
2971
2972 // Early exit if CmpInstr is at the beginning of the BB.
2973 if (I == B) return false;
2974
2975 // There are two possible candidates which can be changed to set CPSR:
2976 // One is MI, the other is a SUB or ADD instruction.
2977 // For CMPrr(r1,r2), we are looking for SUB(r1,r2), SUB(r2,r1), or
2978 // ADDr[ri](r1, r2, X).
2979 // For CMPri(r1, CmpValue), we are looking for SUBri(r1, CmpValue).
2980 MachineInstr *SubAdd = nullptr;
2981 if (SrcReg2 != 0)
2982 // MI is not a candidate for CMPrr.
2983 MI = nullptr;
2984 else if (MI->getParent() != CmpInstr.getParent() || CmpValue != 0) {
2985 // Conservatively refuse to convert an instruction which isn't in the same
2986 // BB as the comparison.
2987 // For CMPri w/ CmpValue != 0, a SubAdd may still be a candidate.
2988 // Thus we cannot return here.
2989 if (CmpInstr.getOpcode() == ARM::CMPri ||
2990 CmpInstr.getOpcode() == ARM::t2CMPri ||
2991 CmpInstr.getOpcode() == ARM::tCMPi8)
2992 MI = nullptr;
2993 else
2994 return false;
2995 }
2996
2997 bool IsThumb1 = false;
2998 if (MI && !isOptimizeCompareCandidate(MI, IsThumb1))
2999 return false;
3000
3001 // We also want to do this peephole for cases like this: if (a*b == 0),
3002 // and optimise away the CMP instruction from the generated code sequence:
3003 // MULS, MOVS, MOVS, CMP. Here the MOVS instructions load the boolean values
3004 // resulting from the select instruction, but these MOVS instructions for
3005 // Thumb1 (V6M) are flag setting and are thus preventing this optimisation.
3006 // However, if we only have MOVS instructions in between the CMP and the
3007 // other instruction (the MULS in this example), then the CPSR is dead so we
3008 // can safely reorder the sequence into: MOVS, MOVS, MULS, CMP. We do this
3009 // reordering and then continue the analysis hoping we can eliminate the
3010 // CMP. This peephole works on the vregs, so is still in SSA form. As a
3011 // consequence, the movs won't redefine/kill the MUL operands which would
3012 // make this reordering illegal.
3013 const TargetRegisterInfo *TRI = &getRegisterInfo();
3014 if (MI && IsThumb1) {
3015 --I;
3016 if (I != E && !MI->readsRegister(ARM::CPSR, TRI)) {
3017 bool CanReorder = true;
3018 for (; I != E; --I) {
3019 if (I->getOpcode() != ARM::tMOVi8) {
3020 CanReorder = false;
3021 break;
3022 }
3023 }
3024 if (CanReorder) {
3025 MI = MI->removeFromParent();
3026 E = CmpInstr;
3027 CmpInstr.getParent()->insert(E, MI);
3028 }
3029 }
3030 I = CmpInstr;
3031 E = MI;
3032 }
3033
3034 // Check that CPSR isn't set between the comparison instruction and the one we
3035 // want to change. At the same time, search for SubAdd.
3036 bool SubAddIsThumb1 = false;
3037 do {
3038 const MachineInstr &Instr = *--I;
3039
3040 // Check whether CmpInstr can be made redundant by the current instruction.
3041 if (isRedundantFlagInstr(&CmpInstr, SrcReg, SrcReg2, CmpValue, &Instr,
3042 SubAddIsThumb1)) {
3043 SubAdd = &*I;
3044 break;
3045 }
3046
3047 // Allow E (which was initially MI) to be SubAdd but do not search before E.
3048 if (I == E)
3049 break;
3050
3051 if (Instr.modifiesRegister(ARM::CPSR, TRI) ||
3052 Instr.readsRegister(ARM::CPSR, TRI))
3053 // This instruction modifies or uses CPSR after the one we want to
3054 // change. We can't do this transformation.
3055 return false;
3056
3057 if (I == B) {
3058 // In some cases, we scan the use-list of an instruction for an AND;
3059 // that AND is in the same BB, but may not be scheduled before the
3060 // corresponding TST. In that case, bail out.
3061 //
3062 // FIXME: We could try to reschedule the AND.
3063 return false;
3064 }
3065 } while (true);
3066
3067 // Return false if no candidates exist.
3068 if (!MI && !SubAdd)
3069 return false;
3070
3071 // If we found a SubAdd, use it, as it will be closer to the CMP.
3072 if (SubAdd) {
3073 MI = SubAdd;
3074 IsThumb1 = SubAddIsThumb1;
3075 }
3076
3077 // We can't use a predicated instruction - it doesn't always write the flags.
3078 if (isPredicated(*MI))
3079 return false;
3080
3081 // Scan forward for uses of CPSR.
3082 // When checking against MI: if a use's condition code requires checking of
3083 // the V bit or C bit, then this transformation is not safe.
3084 // It is safe to remove CmpInstr if CPSR is redefined or killed.
3085 // If we reach the end of the basic block, we need to check whether CPSR is
3086 // live-out.
3087 SmallVector<std::pair<MachineOperand*, ARMCC::CondCodes>, 4>
3088 OperandsToUpdate;
3089 bool isSafe = false;
3090 I = CmpInstr;
3091 E = CmpInstr.getParent()->end();
3092 while (!isSafe && ++I != E) {
3093 const MachineInstr &Instr = *I;
3094 for (unsigned IO = 0, EO = Instr.getNumOperands();
3095 !isSafe && IO != EO; ++IO) {
3096 const MachineOperand &MO = Instr.getOperand(IO);
3097 if (MO.isRegMask() && MO.clobbersPhysReg(ARM::CPSR)) {
3098 isSafe = true;
3099 break;
3100 }
3101 if (!MO.isReg() || MO.getReg() != ARM::CPSR)
3102 continue;
3103 if (MO.isDef()) {
3104 isSafe = true;
3105 break;
3106 }
3107 // The condition code operand is the operand just before CPSR, except for VSELs.
3108 ARMCC::CondCodes CC;
3109 bool IsInstrVSel = true;
3110 switch (Instr.getOpcode()) {
3111 default:
3112 IsInstrVSel = false;
3113 CC = (ARMCC::CondCodes)Instr.getOperand(IO - 1).getImm();
3114 break;
3115 case ARM::VSELEQD:
3116 case ARM::VSELEQS:
3117 case ARM::VSELEQH:
3118 CC = ARMCC::EQ;
3119 break;
3120 case ARM::VSELGTD:
3121 case ARM::VSELGTS:
3122 case ARM::VSELGTH:
3123 CC = ARMCC::GT;
3124 break;
3125 case ARM::VSELGED:
3126 case ARM::VSELGES:
3127 case ARM::VSELGEH:
3128 CC = ARMCC::GE;
3129 break;
3130 case ARM::VSELVSD:
3131 case ARM::VSELVSS:
3132 case ARM::VSELVSH:
3133 CC = ARMCC::VS;
3134 break;
3135 }
3136
3137 if (SubAdd) {
3138 // If we have SUB(r1, r2) and CMP(r2, r1), the condition code based
3139 // on CMP needs to be updated to be based on SUB.
3140 // If we have ADD(r1, r2, X) and CMP(r1, r2), the condition code also
3141 // needs to be modified.
3142 // Push the condition code operands to OperandsToUpdate.
3143 // If it is safe to remove CmpInstr, the condition code of these
3144 // operands will be modified.
3145 unsigned Opc = SubAdd->getOpcode();
3146 bool IsSub = Opc == ARM::SUBrr || Opc == ARM::t2SUBrr ||
3147 Opc == ARM::SUBri || Opc == ARM::t2SUBri ||
3148 Opc == ARM::tSUBrr || Opc == ARM::tSUBi3 ||
3149 Opc == ARM::tSUBi8;
3150 unsigned OpI = Opc != ARM::tSUBrr ? 1 : 2;
3151 if (!IsSub ||
3152 (SrcReg2 != 0 && SubAdd->getOperand(OpI).getReg() == SrcReg2 &&
3153 SubAdd->getOperand(OpI + 1).getReg() == SrcReg)) {
3154 // VSel doesn't support condition code update.
3155 if (IsInstrVSel)
3156 return false;
3157 // Ensure we can swap the condition.
3158 ARMCC::CondCodes NewCC = (IsSub ? getSwappedCondition(CC) : getCmpToAddCondition(CC));
3159 if (NewCC == ARMCC::AL)
3160 return false;
3161 OperandsToUpdate.push_back(
3162 std::make_pair(&((*I).getOperand(IO - 1)), NewCC));
3163 }
3164 } else {
3165 // No SubAdd, so this is x = <op> y, z; cmp x, 0.
3166 switch (CC) {
3167 case ARMCC::EQ: // Z
3168 case ARMCC::NE: // Z
3169 case ARMCC::MI: // N
3170 case ARMCC::PL: // N
3171 case ARMCC::AL: // none
3172 // CPSR can be used multiple times, so we should continue.
3173 break;
3174 case ARMCC::HS: // C
3175 case ARMCC::LO: // C
3176 case ARMCC::VS: // V
3177 case ARMCC::VC: // V
3178 case ARMCC::HI: // C Z
3179 case ARMCC::LS: // C Z
3180 case ARMCC::GE: // N V
3181 case ARMCC::LT: // N V
3182 case ARMCC::GT: // Z N V
3183 case ARMCC::LE: // Z N V
3184 // The instruction uses the V bit or C bit which is not safe.
3185 return false;
3186 }
3187 }
3188 }
3189 }
3190
3191 // If CPSR is not killed nor re-defined, we should check whether it is
3192 // live-out. If it is live-out, do not optimize.
3193 if (!isSafe) {
3194 MachineBasicBlock *MBB = CmpInstr.getParent();
3195 for (MachineBasicBlock::succ_iterator SI = MBB->succ_begin(),
3196 SE = MBB->succ_end(); SI != SE; ++SI)
3197 if ((*SI)->isLiveIn(ARM::CPSR))
3198 return false;
3199 }
3200
3201 // Toggle the optional operand to CPSR (if it exists - in Thumb1 we always
3202 // set CPSR so this is represented as an explicit output)
3203 if (!IsThumb1) {
3204 MI->getOperand(5).setReg(ARM::CPSR);
3205 MI->getOperand(5).setIsDef(true);
3206 }
3207 assert(!isPredicated(*MI) && "Can't use flags from predicated instruction");
3208 CmpInstr.eraseFromParent();
3209
3210 // Modify the condition code of operands in OperandsToUpdate.
3211 // Since we have SUB(r1, r2) and CMP(r2, r1), the condition code needs to
3212 // be changed from r2 > r1 to r1 < r2, from r2 < r1 to r1 > r2, etc.
3213 for (unsigned i = 0, e = OperandsToUpdate.size(); i < e; i++)
3214 OperandsToUpdate[i].first->setImm(OperandsToUpdate[i].second);
3215
3216 MI->clearRegisterDeads(ARM::CPSR);
3217
3218 return true;
3219 }
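// Note on the swapped-SUB case handled above: exchanging the compared
// operands flips the signed and unsigned orderings (GE <-> LE, GT <-> LT,
// HI <-> LO, HS <-> LS) and leaves EQ / NE untouched. For example,
// "CMP r2, r1; ...GT" can reuse the flags of "SUBS r1, r2" only once the
// consumer is rewritten to use LT. Conditions with no usable equivalent come
// back from the helpers as AL, which the code above treats as failure.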
3220
3221 bool ARMBaseInstrInfo::shouldSink(const MachineInstr &MI) const {
3222 // Do not sink MI if it might be used to optimize a redundant compare.
3223 // We heuristically only look at the instruction immediately following MI to
3224 // avoid potentially searching the entire basic block.
3225 if (isPredicated(MI))
3226 return true;
3227 MachineBasicBlock::const_iterator Next = &MI;
3228 ++Next;
3229 Register SrcReg, SrcReg2;
3230 int CmpMask, CmpValue;
3231 bool IsThumb1;
3232 if (Next != MI.getParent()->end() &&
3233 analyzeCompare(*Next, SrcReg, SrcReg2, CmpMask, CmpValue) &&
3234 isRedundantFlagInstr(&*Next, SrcReg, SrcReg2, CmpValue, &MI, IsThumb1))
3235 return false;
3236 return true;
3237 }
3238
3239 bool ARMBaseInstrInfo::FoldImmediate(MachineInstr &UseMI, MachineInstr &DefMI,
3240 Register Reg,
3241 MachineRegisterInfo *MRI) const {
3242 // Fold large immediates into add, sub, or, xor.
3243 unsigned DefOpc = DefMI.getOpcode();
3244 if (DefOpc != ARM::t2MOVi32imm && DefOpc != ARM::MOVi32imm)
3245 return false;
3246 if (!DefMI.getOperand(1).isImm())
3247 // Could be t2MOVi32imm @xx
3248 return false;
3249
3250 if (!MRI->hasOneNonDBGUse(Reg))
3251 return false;
3252
3253 const MCInstrDesc &DefMCID = DefMI.getDesc();
3254 if (DefMCID.hasOptionalDef()) {
3255 unsigned NumOps = DefMCID.getNumOperands();
3256 const MachineOperand &MO = DefMI.getOperand(NumOps - 1);
3257 if (MO.getReg() == ARM::CPSR && !MO.isDead())
3258 // If DefMI defines CPSR and it is not dead, it's obviously not safe
3259 // to delete DefMI.
3260 return false;
3261 }
3262
3263 const MCInstrDesc &UseMCID = UseMI.getDesc();
3264 if (UseMCID.hasOptionalDef()) {
3265 unsigned NumOps = UseMCID.getNumOperands();
3266 if (UseMI.getOperand(NumOps - 1).getReg() == ARM::CPSR)
3267 // If the instruction sets the flag, do not attempt this optimization
3268 // since it may change the semantics of the code.
3269 return false;
3270 }
3271
3272 unsigned UseOpc = UseMI.getOpcode();
3273 unsigned NewUseOpc = 0;
3274 uint32_t ImmVal = (uint32_t)DefMI.getOperand(1).getImm();
3275 uint32_t SOImmValV1 = 0, SOImmValV2 = 0;
3276 bool Commute = false;
3277 switch (UseOpc) {
3278 default: return false;
3279 case ARM::SUBrr:
3280 case ARM::ADDrr:
3281 case ARM::ORRrr:
3282 case ARM::EORrr:
3283 case ARM::t2SUBrr:
3284 case ARM::t2ADDrr:
3285 case ARM::t2ORRrr:
3286 case ARM::t2EORrr: {
3287 Commute = UseMI.getOperand(2).getReg() != Reg;
3288 switch (UseOpc) {
3289 default: break;
3290 case ARM::ADDrr:
3291 case ARM::SUBrr:
3292 if (UseOpc == ARM::SUBrr && Commute)
3293 return false;
3294
3295 // ADD/SUB are special because they're essentially the same operation, so
3296 // we can handle a larger range of immediates.
3297 if (ARM_AM::isSOImmTwoPartVal(ImmVal))
3298 NewUseOpc = UseOpc == ARM::ADDrr ? ARM::ADDri : ARM::SUBri;
3299 else if (ARM_AM::isSOImmTwoPartVal(-ImmVal)) {
3300 ImmVal = -ImmVal;
3301 NewUseOpc = UseOpc == ARM::ADDrr ? ARM::SUBri : ARM::ADDri;
3302 } else
3303 return false;
3304 SOImmValV1 = (uint32_t)ARM_AM::getSOImmTwoPartFirst(ImmVal);
3305 SOImmValV2 = (uint32_t)ARM_AM::getSOImmTwoPartSecond(ImmVal);
3306 break;
3307 case ARM::ORRrr:
3308 case ARM::EORrr:
3309 if (!ARM_AM::isSOImmTwoPartVal(ImmVal))
3310 return false;
3311 SOImmValV1 = (uint32_t)ARM_AM::getSOImmTwoPartFirst(ImmVal);
3312 SOImmValV2 = (uint32_t)ARM_AM::getSOImmTwoPartSecond(ImmVal);
3313 switch (UseOpc) {
3314 default: break;
3315 case ARM::ORRrr: NewUseOpc = ARM::ORRri; break;
3316 case ARM::EORrr: NewUseOpc = ARM::EORri; break;
3317 }
3318 break;
3319 case ARM::t2ADDrr:
3320 case ARM::t2SUBrr: {
3321 if (UseOpc == ARM::t2SUBrr && Commute)
3322 return false;
3323
3324 // ADD/SUB are special because they're essentially the same operation, so
3325 // we can handle a larger range of immediates.
3326 const bool ToSP = DefMI.getOperand(0).getReg() == ARM::SP;
3327 const unsigned t2ADD = ToSP ? ARM::t2ADDspImm : ARM::t2ADDri;
3328 const unsigned t2SUB = ToSP ? ARM::t2SUBspImm : ARM::t2SUBri;
3329 if (ARM_AM::isT2SOImmTwoPartVal(ImmVal))
3330 NewUseOpc = UseOpc == ARM::t2ADDrr ? t2ADD : t2SUB;
3331 else if (ARM_AM::isT2SOImmTwoPartVal(-ImmVal)) {
3332 ImmVal = -ImmVal;
3333 NewUseOpc = UseOpc == ARM::t2ADDrr ? t2SUB : t2ADD;
3334 } else
3335 return false;
3336 SOImmValV1 = (uint32_t)ARM_AM::getT2SOImmTwoPartFirst(ImmVal);
3337 SOImmValV2 = (uint32_t)ARM_AM::getT2SOImmTwoPartSecond(ImmVal);
3338 break;
3339 }
3340 case ARM::t2ORRrr:
3341 case ARM::t2EORrr:
3342 if (!ARM_AM::isT2SOImmTwoPartVal(ImmVal))
3343 return false;
3344 SOImmValV1 = (uint32_t)ARM_AM::getT2SOImmTwoPartFirst(ImmVal);
3345 SOImmValV2 = (uint32_t)ARM_AM::getT2SOImmTwoPartSecond(ImmVal);
3346 switch (UseOpc) {
3347 default: break;
3348 case ARM::t2ORRrr: NewUseOpc = ARM::t2ORRri; break;
3349 case ARM::t2EORrr: NewUseOpc = ARM::t2EORri; break;
3350 }
3351 break;
3352 }
3353 }
3354 }
3355
3356 unsigned OpIdx = Commute ? 2 : 1;
3357 Register Reg1 = UseMI.getOperand(OpIdx).getReg();
3358 bool isKill = UseMI.getOperand(OpIdx).isKill();
3359 const TargetRegisterClass *TRC = MRI->getRegClass(Reg);
3360 Register NewReg = MRI->createVirtualRegister(TRC);
3361 BuildMI(*UseMI.getParent(), UseMI, UseMI.getDebugLoc(), get(NewUseOpc),
3362 NewReg)
3363 .addReg(Reg1, getKillRegState(isKill))
3364 .addImm(SOImmValV1)
3365 .add(predOps(ARMCC::AL))
3366 .add(condCodeOp());
3367 UseMI.setDesc(get(NewUseOpc));
3368 UseMI.getOperand(1).setReg(NewReg);
3369 UseMI.getOperand(1).setIsKill();
3370 UseMI.getOperand(2).ChangeToImmediate(SOImmValV2);
3371 DefMI.eraseFromParent();
3372 // FIXME: t2ADDrr should be split, as different rules apply when writing to SP.
3373 // Just as t2ADDri was split into [t2ADDri, t2ADDspImm].
3374 // Then the below code will not be needed, as the input/output register
3375 // classes will be rgpr or gprSP.
3376 // For now, we fix the UseMI operand explicitly here:
3377 switch (NewUseOpc) {
3378 case ARM::t2ADDspImm:
3379 case ARM::t2SUBspImm:
3380 case ARM::t2ADDri:
3381 case ARM::t2SUBri:
3382 MRI->setRegClass(UseMI.getOperand(0).getReg(), TRC);
3383 }
3384 return true;
3385 }
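// Illustrative only (not the ARM_AM implementation): a brute-force sketch of
// the A32 "two part" immediate test used above. An SO-imm is an 8-bit value
// rotated right by an even amount; a two-part value splits into two such
// chunks, e.g. 0x00AB00CD = 0x00AB0000 | 0x000000CD (disjoint chunks, so OR
// and ADD agree). The ADD/SUB path above additionally tries the negated value
// and flips the opcode. Kept under #if 0 as documentation, not build code;
// corner-case behavior of the real helpers may differ.
#if 0
static uint32_t sketchRotl32(uint32_t V, unsigned R) {
  return R == 0 ? V : ((V << R) | (V >> (32 - R)));
}
static bool sketchIsSOImm(uint32_t V) {
  // Encodable iff some even left-rotation brings V into the low 8 bits.
  for (unsigned R = 0; R < 32; R += 2)
    if (sketchRotl32(V, R) <= 0xFFu)
      return true;
  return false;
}
static bool sketchIsSOImmTwoPart(uint32_t V) {
  if (V == 0 || sketchIsSOImm(V))
    return false; // zero or a single SO-imm is not a *two*-part value
  for (unsigned R = 0; R < 32; R += 2) {
    uint32_t Mask = sketchRotl32(0xFFu, R); // one rotated byte window
    uint32_t Hi = V & Mask, Lo = V & ~Mask;
    if (Hi && Lo && sketchIsSOImm(Lo)) // Hi is an SO-imm by construction
      return true;
  }
  return false;
}
#endif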
3386
3387 static unsigned getNumMicroOpsSwiftLdSt(const InstrItineraryData *ItinData,
3388 const MachineInstr &MI) {
3389 switch (MI.getOpcode()) {
3390 default: {
3391 const MCInstrDesc &Desc = MI.getDesc();
3392 int UOps = ItinData->getNumMicroOps(Desc.getSchedClass());
3393 assert(UOps >= 0 && "bad # UOps");
3394 return UOps;
3395 }
3396
3397 case ARM::LDRrs:
3398 case ARM::LDRBrs:
3399 case ARM::STRrs:
3400 case ARM::STRBrs: {
3401 unsigned ShOpVal = MI.getOperand(3).getImm();
3402 bool isSub = ARM_AM::getAM2Op(ShOpVal) == ARM_AM::sub;
3403 unsigned ShImm = ARM_AM::getAM2Offset(ShOpVal);
3404 if (!isSub &&
3405 (ShImm == 0 ||
3406 ((ShImm == 1 || ShImm == 2 || ShImm == 3) &&
3407 ARM_AM::getAM2ShiftOpc(ShOpVal) == ARM_AM::lsl)))
3408 return 1;
3409 return 2;
3410 }
3411
3412 case ARM::LDRH:
3413 case ARM::STRH: {
3414 if (!MI.getOperand(2).getReg())
3415 return 1;
3416
3417 unsigned ShOpVal = MI.getOperand(3).getImm();
3418 bool isSub = ARM_AM::getAM2Op(ShOpVal) == ARM_AM::sub;
3419 unsigned ShImm = ARM_AM::getAM2Offset(ShOpVal);
3420 if (!isSub &&
3421 (ShImm == 0 ||
3422 ((ShImm == 1 || ShImm == 2 || ShImm == 3) &&
3423 ARM_AM::getAM2ShiftOpc(ShOpVal) == ARM_AM::lsl)))
3424 return 1;
3425 return 2;
3426 }
3427
3428 case ARM::LDRSB:
3429 case ARM::LDRSH:
3430 return (ARM_AM::getAM3Op(MI.getOperand(3).getImm()) == ARM_AM::sub) ? 3 : 2;
3431
3432 case ARM::LDRSB_POST:
3433 case ARM::LDRSH_POST: {
3434 Register Rt = MI.getOperand(0).getReg();
3435 Register Rm = MI.getOperand(3).getReg();
3436 return (Rt == Rm) ? 4 : 3;
3437 }
3438
3439 case ARM::LDR_PRE_REG:
3440 case ARM::LDRB_PRE_REG: {
3441 Register Rt = MI.getOperand(0).getReg();
3442 Register Rm = MI.getOperand(3).getReg();
3443 if (Rt == Rm)
3444 return 3;
3445 unsigned ShOpVal = MI.getOperand(4).getImm();
3446 bool isSub = ARM_AM::getAM2Op(ShOpVal) == ARM_AM::sub;
3447 unsigned ShImm = ARM_AM::getAM2Offset(ShOpVal);
3448 if (!isSub &&
3449 (ShImm == 0 ||
3450 ((ShImm == 1 || ShImm == 2 || ShImm == 3) &&
3451 ARM_AM::getAM2ShiftOpc(ShOpVal) == ARM_AM::lsl)))
3452 return 2;
3453 return 3;
3454 }
3455
3456 case ARM::STR_PRE_REG:
3457 case ARM::STRB_PRE_REG: {
3458 unsigned ShOpVal = MI.getOperand(4).getImm();
3459 bool isSub = ARM_AM::getAM2Op(ShOpVal) == ARM_AM::sub;
3460 unsigned ShImm = ARM_AM::getAM2Offset(ShOpVal);
3461 if (!isSub &&
3462 (ShImm == 0 ||
3463 ((ShImm == 1 || ShImm == 2 || ShImm == 3) &&
3464 ARM_AM::getAM2ShiftOpc(ShOpVal) == ARM_AM::lsl)))
3465 return 2;
3466 return 3;
3467 }
3468
3469 case ARM::LDRH_PRE:
3470 case ARM::STRH_PRE: {
3471 Register Rt = MI.getOperand(0).getReg();
3472 Register Rm = MI.getOperand(3).getReg();
3473 if (!Rm)
3474 return 2;
3475 if (Rt == Rm)
3476 return 3;
3477 return (ARM_AM::getAM3Op(MI.getOperand(4).getImm()) == ARM_AM::sub) ? 3 : 2;
3478 }
3479
3480 case ARM::LDR_POST_REG:
3481 case ARM::LDRB_POST_REG:
3482 case ARM::LDRH_POST: {
3483 Register Rt = MI.getOperand(0).getReg();
3484 Register Rm = MI.getOperand(3).getReg();
3485 return (Rt == Rm) ? 3 : 2;
3486 }
3487
3488 case ARM::LDR_PRE_IMM:
3489 case ARM::LDRB_PRE_IMM:
3490 case ARM::LDR_POST_IMM:
3491 case ARM::LDRB_POST_IMM:
3492 case ARM::STRB_POST_IMM:
3493 case ARM::STRB_POST_REG:
3494 case ARM::STRB_PRE_IMM:
3495 case ARM::STRH_POST:
3496 case ARM::STR_POST_IMM:
3497 case ARM::STR_POST_REG:
3498 case ARM::STR_PRE_IMM:
3499 return 2;
3500
3501 case ARM::LDRSB_PRE:
3502 case ARM::LDRSH_PRE: {
3503 Register Rm = MI.getOperand(3).getReg();
3504 if (Rm == 0)
3505 return 3;
3506 Register Rt = MI.getOperand(0).getReg();
3507 if (Rt == Rm)
3508 return 4;
3509 unsigned ShOpVal = MI.getOperand(4).getImm();
3510 bool isSub = ARM_AM::getAM2Op(ShOpVal) == ARM_AM::sub;
3511 unsigned ShImm = ARM_AM::getAM2Offset(ShOpVal);
3512 if (!isSub &&
3513 (ShImm == 0 ||
3514 ((ShImm == 1 || ShImm == 2 || ShImm == 3) &&
3515 ARM_AM::getAM2ShiftOpc(ShOpVal) == ARM_AM::lsl)))
3516 return 3;
3517 return 4;
3518 }
3519
3520 case ARM::LDRD: {
3521 Register Rt = MI.getOperand(0).getReg();
3522 Register Rn = MI.getOperand(2).getReg();
3523 Register Rm = MI.getOperand(3).getReg();
3524 if (Rm)
3525 return (ARM_AM::getAM3Op(MI.getOperand(4).getImm()) == ARM_AM::sub) ? 4
3526 : 3;
3527 return (Rt == Rn) ? 3 : 2;
3528 }
3529
3530 case ARM::STRD: {
3531 Register Rm = MI.getOperand(3).getReg();
3532 if (Rm)
3533 return (ARM_AM::getAM3Op(MI.getOperand(4).getImm()) == ARM_AM::sub) ? 4
3534 : 3;
3535 return 2;
3536 }
3537
3538 case ARM::LDRD_POST:
3539 case ARM::t2LDRD_POST:
3540 return 3;
3541
3542 case ARM::STRD_POST:
3543 case ARM::t2STRD_POST:
3544 return 4;
3545
3546 case ARM::LDRD_PRE: {
3547 Register Rt = MI.getOperand(0).getReg();
3548 Register Rn = MI.getOperand(3).getReg();
3549 Register Rm = MI.getOperand(4).getReg();
3550 if (Rm)
3551 return (ARM_AM::getAM3Op(MI.getOperand(5).getImm()) == ARM_AM::sub) ? 5
3552 : 4;
3553 return (Rt == Rn) ? 4 : 3;
3554 }
3555
3556 case ARM::t2LDRD_PRE: {
3557 Register Rt = MI.getOperand(0).getReg();
3558 Register Rn = MI.getOperand(3).getReg();
3559 return (Rt == Rn) ? 4 : 3;
3560 }
3561
3562 case ARM::STRD_PRE: {
3563 Register Rm = MI.getOperand(4).getReg();
3564 if (Rm)
3565 return (ARM_AM::getAM3Op(MI.getOperand(5).getImm()) == ARM_AM::sub) ? 5
3566 : 4;
3567 return 3;
3568 }
3569
3570 case ARM::t2STRD_PRE:
3571 return 3;
3572
3573 case ARM::t2LDR_POST:
3574 case ARM::t2LDRB_POST:
3575 case ARM::t2LDRB_PRE:
3576 case ARM::t2LDRSBi12:
3577 case ARM::t2LDRSBi8:
3578 case ARM::t2LDRSBpci:
3579 case ARM::t2LDRSBs:
3580 case ARM::t2LDRH_POST:
3581 case ARM::t2LDRH_PRE:
3582 case ARM::t2LDRSBT:
3583 case ARM::t2LDRSB_POST:
3584 case ARM::t2LDRSB_PRE:
3585 case ARM::t2LDRSH_POST:
3586 case ARM::t2LDRSH_PRE:
3587 case ARM::t2LDRSHi12:
3588 case ARM::t2LDRSHi8:
3589 case ARM::t2LDRSHpci:
3590 case ARM::t2LDRSHs:
3591 return 2;
3592
3593 case ARM::t2LDRDi8: {
3594 Register Rt = MI.getOperand(0).getReg();
3595 Register Rn = MI.getOperand(2).getReg();
3596 return (Rt == Rn) ? 3 : 2;
3597 }
3598
3599 case ARM::t2STRB_POST:
3600 case ARM::t2STRB_PRE:
3601 case ARM::t2STRBs:
3602 case ARM::t2STRDi8:
3603 case ARM::t2STRH_POST:
3604 case ARM::t2STRH_PRE:
3605 case ARM::t2STRHs:
3606 case ARM::t2STR_POST:
3607 case ARM::t2STR_PRE:
3608 case ARM::t2STRs:
3609 return 2;
3610 }
3611 }
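// For example, on Swift a load such as "LDRrs r0, [r1, r2, lsl #2]" counts as
// a single uop above, while a subtracted index register or an unusual shift
// amount (say, lsl #4) cracks the same load into two uops.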
3612
3613 // Return the number of 32-bit words loaded by LDM or stored by STM. If this
3614 // can't be easily determined, return 0 (missing MachineMemOperand).
3615 //
3616 // FIXME: The current MachineInstr design does not support relying on machine
3617 // mem operands to determine the width of a memory access. Instead, we expect
3618 // the target to provide this information based on the instruction opcode and
3619 // operands. However, using MachineMemOperand is the best solution now for
3620 // two reasons:
3621 //
3622 // 1) getNumMicroOps tries to infer LDM memory width from the total number of MI
3623 // operands. This is much more dangerous than using the MachineMemOperand
3624 // sizes because CodeGen passes can insert/remove optional machine operands. In
3625 // fact, it's totally incorrect for preRA passes and appears to be wrong for
3626 // postRA passes as well.
3627 //
3628 // 2) getNumLDMAddresses is only used by the scheduling machine model and any
3629 // machine model that calls this should handle the unknown (zero size) case.
3630 //
3631 // Long term, we should require a target hook that verifies MachineMemOperand
3632 // sizes during MC lowering. That target hook should be local to MC lowering
3633 // because we can't ensure that it is aware of other MI forms. Doing this will
3634 // ensure that MachineMemOperands are correctly propagated through all passes.
3635 unsigned ARMBaseInstrInfo::getNumLDMAddresses(const MachineInstr &MI) const {
3636 unsigned Size = 0;
3637 for (MachineInstr::mmo_iterator I = MI.memoperands_begin(),
3638 E = MI.memoperands_end();
3639 I != E; ++I) {
3640 Size += (*I)->getSize();
3641 }
3642 // FIXME: The scheduler currently can't handle values larger than 16. But
3643 // the values can actually go up to 32 for floating-point load/store
3644 // multiple (VLDMIA etc.). Also, the way this code is reasoning about memory
3645 // operations isn't right; we could end up with "extra" memory operands for
3646 // various reasons, such as tail merging combining two memory operations.
3647 return std::min(Size / 4, 16U);
3648 }
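// For example, an LDMIA carrying four 4-byte MachineMemOperands reports
// 16 / 4 = 4 addresses here, while an LDM with no mem operands attached
// reports 0 and the machine model must treat its width as unknown.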
3649
3650 static unsigned getNumMicroOpsSingleIssuePlusExtras(unsigned Opc,
3651 unsigned NumRegs) {
3652 unsigned UOps = 1 + NumRegs; // 1 for address computation.
3653 switch (Opc) {
3654 default:
3655 break;
3656 case ARM::VLDMDIA_UPD:
3657 case ARM::VLDMDDB_UPD:
3658 case ARM::VLDMSIA_UPD:
3659 case ARM::VLDMSDB_UPD:
3660 case ARM::VSTMDIA_UPD:
3661 case ARM::VSTMDDB_UPD:
3662 case ARM::VSTMSIA_UPD:
3663 case ARM::VSTMSDB_UPD:
3664 case ARM::LDMIA_UPD:
3665 case ARM::LDMDA_UPD:
3666 case ARM::LDMDB_UPD:
3667 case ARM::LDMIB_UPD:
3668 case ARM::STMIA_UPD:
3669 case ARM::STMDA_UPD:
3670 case ARM::STMDB_UPD:
3671 case ARM::STMIB_UPD:
3672 case ARM::tLDMIA_UPD:
3673 case ARM::tSTMIA_UPD:
3674 case ARM::t2LDMIA_UPD:
3675 case ARM::t2LDMDB_UPD:
3676 case ARM::t2STMIA_UPD:
3677 case ARM::t2STMDB_UPD:
3678 ++UOps; // One for base register writeback.
3679 break;
3680 case ARM::LDMIA_RET:
3681 case ARM::tPOP_RET:
3682 case ARM::t2LDMIA_RET:
3683 UOps += 2; // One for base reg wb, one for write to pc.
3684 break;
3685 }
3686 return UOps;
3687 }
3688
3689 unsigned ARMBaseInstrInfo::getNumMicroOps(const InstrItineraryData *ItinData,
3690 const MachineInstr &MI) const {
3691 if (!ItinData || ItinData->isEmpty())
3692 return 1;
3693
3694 const MCInstrDesc &Desc = MI.getDesc();
3695 unsigned Class = Desc.getSchedClass();
3696 int ItinUOps = ItinData->getNumMicroOps(Class);
3697 if (ItinUOps >= 0) {
3698 if (Subtarget.isSwift() && (Desc.mayLoad() || Desc.mayStore()))
3699 return getNumMicroOpsSwiftLdSt(ItinData, MI);
3700
3701 return ItinUOps;
3702 }
3703
3704 unsigned Opc = MI.getOpcode();
3705 switch (Opc) {
3706 default:
3707 llvm_unreachable("Unexpected multi-uops instruction!");
3708 case ARM::VLDMQIA:
3709 case ARM::VSTMQIA:
3710 return 2;
3711
3712 // The number of uOps for a load / store multiple is determined by the number
3713 // of registers.
3714 //
3715 // On Cortex-A8, each pair of register loads / stores can be scheduled on the
3716 // same cycle. The scheduling for the first load / store must be done
3717 // separately by assuming the address is not 64-bit aligned.
3718 //
3719 // On Cortex-A9, the formula is simply (#reg / 2) + (#reg % 2). If the address
3720 // is not 64-bit aligned, then the AGU takes an extra cycle. For VFP / NEON
3721 // load / store multiple, the formula is (#reg / 2) + (#reg % 2) + 1.
3722 case ARM::VLDMDIA:
3723 case ARM::VLDMDIA_UPD:
3724 case ARM::VLDMDDB_UPD:
3725 case ARM::VLDMSIA:
3726 case ARM::VLDMSIA_UPD:
3727 case ARM::VLDMSDB_UPD:
3728 case ARM::VSTMDIA:
3729 case ARM::VSTMDIA_UPD:
3730 case ARM::VSTMDDB_UPD:
3731 case ARM::VSTMSIA:
3732 case ARM::VSTMSIA_UPD:
3733 case ARM::VSTMSDB_UPD: {
3734 unsigned NumRegs = MI.getNumOperands() - Desc.getNumOperands();
3735 return (NumRegs / 2) + (NumRegs % 2) + 1;
3736 }
3737
3738 case ARM::LDMIA_RET:
3739 case ARM::LDMIA:
3740 case ARM::LDMDA:
3741 case ARM::LDMDB:
3742 case ARM::LDMIB:
3743 case ARM::LDMIA_UPD:
3744 case ARM::LDMDA_UPD:
3745 case ARM::LDMDB_UPD:
3746 case ARM::LDMIB_UPD:
3747 case ARM::STMIA:
3748 case ARM::STMDA:
3749 case ARM::STMDB:
3750 case ARM::STMIB:
3751 case ARM::STMIA_UPD:
3752 case ARM::STMDA_UPD:
3753 case ARM::STMDB_UPD:
3754 case ARM::STMIB_UPD:
3755 case ARM::tLDMIA:
3756 case ARM::tLDMIA_UPD:
3757 case ARM::tSTMIA_UPD:
3758 case ARM::tPOP_RET:
3759 case ARM::tPOP:
3760 case ARM::tPUSH:
3761 case ARM::t2LDMIA_RET:
3762 case ARM::t2LDMIA:
3763 case ARM::t2LDMDB:
3764 case ARM::t2LDMIA_UPD:
3765 case ARM::t2LDMDB_UPD:
3766 case ARM::t2STMIA:
3767 case ARM::t2STMDB:
3768 case ARM::t2STMIA_UPD:
3769 case ARM::t2STMDB_UPD: {
3770 unsigned NumRegs = MI.getNumOperands() - Desc.getNumOperands() + 1;
3771 switch (Subtarget.getLdStMultipleTiming()) {
3772 case ARMSubtarget::SingleIssuePlusExtras:
3773 return getNumMicroOpsSingleIssuePlusExtras(Opc, NumRegs);
3774 case ARMSubtarget::SingleIssue:
3775 // Assume the worst.
3776 return NumRegs;
3777 case ARMSubtarget::DoubleIssue: {
3778 if (NumRegs < 4)
3779 return 2;
3780 // 4 registers would be issued: 2, 2.
3781 // 5 registers would be issued: 2, 2, 1.
3782 unsigned UOps = (NumRegs / 2);
3783 if (NumRegs % 2)
3784 ++UOps;
3785 return UOps;
3786 }
3787 case ARMSubtarget::DoubleIssueCheckUnalignedAccess: {
3788 unsigned UOps = (NumRegs / 2);
3789 // If there is an odd number of registers or the access is not 64-bit aligned,
3790 // then it takes an extra AGU (Address Generation Unit) cycle.
3791 if ((NumRegs % 2) || !MI.hasOneMemOperand() ||
3792 (*MI.memoperands_begin())->getAlign() < Align(8))
3793 ++UOps;
3794 return UOps;
3795 }
3796 }
3797 }
3798 }
3799 llvm_unreachable("Didn't find the number of microops");
3800 }
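// Worked example for the LDM/STM cases above, with 5 registers in the list:
// SingleIssue assumes the worst at 5 uops; DoubleIssue issues 2 + 2 + 1 = 3;
// DoubleIssueCheckUnalignedAccess starts at 5 / 2 = 2 and adds one uop for
// the odd register count (or for an access not known to be 8-byte aligned).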
3801
3802 int
3803 ARMBaseInstrInfo::getVLDMDefCycle(const InstrItineraryData *ItinData,
3804 const MCInstrDesc &DefMCID,
3805 unsigned DefClass,
3806 unsigned DefIdx, unsigned DefAlign) const {
3807 int RegNo = (int)(DefIdx+1) - DefMCID.getNumOperands() + 1;
3808 if (RegNo <= 0)
3809 // Def is the address writeback.
3810 return ItinData->getOperandCycle(DefClass, DefIdx);
3811
3812 int DefCycle;
3813 if (Subtarget.isCortexA8() || Subtarget.isCortexA7()) {
3814 // (regno / 2) + (regno % 2) + 1
3815 DefCycle = RegNo / 2 + 1;
3816 if (RegNo % 2)
3817 ++DefCycle;
3818 } else if (Subtarget.isLikeA9() || Subtarget.isSwift()) {
3819 DefCycle = RegNo;
3820 bool isSLoad = false;
3821
3822 switch (DefMCID.getOpcode()) {
3823 default: break;
3824 case ARM::VLDMSIA:
3825 case ARM::VLDMSIA_UPD:
3826 case ARM::VLDMSDB_UPD:
3827 isSLoad = true;
3828 break;
3829 }
3830
3831 // If there is an odd number of 'S' registers or the access is not 64-bit aligned,
3832 // then it takes an extra cycle.
3833 if ((isSLoad && (RegNo % 2)) || DefAlign < 8)
3834 ++DefCycle;
3835 } else {
3836 // Assume the worst.
3837 DefCycle = RegNo + 2;
3838 }
3839
3840 return DefCycle;
3841 }
3842
3843 bool ARMBaseInstrInfo::isLDMBaseRegInList(const MachineInstr &MI) const {
3844 Register BaseReg = MI.getOperand(0).getReg();
3845 for (unsigned i = 1, sz = MI.getNumOperands(); i < sz; ++i) {
3846 const auto &Op = MI.getOperand(i);
3847 if (Op.isReg() && Op.getReg() == BaseReg)
3848 return true;
3849 }
3850 return false;
3851 }
3852 unsigned
3853 ARMBaseInstrInfo::getLDMVariableDefsSize(const MachineInstr &MI) const {
3854 // ins GPR:$Rn, $p (2xOp), reglist:$regs, variable_ops
3855 // (outs GPR:$wb), (ins GPR:$Rn, $p (2xOp), reglist:$regs, variable_ops)
3856 return MI.getNumOperands() + 1 - MI.getDesc().getNumOperands();
3857 }
3858
3859 int
3860 ARMBaseInstrInfo::getLDMDefCycle(const InstrItineraryData *ItinData,
3861 const MCInstrDesc &DefMCID,
3862 unsigned DefClass,
3863 unsigned DefIdx, unsigned DefAlign) const {
3864 int RegNo = (int)(DefIdx+1) - DefMCID.getNumOperands() + 1;
3865 if (RegNo <= 0)
3866 // Def is the address writeback.
3867 return ItinData->getOperandCycle(DefClass, DefIdx);
3868
3869 int DefCycle;
3870 if (Subtarget.isCortexA8() || Subtarget.isCortexA7()) {
3871 // 4 registers would be issued: 1, 2, 1.
3872 // 5 registers would be issued: 1, 2, 2.
3873 DefCycle = RegNo / 2;
3874 if (DefCycle < 1)
3875 DefCycle = 1;
3876 // Result latency is issue cycle + 2: E2.
3877 DefCycle += 2;
3878 } else if (Subtarget.isLikeA9() || Subtarget.isSwift()) {
3879 DefCycle = (RegNo / 2);
3880 // If there is an odd number of registers or the access is not 64-bit aligned,
3881 // then it takes an extra AGU (Address Generation Unit) cycle.
3882 if ((RegNo % 2) || DefAlign < 8)
3883 ++DefCycle;
3884 // Result latency is AGU cycles + 2.
3885 DefCycle += 2;
3886 } else {
3887 // Assume the worst.
3888 DefCycle = RegNo + 2;
3889 }
3890
3891 return DefCycle;
3892 }
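// For instance, the last of five loaded registers (RegNo == 5) is available
// at cycle 5/2 + 2 = 4 on Cortex-A8/A7, and at 5/2 + 1 + 2 = 5 on A9-like
// cores when the register count is odd or the access is not 64-bit aligned.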
3893
3894 int
3895 ARMBaseInstrInfo::getVSTMUseCycle(const InstrItineraryData *ItinData,
3896 const MCInstrDesc &UseMCID,
3897 unsigned UseClass,
3898 unsigned UseIdx, unsigned UseAlign) const {
3899 int RegNo = (int)(UseIdx+1) - UseMCID.getNumOperands() + 1;
3900 if (RegNo <= 0)
3901 return ItinData->getOperandCycle(UseClass, UseIdx);
3902
3903 int UseCycle;
3904 if (Subtarget.isCortexA8() || Subtarget.isCortexA7()) {
3905 // (regno / 2) + (regno % 2) + 1
3906 UseCycle = RegNo / 2 + 1;
3907 if (RegNo % 2)
3908 ++UseCycle;
3909 } else if (Subtarget.isLikeA9() || Subtarget.isSwift()) {
3910 UseCycle = RegNo;
3911 bool isSStore = false;
3912
3913 switch (UseMCID.getOpcode()) {
3914 default: break;
3915 case ARM::VSTMSIA:
3916 case ARM::VSTMSIA_UPD:
3917 case ARM::VSTMSDB_UPD:
3918 isSStore = true;
3919 break;
3920 }
3921
3922 // If there is an odd number of 'S' registers or the access is not 64-bit aligned,
3923 // then it takes an extra cycle.
3924 if ((isSStore && (RegNo % 2)) || UseAlign < 8)
3925 ++UseCycle;
3926 } else {
3927 // Assume the worst.
3928 UseCycle = RegNo + 2;
3929 }
3930
3931 return UseCycle;
3932 }
3933
3934 int
3935 ARMBaseInstrInfo::getSTMUseCycle(const InstrItineraryData *ItinData,
3936 const MCInstrDesc &UseMCID,
3937 unsigned UseClass,
3938 unsigned UseIdx, unsigned UseAlign) const {
3939 int RegNo = (int)(UseIdx+1) - UseMCID.getNumOperands() + 1;
3940 if (RegNo <= 0)
3941 return ItinData->getOperandCycle(UseClass, UseIdx);
3942
3943 int UseCycle;
3944 if (Subtarget.isCortexA8() || Subtarget.isCortexA7()) {
3945 UseCycle = RegNo / 2;
3946 if (UseCycle < 2)
3947 UseCycle = 2;
3948 // Read in E3.
3949 UseCycle += 2;
3950 } else if (Subtarget.isLikeA9() || Subtarget.isSwift()) {
3951 UseCycle = (RegNo / 2);
3952 // If there is an odd number of registers or the access is not 64-bit aligned,
3953 // then it takes an extra AGU (Address Generation Unit) cycle.
3954 if ((RegNo % 2) || UseAlign < 8)
3955 ++UseCycle;
3956 } else {
3957 // Assume the worst.
3958 UseCycle = 1;
3959 }
3960 return UseCycle;
3961 }
3962
3963 int
3964 ARMBaseInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
3965 const MCInstrDesc &DefMCID,
3966 unsigned DefIdx, unsigned DefAlign,
3967 const MCInstrDesc &UseMCID,
3968 unsigned UseIdx, unsigned UseAlign) const {
3969 unsigned DefClass = DefMCID.getSchedClass();
3970 unsigned UseClass = UseMCID.getSchedClass();
3971
3972 if (DefIdx < DefMCID.getNumDefs() && UseIdx < UseMCID.getNumOperands())
3973 return ItinData->getOperandLatency(DefClass, DefIdx, UseClass, UseIdx);
3974
3975 // This may be a def / use of a variable_ops instruction, in which case the
3976 // operand latency might still be determinable dynamically. Let the target
3977 // try to figure it out.
3978 int DefCycle = -1;
3979 bool LdmBypass = false;
3980 switch (DefMCID.getOpcode()) {
3981 default:
3982 DefCycle = ItinData->getOperandCycle(DefClass, DefIdx);
3983 break;
3984
3985 case ARM::VLDMDIA:
3986 case ARM::VLDMDIA_UPD:
3987 case ARM::VLDMDDB_UPD:
3988 case ARM::VLDMSIA:
3989 case ARM::VLDMSIA_UPD:
3990 case ARM::VLDMSDB_UPD:
3991 DefCycle = getVLDMDefCycle(ItinData, DefMCID, DefClass, DefIdx, DefAlign);
3992 break;
3993
3994 case ARM::LDMIA_RET:
3995 case ARM::LDMIA:
3996 case ARM::LDMDA:
3997 case ARM::LDMDB:
3998 case ARM::LDMIB:
3999 case ARM::LDMIA_UPD:
4000 case ARM::LDMDA_UPD:
4001 case ARM::LDMDB_UPD:
4002 case ARM::LDMIB_UPD:
4003 case ARM::tLDMIA:
4004 case ARM::tLDMIA_UPD:
4005 case ARM::tPUSH:
4006 case ARM::t2LDMIA_RET:
4007 case ARM::t2LDMIA:
4008 case ARM::t2LDMDB:
4009 case ARM::t2LDMIA_UPD:
4010 case ARM::t2LDMDB_UPD:
4011 LdmBypass = true;
4012 DefCycle = getLDMDefCycle(ItinData, DefMCID, DefClass, DefIdx, DefAlign);
4013 break;
4014 }
4015
4016 if (DefCycle == -1)
4017 // We can't seem to determine the result latency of the def; assume it's 2.
4018 DefCycle = 2;
4019
4020 int UseCycle = -1;
4021 switch (UseMCID.getOpcode()) {
4022 default:
4023 UseCycle = ItinData->getOperandCycle(UseClass, UseIdx);
4024 break;
4025
4026 case ARM::VSTMDIA:
4027 case ARM::VSTMDIA_UPD:
4028 case ARM::VSTMDDB_UPD:
4029 case ARM::VSTMSIA:
4030 case ARM::VSTMSIA_UPD:
4031 case ARM::VSTMSDB_UPD:
4032 UseCycle = getVSTMUseCycle(ItinData, UseMCID, UseClass, UseIdx, UseAlign);
4033 break;
4034
4035 case ARM::STMIA:
4036 case ARM::STMDA:
4037 case ARM::STMDB:
4038 case ARM::STMIB:
4039 case ARM::STMIA_UPD:
4040 case ARM::STMDA_UPD:
4041 case ARM::STMDB_UPD:
4042 case ARM::STMIB_UPD:
4043 case ARM::tSTMIA_UPD:
4044 case ARM::tPOP_RET:
4045 case ARM::tPOP:
4046 case ARM::t2STMIA:
4047 case ARM::t2STMDB:
4048 case ARM::t2STMIA_UPD:
4049 case ARM::t2STMDB_UPD:
4050 UseCycle = getSTMUseCycle(ItinData, UseMCID, UseClass, UseIdx, UseAlign);
4051 break;
4052 }
4053
4054 if (UseCycle == -1)
4055 // Assume it's read in the first stage.
4056 UseCycle = 1;
4057
4058 UseCycle = DefCycle - UseCycle + 1;
4059 if (UseCycle > 0) {
4060 if (LdmBypass) {
4061 // It's a variable_ops instruction so we can't use DefIdx here. Just use
4062 // the first def operand.
4063 if (ItinData->hasPipelineForwarding(DefClass, DefMCID.getNumOperands()-1,
4064 UseClass, UseIdx))
4065 --UseCycle;
4066 } else if (ItinData->hasPipelineForwarding(DefClass, DefIdx,
4067 UseClass, UseIdx)) {
4068 --UseCycle;
4069 }
4070 }
4071
4072 return UseCycle;
4073 }
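// Example of the computation above: a def completing in stage 4 feeding a use
// read in stage 2 yields 4 - 2 + 1 = 3 cycles of operand latency, and one
// cycle less when the itinerary reports pipeline forwarding between the two.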
4074
4075 static const MachineInstr *getBundledDefMI(const TargetRegisterInfo *TRI,
4076 const MachineInstr *MI, unsigned Reg,
4077 unsigned &DefIdx, unsigned &Dist) {
4078 Dist = 0;
4079
4080 MachineBasicBlock::const_iterator I = MI; ++I;
4081 MachineBasicBlock::const_instr_iterator II = std::prev(I.getInstrIterator());
4082 assert(II->isInsideBundle() && "Empty bundle?");
4083
4084 int Idx = -1;
4085 while (II->isInsideBundle()) {
4086 Idx = II->findRegisterDefOperandIdx(Reg, false, true, TRI);
4087 if (Idx != -1)
4088 break;
4089 --II;
4090 ++Dist;
4091 }
4092
4093 assert(Idx != -1 && "Cannot find bundled definition!");
4094 DefIdx = Idx;
4095 return &*II;
4096 }
4097
4098 static const MachineInstr *getBundledUseMI(const TargetRegisterInfo *TRI,
4099 const MachineInstr &MI, unsigned Reg,
4100 unsigned &UseIdx, unsigned &Dist) {
4101 Dist = 0;
4102
4103 MachineBasicBlock::const_instr_iterator II = ++MI.getIterator();
4104 assert(II->isInsideBundle() && "Empty bundle?");
4105 MachineBasicBlock::const_instr_iterator E = MI.getParent()->instr_end();
4106
4107 // FIXME: This doesn't properly handle multiple uses.
4108 int Idx = -1;
4109 while (II != E && II->isInsideBundle()) {
4110 Idx = II->findRegisterUseOperandIdx(Reg, false, TRI);
4111 if (Idx != -1)
4112 break;
4113 if (II->getOpcode() != ARM::t2IT)
4114 ++Dist;
4115 ++II;
4116 }
4117
4118 if (Idx == -1) {
4119 Dist = 0;
4120 return nullptr;
4121 }
4122
4123 UseIdx = Idx;
4124 return &*II;
4125 }
4126
4127 /// Return the number of cycles to add to (or subtract from) the static
4128 /// itinerary based on the def opcode and alignment. The caller will ensure that
4129 /// adjusted latency is at least one cycle.
4130 static int adjustDefLatency(const ARMSubtarget &Subtarget,
4131 const MachineInstr &DefMI,
4132 const MCInstrDesc &DefMCID, unsigned DefAlign) {
4133 int Adjust = 0;
4134 if (Subtarget.isCortexA8() || Subtarget.isLikeA9() || Subtarget.isCortexA7()) {
4135 // FIXME: Shifter op hack: no shift (i.e. [r +/- r]) or [r + r << 2]
4136 // variants are one cycle cheaper.
4137 switch (DefMCID.getOpcode()) {
4138 default: break;
4139 case ARM::LDRrs:
4140 case ARM::LDRBrs: {
4141 unsigned ShOpVal = DefMI.getOperand(3).getImm();
4142 unsigned ShImm = ARM_AM::getAM2Offset(ShOpVal);
4143 if (ShImm == 0 ||
4144 (ShImm == 2 && ARM_AM::getAM2ShiftOpc(ShOpVal) == ARM_AM::lsl))
4145 --Adjust;
4146 break;
4147 }
4148 case ARM::t2LDRs:
4149 case ARM::t2LDRBs:
4150 case ARM::t2LDRHs:
4151 case ARM::t2LDRSHs: {
4152 // Thumb2 mode: lsl only.
4153 unsigned ShAmt = DefMI.getOperand(3).getImm();
4154 if (ShAmt == 0 || ShAmt == 2)
4155 --Adjust;
4156 break;
4157 }
4158 }
4159 } else if (Subtarget.isSwift()) {
4160 // FIXME: Properly handle all of the latency adjustments for address
4161 // writeback.
4162 switch (DefMCID.getOpcode()) {
4163 default: break;
4164 case ARM::LDRrs:
4165 case ARM::LDRBrs: {
4166 unsigned ShOpVal = DefMI.getOperand(3).getImm();
4167 bool isSub = ARM_AM::getAM2Op(ShOpVal) == ARM_AM::sub;
4168 unsigned ShImm = ARM_AM::getAM2Offset(ShOpVal);
4169 if (!isSub &&
4170 (ShImm == 0 ||
4171 ((ShImm == 1 || ShImm == 2 || ShImm == 3) &&
4172 ARM_AM::getAM2ShiftOpc(ShOpVal) == ARM_AM::lsl)))
4173 Adjust -= 2;
4174 else if (!isSub &&
4175 ShImm == 1 && ARM_AM::getAM2ShiftOpc(ShOpVal) == ARM_AM::lsr)
4176 --Adjust;
4177 break;
4178 }
4179 case ARM::t2LDRs:
4180 case ARM::t2LDRBs:
4181 case ARM::t2LDRHs:
4182 case ARM::t2LDRSHs: {
4183 // Thumb2 mode: lsl only.
4184 unsigned ShAmt = DefMI.getOperand(3).getImm();
4185 if (ShAmt == 0 || ShAmt == 1 || ShAmt == 2 || ShAmt == 3)
4186 Adjust -= 2;
4187 break;
4188 }
4189 }
4190 }
4191
4192 if (DefAlign < 8 && Subtarget.checkVLDnAccessAlignment()) {
4193 switch (DefMCID.getOpcode()) {
4194 default: break;
4195 case ARM::VLD1q8:
4196 case ARM::VLD1q16:
4197 case ARM::VLD1q32:
4198 case ARM::VLD1q64:
4199 case ARM::VLD1q8wb_fixed:
4200 case ARM::VLD1q16wb_fixed:
4201 case ARM::VLD1q32wb_fixed:
4202 case ARM::VLD1q64wb_fixed:
4203 case ARM::VLD1q8wb_register:
4204 case ARM::VLD1q16wb_register:
4205 case ARM::VLD1q32wb_register:
4206 case ARM::VLD1q64wb_register:
4207 case ARM::VLD2d8:
4208 case ARM::VLD2d16:
4209 case ARM::VLD2d32:
4210 case ARM::VLD2q8:
4211 case ARM::VLD2q16:
4212 case ARM::VLD2q32:
4213 case ARM::VLD2d8wb_fixed:
4214 case ARM::VLD2d16wb_fixed:
4215 case ARM::VLD2d32wb_fixed:
4216 case ARM::VLD2q8wb_fixed:
4217 case ARM::VLD2q16wb_fixed:
4218 case ARM::VLD2q32wb_fixed:
4219 case ARM::VLD2d8wb_register:
4220 case ARM::VLD2d16wb_register:
4221 case ARM::VLD2d32wb_register:
4222 case ARM::VLD2q8wb_register:
4223 case ARM::VLD2q16wb_register:
4224 case ARM::VLD2q32wb_register:
4225 case ARM::VLD3d8:
4226 case ARM::VLD3d16:
4227 case ARM::VLD3d32:
4228 case ARM::VLD1d64T:
4229 case ARM::VLD3d8_UPD:
4230 case ARM::VLD3d16_UPD:
4231 case ARM::VLD3d32_UPD:
4232 case ARM::VLD1d64Twb_fixed:
4233 case ARM::VLD1d64Twb_register:
4234 case ARM::VLD3q8_UPD:
4235 case ARM::VLD3q16_UPD:
4236 case ARM::VLD3q32_UPD:
4237 case ARM::VLD4d8:
4238 case ARM::VLD4d16:
4239 case ARM::VLD4d32:
4240 case ARM::VLD1d64Q:
4241 case ARM::VLD4d8_UPD:
4242 case ARM::VLD4d16_UPD:
4243 case ARM::VLD4d32_UPD:
4244 case ARM::VLD1d64Qwb_fixed:
4245 case ARM::VLD1d64Qwb_register:
4246 case ARM::VLD4q8_UPD:
4247 case ARM::VLD4q16_UPD:
4248 case ARM::VLD4q32_UPD:
4249 case ARM::VLD1DUPq8:
4250 case ARM::VLD1DUPq16:
4251 case ARM::VLD1DUPq32:
4252 case ARM::VLD1DUPq8wb_fixed:
4253 case ARM::VLD1DUPq16wb_fixed:
4254 case ARM::VLD1DUPq32wb_fixed:
4255 case ARM::VLD1DUPq8wb_register:
4256 case ARM::VLD1DUPq16wb_register:
4257 case ARM::VLD1DUPq32wb_register:
4258 case ARM::VLD2DUPd8:
4259 case ARM::VLD2DUPd16:
4260 case ARM::VLD2DUPd32:
4261 case ARM::VLD2DUPd8wb_fixed:
4262 case ARM::VLD2DUPd16wb_fixed:
4263 case ARM::VLD2DUPd32wb_fixed:
4264 case ARM::VLD2DUPd8wb_register:
4265 case ARM::VLD2DUPd16wb_register:
4266 case ARM::VLD2DUPd32wb_register:
4267 case ARM::VLD4DUPd8:
4268 case ARM::VLD4DUPd16:
4269 case ARM::VLD4DUPd32:
4270 case ARM::VLD4DUPd8_UPD:
4271 case ARM::VLD4DUPd16_UPD:
4272 case ARM::VLD4DUPd32_UPD:
4273 case ARM::VLD1LNd8:
4274 case ARM::VLD1LNd16:
4275 case ARM::VLD1LNd32:
4276 case ARM::VLD1LNd8_UPD:
4277 case ARM::VLD1LNd16_UPD:
4278 case ARM::VLD1LNd32_UPD:
4279 case ARM::VLD2LNd8:
4280 case ARM::VLD2LNd16:
4281 case ARM::VLD2LNd32:
4282 case ARM::VLD2LNq16:
4283 case ARM::VLD2LNq32:
4284 case ARM::VLD2LNd8_UPD:
4285 case ARM::VLD2LNd16_UPD:
4286 case ARM::VLD2LNd32_UPD:
4287 case ARM::VLD2LNq16_UPD:
4288 case ARM::VLD2LNq32_UPD:
4289 case ARM::VLD4LNd8:
4290 case ARM::VLD4LNd16:
4291 case ARM::VLD4LNd32:
4292 case ARM::VLD4LNq16:
4293 case ARM::VLD4LNq32:
4294 case ARM::VLD4LNd8_UPD:
4295 case ARM::VLD4LNd16_UPD:
4296 case ARM::VLD4LNd32_UPD:
4297 case ARM::VLD4LNq16_UPD:
4298 case ARM::VLD4LNq32_UPD:
4299 // If the address is not 64-bit aligned, the latencies of these
4300 // instructions increase by one.
4301 ++Adjust;
4302 break;
4303 }
4304 }
4305 return Adjust;
4306 }
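// For example, "LDRrs rd, [rn, rm]" (no shift) earns -1 on Cortex-A7/A8/A9
// and -2 on Swift, while a VLD1q8 from a non-64-bit-aligned address earns +1;
// per the comment above, callers keep the adjusted latency from going
// negative.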
4307
4308 int ARMBaseInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
4309 const MachineInstr &DefMI,
4310 unsigned DefIdx,
4311 const MachineInstr &UseMI,
4312 unsigned UseIdx) const {
4313 // No operand latency. The caller may fall back to getInstrLatency.
4314 if (!ItinData || ItinData->isEmpty())
4315 return -1;
4316
4317 const MachineOperand &DefMO = DefMI.getOperand(DefIdx);
4318 Register Reg = DefMO.getReg();
4319
4320 const MachineInstr *ResolvedDefMI = &DefMI;
4321 unsigned DefAdj = 0;
4322 if (DefMI.isBundle())
4323 ResolvedDefMI =
4324 getBundledDefMI(&getRegisterInfo(), &DefMI, Reg, DefIdx, DefAdj);
4325 if (ResolvedDefMI->isCopyLike() || ResolvedDefMI->isInsertSubreg() ||
4326 ResolvedDefMI->isRegSequence() || ResolvedDefMI->isImplicitDef()) {
4327 return 1;
4328 }
4329
4330 const MachineInstr *ResolvedUseMI = &UseMI;
4331 unsigned UseAdj = 0;
4332 if (UseMI.isBundle()) {
4333 ResolvedUseMI =
4334 getBundledUseMI(&getRegisterInfo(), UseMI, Reg, UseIdx, UseAdj);
4335 if (!ResolvedUseMI)
4336 return -1;
4337 }
4338
4339 return getOperandLatencyImpl(
4340 ItinData, *ResolvedDefMI, DefIdx, ResolvedDefMI->getDesc(), DefAdj, DefMO,
4341 Reg, *ResolvedUseMI, UseIdx, ResolvedUseMI->getDesc(), UseAdj);
4342 }
4343
4344 int ARMBaseInstrInfo::getOperandLatencyImpl(
4345 const InstrItineraryData *ItinData, const MachineInstr &DefMI,
4346 unsigned DefIdx, const MCInstrDesc &DefMCID, unsigned DefAdj,
4347 const MachineOperand &DefMO, unsigned Reg, const MachineInstr &UseMI,
4348 unsigned UseIdx, const MCInstrDesc &UseMCID, unsigned UseAdj) const {
4349 if (Reg == ARM::CPSR) {
4350 if (DefMI.getOpcode() == ARM::FMSTAT) {
4351 // fpscr -> cpsr stalls over 20 cycles on A8 (and earlier?)
4352 return Subtarget.isLikeA9() ? 1 : 20;
4353 }
4354
4355 // CPSR set and branch can be paired in the same cycle.
4356 if (UseMI.isBranch())
4357 return 0;
4358
4359 // Otherwise it takes the instruction latency (generally one).
4360 unsigned Latency = getInstrLatency(ItinData, DefMI);
4361
4362 // For Thumb2 and -Os, prefer scheduling a CPSR-setting instruction close to
4363 // its uses. Instructions which are otherwise scheduled between them may
4364 // incur a code size penalty (not able to use the CPSR setting 16-bit
4365 // instructions).
4366 if (Latency > 0 && Subtarget.isThumb2()) {
4367 const MachineFunction *MF = DefMI.getParent()->getParent();
4368 // FIXME: Use Function::hasOptSize().
4369 if (MF->getFunction().hasFnAttribute(Attribute::OptimizeForSize))
4370 --Latency;
4371 }
4372 return Latency;
4373 }
4374
4375 if (DefMO.isImplicit() || UseMI.getOperand(UseIdx).isImplicit())
4376 return -1;
4377
4378 unsigned DefAlign = DefMI.hasOneMemOperand()
4379 ? (*DefMI.memoperands_begin())->getAlign().value()
4380 : 0;
4381 unsigned UseAlign = UseMI.hasOneMemOperand()
4382 ? (*UseMI.memoperands_begin())->getAlign().value()
4383 : 0;
4384
4385 // Get the itinerary's latency if possible, and handle variable_ops.
4386 int Latency = getOperandLatency(ItinData, DefMCID, DefIdx, DefAlign, UseMCID,
4387 UseIdx, UseAlign);
4388 // Unable to find operand latency. The caller may resort to getInstrLatency.
4389 if (Latency < 0)
4390 return Latency;
4391
4392 // Adjust for IT block position.
4393 int Adj = DefAdj + UseAdj;
4394
4395 // Adjust for dynamic def-side opcode variants not captured by the itinerary.
4396 Adj += adjustDefLatency(Subtarget, DefMI, DefMCID, DefAlign);
4397 if (Adj >= 0 || (int)Latency > -Adj) {
4398 return Latency + Adj;
4399 }
4400 // Return the itinerary latency, which may be zero but not less than zero.
4401 return Latency;
4402 }
4403
4404 int
4405 ARMBaseInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
4406 SDNode *DefNode, unsigned DefIdx,
4407 SDNode *UseNode, unsigned UseIdx) const {
4408 if (!DefNode->isMachineOpcode())
4409 return 1;
4410
4411 const MCInstrDesc &DefMCID = get(DefNode->getMachineOpcode());
4412
4413 if (isZeroCost(DefMCID.Opcode))
4414 return 0;
4415
4416 if (!ItinData || ItinData->isEmpty())
4417 return DefMCID.mayLoad() ? 3 : 1;
4418
4419 if (!UseNode->isMachineOpcode()) {
4420 int Latency = ItinData->getOperandCycle(DefMCID.getSchedClass(), DefIdx);
4421 int Adj = Subtarget.getPreISelOperandLatencyAdjustment();
4422 int Threshold = 1 + Adj;
4423 return Latency <= Threshold ? 1 : Latency - Adj;
4424 }
4425
4426 const MCInstrDesc &UseMCID = get(UseNode->getMachineOpcode());
4427 auto *DefMN = cast<MachineSDNode>(DefNode);
4428 unsigned DefAlign = !DefMN->memoperands_empty()
4429 ? (*DefMN->memoperands_begin())->getAlign().value()
4430 : 0;
4431 auto *UseMN = cast<MachineSDNode>(UseNode);
4432 unsigned UseAlign = !UseMN->memoperands_empty()
4433 ? (*UseMN->memoperands_begin())->getAlign().value()
4434 : 0;
4435 int Latency = getOperandLatency(ItinData, DefMCID, DefIdx, DefAlign,
4436 UseMCID, UseIdx, UseAlign);
4437
4438 if (Latency > 1 &&
4439 (Subtarget.isCortexA8() || Subtarget.isLikeA9() ||
4440 Subtarget.isCortexA7())) {
4441 // FIXME: Shifter op hack: no shift (i.e. [r +/- r]) or [r + r << 2]
4442 // variants are one cycle cheaper.
4443 switch (DefMCID.getOpcode()) {
4444 default: break;
4445 case ARM::LDRrs:
4446 case ARM::LDRBrs: {
4447 unsigned ShOpVal =
4448 cast<ConstantSDNode>(DefNode->getOperand(2))->getZExtValue();
4449 unsigned ShImm = ARM_AM::getAM2Offset(ShOpVal);
4450 if (ShImm == 0 ||
4451 (ShImm == 2 && ARM_AM::getAM2ShiftOpc(ShOpVal) == ARM_AM::lsl))
4452 --Latency;
4453 break;
4454 }
4455 case ARM::t2LDRs:
4456 case ARM::t2LDRBs:
4457 case ARM::t2LDRHs:
4458 case ARM::t2LDRSHs: {
4459 // Thumb2 mode: lsl only.
4460 unsigned ShAmt =
4461 cast<ConstantSDNode>(DefNode->getOperand(2))->getZExtValue();
4462 if (ShAmt == 0 || ShAmt == 2)
4463 --Latency;
4464 break;
4465 }
4466 }
4467 } else if (DefIdx == 0 && Latency > 2 && Subtarget.isSwift()) {
4468 // FIXME: Properly handle all of the latency adjustments for address
4469 // writeback.
4470 switch (DefMCID.getOpcode()) {
4471 default: break;
4472 case ARM::LDRrs:
4473 case ARM::LDRBrs: {
4474 unsigned ShOpVal =
4475 cast<ConstantSDNode>(DefNode->getOperand(2))->getZExtValue();
4476 unsigned ShImm = ARM_AM::getAM2Offset(ShOpVal);
4477 if (ShImm == 0 ||
4478 ((ShImm == 1 || ShImm == 2 || ShImm == 3) &&
4479 ARM_AM::getAM2ShiftOpc(ShOpVal) == ARM_AM::lsl))
4480 Latency -= 2;
4481 else if (ShImm == 1 && ARM_AM::getAM2ShiftOpc(ShOpVal) == ARM_AM::lsr)
4482 --Latency;
4483 break;
4484 }
4485 case ARM::t2LDRs:
4486 case ARM::t2LDRBs:
4487 case ARM::t2LDRHs:
4488 case ARM::t2LDRSHs:
4489 // Thumb2 mode: lsl 0-3 only.
4490 Latency -= 2;
4491 break;
4492 }
4493 }
4494
4495 if (DefAlign < 8 && Subtarget.checkVLDnAccessAlignment())
4496 switch (DefMCID.getOpcode()) {
4497 default: break;
4498 case ARM::VLD1q8:
4499 case ARM::VLD1q16:
4500 case ARM::VLD1q32:
4501 case ARM::VLD1q64:
4502 case ARM::VLD1q8wb_register:
4503 case ARM::VLD1q16wb_register:
4504 case ARM::VLD1q32wb_register:
4505 case ARM::VLD1q64wb_register:
4506 case ARM::VLD1q8wb_fixed:
4507 case ARM::VLD1q16wb_fixed:
4508 case ARM::VLD1q32wb_fixed:
4509 case ARM::VLD1q64wb_fixed:
4510 case ARM::VLD2d8:
4511 case ARM::VLD2d16:
4512 case ARM::VLD2d32:
4513 case ARM::VLD2q8Pseudo:
4514 case ARM::VLD2q16Pseudo:
4515 case ARM::VLD2q32Pseudo:
4516 case ARM::VLD2d8wb_fixed:
4517 case ARM::VLD2d16wb_fixed:
4518 case ARM::VLD2d32wb_fixed:
4519 case ARM::VLD2q8PseudoWB_fixed:
4520 case ARM::VLD2q16PseudoWB_fixed:
4521 case ARM::VLD2q32PseudoWB_fixed:
4522 case ARM::VLD2d8wb_register:
4523 case ARM::VLD2d16wb_register:
4524 case ARM::VLD2d32wb_register:
4525 case ARM::VLD2q8PseudoWB_register:
4526 case ARM::VLD2q16PseudoWB_register:
4527 case ARM::VLD2q32PseudoWB_register:
4528 case ARM::VLD3d8Pseudo:
4529 case ARM::VLD3d16Pseudo:
4530 case ARM::VLD3d32Pseudo:
4531 case ARM::VLD1d8TPseudo:
4532 case ARM::VLD1d16TPseudo:
4533 case ARM::VLD1d32TPseudo:
4534 case ARM::VLD1d64TPseudo:
4535 case ARM::VLD1d64TPseudoWB_fixed:
4536 case ARM::VLD1d64TPseudoWB_register:
4537 case ARM::VLD3d8Pseudo_UPD:
4538 case ARM::VLD3d16Pseudo_UPD:
4539 case ARM::VLD3d32Pseudo_UPD:
4540 case ARM::VLD3q8Pseudo_UPD:
4541 case ARM::VLD3q16Pseudo_UPD:
4542 case ARM::VLD3q32Pseudo_UPD:
4543 case ARM::VLD3q8oddPseudo:
4544 case ARM::VLD3q16oddPseudo:
4545 case ARM::VLD3q32oddPseudo:
4546 case ARM::VLD3q8oddPseudo_UPD:
4547 case ARM::VLD3q16oddPseudo_UPD:
4548 case ARM::VLD3q32oddPseudo_UPD:
4549 case ARM::VLD4d8Pseudo:
4550 case ARM::VLD4d16Pseudo:
4551 case ARM::VLD4d32Pseudo:
4552 case ARM::VLD1d8QPseudo:
4553 case ARM::VLD1d16QPseudo:
4554 case ARM::VLD1d32QPseudo:
4555 case ARM::VLD1d64QPseudo:
4556 case ARM::VLD1d64QPseudoWB_fixed:
4557 case ARM::VLD1d64QPseudoWB_register:
4558 case ARM::VLD1q8HighQPseudo:
4559 case ARM::VLD1q8LowQPseudo_UPD:
4560 case ARM::VLD1q8HighTPseudo:
4561 case ARM::VLD1q8LowTPseudo_UPD:
4562 case ARM::VLD1q16HighQPseudo:
4563 case ARM::VLD1q16LowQPseudo_UPD:
4564 case ARM::VLD1q16HighTPseudo:
4565 case ARM::VLD1q16LowTPseudo_UPD:
4566 case ARM::VLD1q32HighQPseudo:
4567 case ARM::VLD1q32LowQPseudo_UPD:
4568 case ARM::VLD1q32HighTPseudo:
4569 case ARM::VLD1q32LowTPseudo_UPD:
4570 case ARM::VLD1q64HighQPseudo:
4571 case ARM::VLD1q64LowQPseudo_UPD:
4572 case ARM::VLD1q64HighTPseudo:
4573 case ARM::VLD1q64LowTPseudo_UPD:
4574 case ARM::VLD4d8Pseudo_UPD:
4575 case ARM::VLD4d16Pseudo_UPD:
4576 case ARM::VLD4d32Pseudo_UPD:
4577 case ARM::VLD4q8Pseudo_UPD:
4578 case ARM::VLD4q16Pseudo_UPD:
4579 case ARM::VLD4q32Pseudo_UPD:
4580 case ARM::VLD4q8oddPseudo:
4581 case ARM::VLD4q16oddPseudo:
4582 case ARM::VLD4q32oddPseudo:
4583 case ARM::VLD4q8oddPseudo_UPD:
4584 case ARM::VLD4q16oddPseudo_UPD:
4585 case ARM::VLD4q32oddPseudo_UPD:
4586 case ARM::VLD1DUPq8:
4587 case ARM::VLD1DUPq16:
4588 case ARM::VLD1DUPq32:
4589 case ARM::VLD1DUPq8wb_fixed:
4590 case ARM::VLD1DUPq16wb_fixed:
4591 case ARM::VLD1DUPq32wb_fixed:
4592 case ARM::VLD1DUPq8wb_register:
4593 case ARM::VLD1DUPq16wb_register:
4594 case ARM::VLD1DUPq32wb_register:
4595 case ARM::VLD2DUPd8:
4596 case ARM::VLD2DUPd16:
4597 case ARM::VLD2DUPd32:
4598 case ARM::VLD2DUPd8wb_fixed:
4599 case ARM::VLD2DUPd16wb_fixed:
4600 case ARM::VLD2DUPd32wb_fixed:
4601 case ARM::VLD2DUPd8wb_register:
4602 case ARM::VLD2DUPd16wb_register:
4603 case ARM::VLD2DUPd32wb_register:
4604 case ARM::VLD2DUPq8EvenPseudo:
4605 case ARM::VLD2DUPq8OddPseudo:
4606 case ARM::VLD2DUPq16EvenPseudo:
4607 case ARM::VLD2DUPq16OddPseudo:
4608 case ARM::VLD2DUPq32EvenPseudo:
4609 case ARM::VLD2DUPq32OddPseudo:
4610 case ARM::VLD3DUPq8EvenPseudo:
4611 case ARM::VLD3DUPq8OddPseudo:
4612 case ARM::VLD3DUPq16EvenPseudo:
4613 case ARM::VLD3DUPq16OddPseudo:
4614 case ARM::VLD3DUPq32EvenPseudo:
4615 case ARM::VLD3DUPq32OddPseudo:
4616 case ARM::VLD4DUPd8Pseudo:
4617 case ARM::VLD4DUPd16Pseudo:
4618 case ARM::VLD4DUPd32Pseudo:
4619 case ARM::VLD4DUPd8Pseudo_UPD:
4620 case ARM::VLD4DUPd16Pseudo_UPD:
4621 case ARM::VLD4DUPd32Pseudo_UPD:
4622 case ARM::VLD4DUPq8EvenPseudo:
4623 case ARM::VLD4DUPq8OddPseudo:
4624 case ARM::VLD4DUPq16EvenPseudo:
4625 case ARM::VLD4DUPq16OddPseudo:
4626 case ARM::VLD4DUPq32EvenPseudo:
4627 case ARM::VLD4DUPq32OddPseudo:
4628 case ARM::VLD1LNq8Pseudo:
4629 case ARM::VLD1LNq16Pseudo:
4630 case ARM::VLD1LNq32Pseudo:
4631 case ARM::VLD1LNq8Pseudo_UPD:
4632 case ARM::VLD1LNq16Pseudo_UPD:
4633 case ARM::VLD1LNq32Pseudo_UPD:
4634 case ARM::VLD2LNd8Pseudo:
4635 case ARM::VLD2LNd16Pseudo:
4636 case ARM::VLD2LNd32Pseudo:
4637 case ARM::VLD2LNq16Pseudo:
4638 case ARM::VLD2LNq32Pseudo:
4639 case ARM::VLD2LNd8Pseudo_UPD:
4640 case ARM::VLD2LNd16Pseudo_UPD:
4641 case ARM::VLD2LNd32Pseudo_UPD:
4642 case ARM::VLD2LNq16Pseudo_UPD:
4643 case ARM::VLD2LNq32Pseudo_UPD:
4644 case ARM::VLD4LNd8Pseudo:
4645 case ARM::VLD4LNd16Pseudo:
4646 case ARM::VLD4LNd32Pseudo:
4647 case ARM::VLD4LNq16Pseudo:
4648 case ARM::VLD4LNq32Pseudo:
4649 case ARM::VLD4LNd8Pseudo_UPD:
4650 case ARM::VLD4LNd16Pseudo_UPD:
4651 case ARM::VLD4LNd32Pseudo_UPD:
4652 case ARM::VLD4LNq16Pseudo_UPD:
4653 case ARM::VLD4LNq32Pseudo_UPD:
4654 // If the address is not 64-bit aligned, the latencies of these
4655 // instructions increase by one.
4656 ++Latency;
4657 break;
4658 }
4659
4660 return Latency;
4661 }
4662
4663 unsigned ARMBaseInstrInfo::getPredicationCost(const MachineInstr &MI) const {
4664 if (MI.isCopyLike() || MI.isInsertSubreg() || MI.isRegSequence() ||
4665 MI.isImplicitDef())
4666 return 0;
4667
4668 if (MI.isBundle())
4669 return 0;
4670
4671 const MCInstrDesc &MCID = MI.getDesc();
4672
4673 if (MCID.isCall() || (MCID.hasImplicitDefOfPhysReg(ARM::CPSR) &&
4674 !Subtarget.cheapPredicableCPSRDef())) {
4675 // When predicated, CPSR is an additional source operand for CPSR updating
4676 // instructions; this apparently increases their latencies.
4677 return 1;
4678 }
4679 return 0;
4680 }
4681
4682 unsigned ARMBaseInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
4683 const MachineInstr &MI,
4684 unsigned *PredCost) const {
4685 if (MI.isCopyLike() || MI.isInsertSubreg() || MI.isRegSequence() ||
4686 MI.isImplicitDef())
4687 return 1;
4688
4689 // An instruction scheduler typically runs on unbundled instructions; however,
4690 // other passes may query the latency of a bundled instruction.
4691 if (MI.isBundle()) {
4692 unsigned Latency = 0;
4693 MachineBasicBlock::const_instr_iterator I = MI.getIterator();
4694 MachineBasicBlock::const_instr_iterator E = MI.getParent()->instr_end();
4695 while (++I != E && I->isInsideBundle()) {
4696 if (I->getOpcode() != ARM::t2IT)
4697 Latency += getInstrLatency(ItinData, *I, PredCost);
4698 }
4699 return Latency;
4700 }
4701
4702 const MCInstrDesc &MCID = MI.getDesc();
4703 if (PredCost && (MCID.isCall() || (MCID.hasImplicitDefOfPhysReg(ARM::CPSR) &&
4704 !Subtarget.cheapPredicableCPSRDef()))) {
4705 // When predicated, CPSR is an additional source operand for CPSR updating
4706 // instructions; this apparently increases their latencies.
4707 *PredCost = 1;
4708 }
4709 // Be sure to call getStageLatency for an empty itinerary in case it has a
4710 // valid MinLatency property.
4711 if (!ItinData)
4712 return MI.mayLoad() ? 3 : 1;
4713
4714 unsigned Class = MCID.getSchedClass();
4715
4716 // For instructions with variable uops, use uops as latency.
4717 if (!ItinData->isEmpty() && ItinData->getNumMicroOps(Class) < 0)
4718 return getNumMicroOps(ItinData, MI);
4719
4720 // For the common case, fall back on the itinerary's latency.
4721 unsigned Latency = ItinData->getStageLatency(Class);
4722
4723 // Adjust for dynamic def-side opcode variants not captured by the itinerary.
4724 unsigned DefAlign =
4725 MI.hasOneMemOperand() ? (*MI.memoperands_begin())->getAlign().value() : 0;
4726 int Adj = adjustDefLatency(Subtarget, MI, MCID, DefAlign);
4727 if (Adj >= 0 || (int)Latency > -Adj) {
4728 return Latency + Adj;
4729 }
4730 return Latency;
4731 }
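
// For example (a sketch; the bundled opcodes are illustrative), a Thumb2 IT
// bundle reports the sum of its members' latencies, with the t2IT marker
// itself contributing nothing:
//
//   BUNDLE implicit-def $cpsr
//     t2IT 0, 8          ; skipped by the loop above
//     t2ADDri ...        ; latency L1
//     t2LDRi12 ...       ; latency L2
//   => getInstrLatency(BUNDLE) == L1 + L2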
4732
4733 int ARMBaseInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
4734 SDNode *Node) const {
4735 if (!Node->isMachineOpcode())
4736 return 1;
4737
4738 if (!ItinData || ItinData->isEmpty())
4739 return 1;
4740
4741 unsigned Opcode = Node->getMachineOpcode();
4742 switch (Opcode) {
4743 default:
4744 return ItinData->getStageLatency(get(Opcode).getSchedClass());
4745 case ARM::VLDMQIA:
4746 case ARM::VSTMQIA:
4747 return 2;
4748 }
4749 }
4750
4751 bool ARMBaseInstrInfo::hasHighOperandLatency(const TargetSchedModel &SchedModel,
4752 const MachineRegisterInfo *MRI,
4753 const MachineInstr &DefMI,
4754 unsigned DefIdx,
4755 const MachineInstr &UseMI,
4756 unsigned UseIdx) const {
4757 unsigned DDomain = DefMI.getDesc().TSFlags & ARMII::DomainMask;
4758 unsigned UDomain = UseMI.getDesc().TSFlags & ARMII::DomainMask;
4759 if (Subtarget.nonpipelinedVFP() &&
4760 (DDomain == ARMII::DomainVFP || UDomain == ARMII::DomainVFP))
4761 return true;
4762
4763 // Hoist VFP / NEON instructions with 4 or higher latency.
4764 unsigned Latency =
4765 SchedModel.computeOperandLatency(&DefMI, DefIdx, &UseMI, UseIdx);
4766 if (Latency <= 3)
4767 return false;
4768 return DDomain == ARMII::DomainVFP || DDomain == ARMII::DomainNEON ||
4769 UDomain == ARMII::DomainVFP || UDomain == ARMII::DomainNEON;
4770 }
4771
4772 bool ARMBaseInstrInfo::hasLowDefLatency(const TargetSchedModel &SchedModel,
4773 const MachineInstr &DefMI,
4774 unsigned DefIdx) const {
4775 const InstrItineraryData *ItinData = SchedModel.getInstrItineraries();
4776 if (!ItinData || ItinData->isEmpty())
4777 return false;
4778
4779 unsigned DDomain = DefMI.getDesc().TSFlags & ARMII::DomainMask;
4780 if (DDomain == ARMII::DomainGeneral) {
4781 unsigned DefClass = DefMI.getDesc().getSchedClass();
4782 int DefCycle = ItinData->getOperandCycle(DefClass, DefIdx);
4783 return (DefCycle != -1 && DefCycle <= 2);
4784 }
4785 return false;
4786 }
4787
4788 bool ARMBaseInstrInfo::verifyInstruction(const MachineInstr &MI,
4789 StringRef &ErrInfo) const {
4790 if (convertAddSubFlagsOpcode(MI.getOpcode())) {
4791 ErrInfo = "Pseudo flag setting opcodes only exist in Selection DAG";
4792 return false;
4793 }
4794 if (MI.getOpcode() == ARM::tMOVr && !Subtarget.hasV6Ops()) {
4795 // Make sure we don't generate a lo-lo mov that isn't supported.
4796 if (!ARM::hGPRRegClass.contains(MI.getOperand(0).getReg()) &&
4797 !ARM::hGPRRegClass.contains(MI.getOperand(1).getReg())) {
4798 ErrInfo = "Non-flag-setting Thumb1 mov is v6-only";
4799 return false;
4800 }
4801 }
4802 if (MI.getOpcode() == ARM::tPUSH ||
4803 MI.getOpcode() == ARM::tPOP ||
4804 MI.getOpcode() == ARM::tPOP_RET) {
4805 for (int i = 2, e = MI.getNumOperands(); i < e; ++i) {
4806 if (MI.getOperand(i).isImplicit() ||
4807 !MI.getOperand(i).isReg())
4808 continue;
4809 Register Reg = MI.getOperand(i).getReg();
4810 if (Reg < ARM::R0 || Reg > ARM::R7) {
4811 if (!(MI.getOpcode() == ARM::tPUSH && Reg == ARM::LR) &&
4812 !(MI.getOpcode() == ARM::tPOP_RET && Reg == ARM::PC)) {
4813 ErrInfo = "Unsupported register in Thumb1 push/pop";
4814 return false;
4815 }
4816 }
4817 }
4818 }
4819 return true;
4820 }
4821
4822 // LoadStackGuard has so far only been implemented for MachO. A different code
4823 // sequence is needed for other targets.
4824 void ARMBaseInstrInfo::expandLoadStackGuardBase(MachineBasicBlock::iterator MI,
4825 unsigned LoadImmOpc,
4826 unsigned LoadOpc) const {
4827 assert(!Subtarget.isROPI() && !Subtarget.isRWPI() &&
4828 "ROPI/RWPI not currently supported with stack guard");
4829
4830 MachineBasicBlock &MBB = *MI->getParent();
4831 DebugLoc DL = MI->getDebugLoc();
4832 Register Reg = MI->getOperand(0).getReg();
4833 const GlobalValue *GV =
4834 cast<GlobalValue>((*MI->memoperands_begin())->getValue());
4835 MachineInstrBuilder MIB;
4836
4837 BuildMI(MBB, MI, DL, get(LoadImmOpc), Reg)
4838 .addGlobalAddress(GV, 0, ARMII::MO_NONLAZY);
4839
4840 if (Subtarget.isGVIndirectSymbol(GV)) {
4841 MIB = BuildMI(MBB, MI, DL, get(LoadOpc), Reg);
4842 MIB.addReg(Reg, RegState::Kill).addImm(0);
4843 auto Flags = MachineMemOperand::MOLoad |
4844 MachineMemOperand::MODereferenceable |
4845 MachineMemOperand::MOInvariant;
4846 MachineMemOperand *MMO = MBB.getParent()->getMachineMemOperand(
4847 MachinePointerInfo::getGOT(*MBB.getParent()), Flags, 4, Align(4));
4848 MIB.addMemOperand(MMO).add(predOps(ARMCC::AL));
4849 }
4850
4851 MIB = BuildMI(MBB, MI, DL, get(LoadOpc), Reg);
4852 MIB.addReg(Reg, RegState::Kill)
4853 .addImm(0)
4854 .cloneMemRefs(*MI)
4855 .add(predOps(ARMCC::AL));
4856 }
4857
4858 bool
4859 ARMBaseInstrInfo::isFpMLxInstruction(unsigned Opcode, unsigned &MulOpc,
4860 unsigned &AddSubOpc,
4861 bool &NegAcc, bool &HasLane) const {
4862 DenseMap<unsigned, unsigned>::const_iterator I = MLxEntryMap.find(Opcode);
4863 if (I == MLxEntryMap.end())
4864 return false;
4865
4866 const ARM_MLxEntry &Entry = ARM_MLxTable[I->second];
4867 MulOpc = Entry.MulOpc;
4868 AddSubOpc = Entry.AddSubOpc;
4869 NegAcc = Entry.NegAcc;
4870 HasLane = Entry.HasLane;
4871 return true;
4872 }
4873
4874 //===----------------------------------------------------------------------===//
4875 // Execution domains.
4876 //===----------------------------------------------------------------------===//
4877 //
4878 // Some instructions go down the NEON pipeline, some go down the VFP pipeline,
4879 // and some can go down both. The vmov instructions go down the VFP pipeline,
4880 // but they can be changed to vorr equivalents that are executed by the NEON
4881 // pipeline.
4882 //
4883 // We use the following execution domain numbering:
4884 //
4885 enum ARMExeDomain {
4886 ExeGeneric = 0,
4887 ExeVFP = 1,
4888 ExeNEON = 2
4889 };
4890
4891 //
4892 // Also see ARMInstrFormats.td and Domain* enums in ARMBaseInfo.h
4893 //
4894 std::pair<uint16_t, uint16_t>
4895 ARMBaseInstrInfo::getExecutionDomain(const MachineInstr &MI) const {
4896 // If we don't have access to NEON instructions then we won't be able
4897 // to swizzle anything to the NEON domain. Check to make sure.
4898 if (Subtarget.hasNEON()) {
4899 // VMOVD, VMOVRS and VMOVSR are VFP instructions, but can be changed to NEON
4900 // if they are not predicated.
4901 if (MI.getOpcode() == ARM::VMOVD && !isPredicated(MI))
4902 return std::make_pair(ExeVFP, (1 << ExeVFP) | (1 << ExeNEON));
4903
4904     // Cortex-A9 is particularly picky about mixing the two and wants these
4905 // converted.
4906 if (Subtarget.useNEONForFPMovs() && !isPredicated(MI) &&
4907 (MI.getOpcode() == ARM::VMOVRS || MI.getOpcode() == ARM::VMOVSR ||
4908 MI.getOpcode() == ARM::VMOVS))
4909 return std::make_pair(ExeVFP, (1 << ExeVFP) | (1 << ExeNEON));
4910 }
4911 // No other instructions can be swizzled, so just determine their domain.
4912 unsigned Domain = MI.getDesc().TSFlags & ARMII::DomainMask;
4913
4914 if (Domain & ARMII::DomainNEON)
4915 return std::make_pair(ExeNEON, 0);
4916
4917 // Certain instructions can go either way on Cortex-A8.
4918 // Treat them as NEON instructions.
4919 if ((Domain & ARMII::DomainNEONA8) && Subtarget.isCortexA8())
4920 return std::make_pair(ExeNEON, 0);
4921
4922 if (Domain & ARMII::DomainVFP)
4923 return std::make_pair(ExeVFP, 0);
4924
4925 return std::make_pair(ExeGeneric, 0);
4926 }
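
// Example (illustrative): for an unpredicated VMOVD this returns
//
//   std::pair<uint16_t, uint16_t> D = TII->getExecutionDomain(MI);
//   // D == {ExeVFP, (1 << ExeVFP) | (1 << ExeNEON)}
//
// i.e. the instruction currently executes in the VFP domain but may be
// swizzled to NEON (as a VORRd) by the execution-domain fix pass, whereas a
// predicated VMOVD gets an empty possible-domains mask and is left alone.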
4927
4928 static unsigned getCorrespondingDRegAndLane(const TargetRegisterInfo *TRI,
4929 unsigned SReg, unsigned &Lane) {
4930 unsigned DReg = TRI->getMatchingSuperReg(SReg, ARM::ssub_0, &ARM::DPRRegClass);
4931 Lane = 0;
4932
4933 if (DReg != ARM::NoRegister)
4934 return DReg;
4935
4936 Lane = 1;
4937 DReg = TRI->getMatchingSuperReg(SReg, ARM::ssub_1, &ARM::DPRRegClass);
4938
4939 assert(DReg && "S-register with no D super-register?");
4940 return DReg;
4941 }
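
// For instance (assuming the usual ARM register layout, where S0/S1 are the
// halves of D0 and S2/S3 of D1):
//
//   unsigned Lane;
//   unsigned DReg = getCorrespondingDRegAndLane(TRI, ARM::S1, Lane);
//   // DReg == ARM::D0, Lane == 1; querying ARM::S2 yields D1 with Lane == 0.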
4942
4943 /// getImplicitSPRUseForDPRUse - Given a use of a DPR register and lane,
4944 /// set ImplicitSReg to a register number that must be marked as implicit-use or
4945 /// zero if no register needs to be defined as implicit-use.
4946 ///
4947 /// If the function cannot determine if an SPR should be marked implicit use or
4948 /// not, it returns false.
4949 ///
4950 /// This function handles cases where an instruction is being modified from taking
4951 /// an SPR to a DPR[Lane]. A use of the DPR is being added, which may conflict
4952 /// with an earlier def of an SPR corresponding to DPR[Lane^1] (i.e. the other
4953 /// lane of the DPR).
4954 ///
4955 /// If the other SPR is defined, an implicit-use of it should be added. Otherwise
4956 /// (including the case where the DPR itself is defined), it should not.
4957 ///
4958 static bool getImplicitSPRUseForDPRUse(const TargetRegisterInfo *TRI,
4959 MachineInstr &MI, unsigned DReg,
4960 unsigned Lane, unsigned &ImplicitSReg) {
4961 // If the DPR is defined or used already, the other SPR lane will be chained
4962 // correctly, so there is nothing to be done.
4963 if (MI.definesRegister(DReg, TRI) || MI.readsRegister(DReg, TRI)) {
4964 ImplicitSReg = 0;
4965 return true;
4966 }
4967
4968 // Otherwise we need to go searching to see if the SPR is set explicitly.
4969 ImplicitSReg = TRI->getSubReg(DReg,
4970 (Lane & 1) ? ARM::ssub_0 : ARM::ssub_1);
4971 MachineBasicBlock::LivenessQueryResult LQR =
4972 MI.getParent()->computeRegisterLiveness(TRI, ImplicitSReg, MI);
4973
4974 if (LQR == MachineBasicBlock::LQR_Live)
4975 return true;
4976 else if (LQR == MachineBasicBlock::LQR_Unknown)
4977 return false;
4978
4979 // If the register is known not to be live, there is no need to add an
4980 // implicit-use.
4981 ImplicitSReg = 0;
4982 return true;
4983 }
4984
4985 void ARMBaseInstrInfo::setExecutionDomain(MachineInstr &MI,
4986 unsigned Domain) const {
4987 unsigned DstReg, SrcReg, DReg;
4988 unsigned Lane;
4989 MachineInstrBuilder MIB(*MI.getParent()->getParent(), MI);
4990 const TargetRegisterInfo *TRI = &getRegisterInfo();
4991 switch (MI.getOpcode()) {
4992 default:
4993 llvm_unreachable("cannot handle opcode!");
4994 break;
4995 case ARM::VMOVD:
4996 if (Domain != ExeNEON)
4997 break;
4998
4999 // Zap the predicate operands.
5000 assert(!isPredicated(MI) && "Cannot predicate a VORRd");
5001
5002 // Make sure we've got NEON instructions.
5003 assert(Subtarget.hasNEON() && "VORRd requires NEON");
5004
5005 // Source instruction is %DDst = VMOVD %DSrc, 14, %noreg (; implicits)
5006 DstReg = MI.getOperand(0).getReg();
5007 SrcReg = MI.getOperand(1).getReg();
5008
5009 for (unsigned i = MI.getDesc().getNumOperands(); i; --i)
5010 MI.RemoveOperand(i - 1);
5011
5012 // Change to a %DDst = VORRd %DSrc, %DSrc, 14, %noreg (; implicits)
5013 MI.setDesc(get(ARM::VORRd));
5014 MIB.addReg(DstReg, RegState::Define)
5015 .addReg(SrcReg)
5016 .addReg(SrcReg)
5017 .add(predOps(ARMCC::AL));
5018 break;
5019 case ARM::VMOVRS:
5020 if (Domain != ExeNEON)
5021 break;
5022 assert(!isPredicated(MI) && "Cannot predicate a VGETLN");
5023
5024 // Source instruction is %RDst = VMOVRS %SSrc, 14, %noreg (; implicits)
5025 DstReg = MI.getOperand(0).getReg();
5026 SrcReg = MI.getOperand(1).getReg();
5027
5028 for (unsigned i = MI.getDesc().getNumOperands(); i; --i)
5029 MI.RemoveOperand(i - 1);
5030
5031 DReg = getCorrespondingDRegAndLane(TRI, SrcReg, Lane);
5032
5033 // Convert to %RDst = VGETLNi32 %DSrc, Lane, 14, %noreg (; imps)
5034 // Note that DSrc has been widened and the other lane may be undef, which
5035 // contaminates the entire register.
5036 MI.setDesc(get(ARM::VGETLNi32));
5037 MIB.addReg(DstReg, RegState::Define)
5038 .addReg(DReg, RegState::Undef)
5039 .addImm(Lane)
5040 .add(predOps(ARMCC::AL));
5041
5042 // The old source should be an implicit use, otherwise we might think it
5043 // was dead before here.
5044 MIB.addReg(SrcReg, RegState::Implicit);
5045 break;
5046 case ARM::VMOVSR: {
5047 if (Domain != ExeNEON)
5048 break;
5049 assert(!isPredicated(MI) && "Cannot predicate a VSETLN");
5050
5051 // Source instruction is %SDst = VMOVSR %RSrc, 14, %noreg (; implicits)
5052 DstReg = MI.getOperand(0).getReg();
5053 SrcReg = MI.getOperand(1).getReg();
5054
5055 DReg = getCorrespondingDRegAndLane(TRI, DstReg, Lane);
5056
5057 unsigned ImplicitSReg;
5058 if (!getImplicitSPRUseForDPRUse(TRI, MI, DReg, Lane, ImplicitSReg))
5059 break;
5060
5061 for (unsigned i = MI.getDesc().getNumOperands(); i; --i)
5062 MI.RemoveOperand(i - 1);
5063
5064 // Convert to %DDst = VSETLNi32 %DDst, %RSrc, Lane, 14, %noreg (; imps)
5065 // Again DDst may be undefined at the beginning of this instruction.
5066 MI.setDesc(get(ARM::VSETLNi32));
5067 MIB.addReg(DReg, RegState::Define)
5068 .addReg(DReg, getUndefRegState(!MI.readsRegister(DReg, TRI)))
5069 .addReg(SrcReg)
5070 .addImm(Lane)
5071 .add(predOps(ARMCC::AL));
5072
5073 // The narrower destination must be marked as set to keep previous chains
5074 // in place.
5075 MIB.addReg(DstReg, RegState::Define | RegState::Implicit);
5076 if (ImplicitSReg != 0)
5077 MIB.addReg(ImplicitSReg, RegState::Implicit);
5078 break;
5079 }
5080 case ARM::VMOVS: {
5081 if (Domain != ExeNEON)
5082 break;
5083
5084 // Source instruction is %SDst = VMOVS %SSrc, 14, %noreg (; implicits)
5085 DstReg = MI.getOperand(0).getReg();
5086 SrcReg = MI.getOperand(1).getReg();
5087
5088 unsigned DstLane = 0, SrcLane = 0, DDst, DSrc;
5089 DDst = getCorrespondingDRegAndLane(TRI, DstReg, DstLane);
5090 DSrc = getCorrespondingDRegAndLane(TRI, SrcReg, SrcLane);
5091
5092 unsigned ImplicitSReg;
5093 if (!getImplicitSPRUseForDPRUse(TRI, MI, DSrc, SrcLane, ImplicitSReg))
5094 break;
5095
5096 for (unsigned i = MI.getDesc().getNumOperands(); i; --i)
5097 MI.RemoveOperand(i - 1);
5098
5099 if (DSrc == DDst) {
5100 // Destination can be:
5101 // %DDst = VDUPLN32d %DDst, Lane, 14, %noreg (; implicits)
5102 MI.setDesc(get(ARM::VDUPLN32d));
5103 MIB.addReg(DDst, RegState::Define)
5104 .addReg(DDst, getUndefRegState(!MI.readsRegister(DDst, TRI)))
5105 .addImm(SrcLane)
5106 .add(predOps(ARMCC::AL));
5107
5108       // Neither the source nor the destination is naturally represented any
5109       // more, so add them in manually.
5110 MIB.addReg(DstReg, RegState::Implicit | RegState::Define);
5111 MIB.addReg(SrcReg, RegState::Implicit);
5112 if (ImplicitSReg != 0)
5113 MIB.addReg(ImplicitSReg, RegState::Implicit);
5114 break;
5115 }
5116
5117 // In general there's no single instruction that can perform an S <-> S
5118 // move in NEON space, but a pair of VEXT instructions *can* do the
5119 // job. It turns out that the VEXTs needed will only use DSrc once, with
5120 // the position based purely on the combination of lane-0 and lane-1
5121 // involved. For example
5122 // vmov s0, s2 -> vext.32 d0, d0, d1, #1 vext.32 d0, d0, d0, #1
5123 // vmov s1, s3 -> vext.32 d0, d1, d0, #1 vext.32 d0, d0, d0, #1
5124 // vmov s0, s3 -> vext.32 d0, d0, d0, #1 vext.32 d0, d1, d0, #1
5125 // vmov s1, s2 -> vext.32 d0, d0, d0, #1 vext.32 d0, d0, d1, #1
5126 //
5127 // Pattern of the MachineInstrs is:
5128 // %DDst = VEXTd32 %DSrc1, %DSrc2, Lane, 14, %noreg (;implicits)
5129 MachineInstrBuilder NewMIB;
5130 NewMIB = BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), get(ARM::VEXTd32),
5131 DDst);
5132
5133     // On the first instruction, both DSrc and DDst may be undef if present,
5134     // specifically when the original instruction didn't have them as an
5135     // <imp-use>.
5136 unsigned CurReg = SrcLane == 1 && DstLane == 1 ? DSrc : DDst;
5137 bool CurUndef = !MI.readsRegister(CurReg, TRI);
5138 NewMIB.addReg(CurReg, getUndefRegState(CurUndef));
5139
5140 CurReg = SrcLane == 0 && DstLane == 0 ? DSrc : DDst;
5141 CurUndef = !MI.readsRegister(CurReg, TRI);
5142 NewMIB.addReg(CurReg, getUndefRegState(CurUndef))
5143 .addImm(1)
5144 .add(predOps(ARMCC::AL));
5145
5146 if (SrcLane == DstLane)
5147 NewMIB.addReg(SrcReg, RegState::Implicit);
5148
5149 MI.setDesc(get(ARM::VEXTd32));
5150 MIB.addReg(DDst, RegState::Define);
5151
5152 // On the second instruction, DDst has definitely been defined above, so
5153 // it is not undef. DSrc, if present, can be undef as above.
5154 CurReg = SrcLane == 1 && DstLane == 0 ? DSrc : DDst;
5155 CurUndef = CurReg == DSrc && !MI.readsRegister(CurReg, TRI);
5156 MIB.addReg(CurReg, getUndefRegState(CurUndef));
5157
5158 CurReg = SrcLane == 0 && DstLane == 1 ? DSrc : DDst;
5159 CurUndef = CurReg == DSrc && !MI.readsRegister(CurReg, TRI);
5160 MIB.addReg(CurReg, getUndefRegState(CurUndef))
5161 .addImm(1)
5162 .add(predOps(ARMCC::AL));
5163
5164 if (SrcLane != DstLane)
5165 MIB.addReg(SrcReg, RegState::Implicit);
5166
5167 // As before, the original destination is no longer represented, add it
5168 // implicitly.
5169 MIB.addReg(DstReg, RegState::Define | RegState::Implicit);
5170 if (ImplicitSReg != 0)
5171 MIB.addReg(ImplicitSReg, RegState::Implicit);
5172 break;
5173 }
5174 }
5175 }
5176
5177 //===----------------------------------------------------------------------===//
5178 // Partial register updates
5179 //===----------------------------------------------------------------------===//
5180 //
5181 // Swift renames NEON registers with 64-bit granularity. That means any
5182 // instruction writing an S-reg implicitly reads the containing D-reg. The
5183 // problem is mostly avoided by translating f32 operations to v2f32 operations
5184 // on D-registers, but f32 loads are still a problem.
5185 //
5186 // These instructions can load an f32 into a NEON register:
5187 //
5188 // VLDRS - Only writes S, partial D update.
5189 // VLD1LNd32 - Writes all D-regs, explicit partial D update, 2 uops.
5190 // VLD1DUPd32 - Writes all D-regs, no partial reg update, 2 uops.
5191 //
5192 // FCONSTD can be used as a dependency-breaking instruction.
5193 unsigned ARMBaseInstrInfo::getPartialRegUpdateClearance(
5194 const MachineInstr &MI, unsigned OpNum,
5195 const TargetRegisterInfo *TRI) const {
5196 auto PartialUpdateClearance = Subtarget.getPartialUpdateClearance();
5197 if (!PartialUpdateClearance)
5198 return 0;
5199
5200 assert(TRI && "Need TRI instance");
5201
5202 const MachineOperand &MO = MI.getOperand(OpNum);
5203 if (MO.readsReg())
5204 return 0;
5205 Register Reg = MO.getReg();
5206 int UseOp = -1;
5207
5208 switch (MI.getOpcode()) {
5209 // Normal instructions writing only an S-register.
5210 case ARM::VLDRS:
5211 case ARM::FCONSTS:
5212 case ARM::VMOVSR:
5213 case ARM::VMOVv8i8:
5214 case ARM::VMOVv4i16:
5215 case ARM::VMOVv2i32:
5216 case ARM::VMOVv2f32:
5217 case ARM::VMOVv1i64:
5218 UseOp = MI.findRegisterUseOperandIdx(Reg, false, TRI);
5219 break;
5220
5221 // Explicitly reads the dependency.
5222 case ARM::VLD1LNd32:
5223 UseOp = 3;
5224 break;
5225 default:
5226 return 0;
5227 }
5228
5229 // If this instruction actually reads a value from Reg, there is no unwanted
5230 // dependency.
5231 if (UseOp != -1 && MI.getOperand(UseOp).readsReg())
5232 return 0;
5233
5234 // We must be able to clobber the whole D-reg.
5235 if (Register::isVirtualRegister(Reg)) {
5236 // Virtual register must be a def undef foo:ssub_0 operand.
5237 if (!MO.getSubReg() || MI.readsVirtualRegister(Reg))
5238 return 0;
5239 } else if (ARM::SPRRegClass.contains(Reg)) {
5240 // Physical register: MI must define the full D-reg.
5241 unsigned DReg = TRI->getMatchingSuperReg(Reg, ARM::ssub_0,
5242 &ARM::DPRRegClass);
5243 if (!DReg || !MI.definesRegister(DReg, TRI))
5244 return 0;
5245 }
5246
5247 // MI has an unwanted D-register dependency.
5248   // Avoid defs in the previous N instructions.
5249 return PartialUpdateClearance;
5250 }
5251
5252 // Break a partial register dependency after getPartialRegUpdateClearance
5253 // returned non-zero.
5254 void ARMBaseInstrInfo::breakPartialRegDependency(
5255 MachineInstr &MI, unsigned OpNum, const TargetRegisterInfo *TRI) const {
5256 assert(OpNum < MI.getDesc().getNumDefs() && "OpNum is not a def");
5257 assert(TRI && "Need TRI instance");
5258
5259 const MachineOperand &MO = MI.getOperand(OpNum);
5260 Register Reg = MO.getReg();
5261 assert(Register::isPhysicalRegister(Reg) &&
5262 "Can't break virtual register dependencies.");
5263 unsigned DReg = Reg;
5264
5265 // If MI defines an S-reg, find the corresponding D super-register.
5266 if (ARM::SPRRegClass.contains(Reg)) {
5267 DReg = ARM::D0 + (Reg - ARM::S0) / 2;
5268 assert(TRI->isSuperRegister(Reg, DReg) && "Register enums broken");
5269 }
5270
5271 assert(ARM::DPRRegClass.contains(DReg) && "Can only break D-reg deps");
5272 assert(MI.definesRegister(DReg, TRI) && "MI doesn't clobber full D-reg");
5273
5274 // FIXME: In some cases, VLDRS can be changed to a VLD1DUPd32 which defines
5275 // the full D-register by loading the same value to both lanes. The
5276 // instruction is micro-coded with 2 uops, so don't do this until we can
5277 // properly schedule micro-coded instructions. The dispatcher stalls cause
5278 // too big regressions.
5279
5280 // Insert the dependency-breaking FCONSTD before MI.
5281 // 96 is the encoding of 0.5, but the actual value doesn't matter here.
5282 BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), get(ARM::FCONSTD), DReg)
5283 .addImm(96)
5284 .add(predOps(ARMCC::AL));
5285 MI.addRegisterKilled(DReg, TRI, true);
5286 }
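
// A pseudo-asm sketch of the result (register names are illustrative): a
// VLDRS that writes only S0 would otherwise merge into the stale value of D0
// on Swift, so the pass emits
//
//   vmov.f64 d0, #0.5   ; FCONSTD: full-D def, the constant is irrelevant
//   vldr     s0, [r0]   ; VLDRS now starts a fresh D0 live range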
5287
5288 bool ARMBaseInstrInfo::hasNOP() const {
5289 return Subtarget.getFeatureBits()[ARM::HasV6KOps];
5290 }
5291
5292 bool ARMBaseInstrInfo::isSwiftFastImmShift(const MachineInstr *MI) const {
5293 if (MI->getNumOperands() < 4)
5294 return true;
5295 unsigned ShOpVal = MI->getOperand(3).getImm();
5296 unsigned ShImm = ARM_AM::getSORegOffset(ShOpVal);
5297 // Swift supports faster shifts for: lsl 2, lsl 1, and lsr 1.
5298 if ((ShImm == 1 && ARM_AM::getSORegShOp(ShOpVal) == ARM_AM::lsr) ||
5299 ((ShImm == 1 || ShImm == 2) &&
5300 ARM_AM::getSORegShOp(ShOpVal) == ARM_AM::lsl))
5301 return true;
5302
5303 return false;
5304 }
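
// Examples of shifter operands this classifies as fast on Swift (sketch):
//
//   add r0, r1, r2, lsl #1   ; fast
//   add r0, r1, r2, lsl #2   ; fast
//   add r0, r1, r2, lsr #1   ; fast
//   add r0, r1, r2, lsl #3   ; not fast, falls through to return false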
5305
5306 bool ARMBaseInstrInfo::getRegSequenceLikeInputs(
5307 const MachineInstr &MI, unsigned DefIdx,
5308 SmallVectorImpl<RegSubRegPairAndIdx> &InputRegs) const {
5309 assert(DefIdx < MI.getDesc().getNumDefs() && "Invalid definition index");
5310 assert(MI.isRegSequenceLike() && "Invalid kind of instruction");
5311
5312 switch (MI.getOpcode()) {
5313 case ARM::VMOVDRR:
5314 // dX = VMOVDRR rY, rZ
5315 // is the same as:
5316 // dX = REG_SEQUENCE rY, ssub_0, rZ, ssub_1
5317 // Populate the InputRegs accordingly.
5318 // rY
5319 const MachineOperand *MOReg = &MI.getOperand(1);
5320 if (!MOReg->isUndef())
5321 InputRegs.push_back(RegSubRegPairAndIdx(MOReg->getReg(),
5322 MOReg->getSubReg(), ARM::ssub_0));
5323 // rZ
5324 MOReg = &MI.getOperand(2);
5325 if (!MOReg->isUndef())
5326 InputRegs.push_back(RegSubRegPairAndIdx(MOReg->getReg(),
5327 MOReg->getSubReg(), ARM::ssub_1));
5328 return true;
5329 }
5330 llvm_unreachable("Target dependent opcode missing");
5331 }
5332
5333 bool ARMBaseInstrInfo::getExtractSubregLikeInputs(
5334 const MachineInstr &MI, unsigned DefIdx,
5335 RegSubRegPairAndIdx &InputReg) const {
5336 assert(DefIdx < MI.getDesc().getNumDefs() && "Invalid definition index");
5337 assert(MI.isExtractSubregLike() && "Invalid kind of instruction");
5338
5339 switch (MI.getOpcode()) {
5340 case ARM::VMOVRRD:
5341 // rX, rY = VMOVRRD dZ
5342 // is the same as:
5343 // rX = EXTRACT_SUBREG dZ, ssub_0
5344 // rY = EXTRACT_SUBREG dZ, ssub_1
5345 const MachineOperand &MOReg = MI.getOperand(2);
5346 if (MOReg.isUndef())
5347 return false;
5348 InputReg.Reg = MOReg.getReg();
5349 InputReg.SubReg = MOReg.getSubReg();
5350 InputReg.SubIdx = DefIdx == 0 ? ARM::ssub_0 : ARM::ssub_1;
5351 return true;
5352 }
5353 llvm_unreachable("Target dependent opcode missing");
5354 }
5355
5356 bool ARMBaseInstrInfo::getInsertSubregLikeInputs(
5357 const MachineInstr &MI, unsigned DefIdx, RegSubRegPair &BaseReg,
5358 RegSubRegPairAndIdx &InsertedReg) const {
5359 assert(DefIdx < MI.getDesc().getNumDefs() && "Invalid definition index");
5360 assert(MI.isInsertSubregLike() && "Invalid kind of instruction");
5361
5362 switch (MI.getOpcode()) {
5363 case ARM::VSETLNi32:
5364 // dX = VSETLNi32 dY, rZ, imm
5365 const MachineOperand &MOBaseReg = MI.getOperand(1);
5366 const MachineOperand &MOInsertedReg = MI.getOperand(2);
5367 if (MOInsertedReg.isUndef())
5368 return false;
5369 const MachineOperand &MOIndex = MI.getOperand(3);
5370 BaseReg.Reg = MOBaseReg.getReg();
5371 BaseReg.SubReg = MOBaseReg.getSubReg();
5372
5373 InsertedReg.Reg = MOInsertedReg.getReg();
5374 InsertedReg.SubReg = MOInsertedReg.getSubReg();
5375 InsertedReg.SubIdx = MOIndex.getImm() == 0 ? ARM::ssub_0 : ARM::ssub_1;
5376 return true;
5377 }
5378 llvm_unreachable("Target dependent opcode missing");
5379 }
5380
5381 std::pair<unsigned, unsigned>
5382 ARMBaseInstrInfo::decomposeMachineOperandsTargetFlags(unsigned TF) const {
5383 const unsigned Mask = ARMII::MO_OPTION_MASK;
5384 return std::make_pair(TF & Mask, TF & ~Mask);
5385 }
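
// E.g. (an illustrative flag word, assuming MO_LO16 lies inside
// MO_OPTION_MASK as declared in ARMBaseInfo.h): TF = MO_LO16 | MO_GOT
// decomposes into the direct part MO_LO16 and the bitmask part MO_GOT; see
// the serializable flag tables below.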
5386
5387 ArrayRef<std::pair<unsigned, const char *>>
5388 ARMBaseInstrInfo::getSerializableDirectMachineOperandTargetFlags() const {
5389 using namespace ARMII;
5390
5391 static const std::pair<unsigned, const char *> TargetFlags[] = {
5392 {MO_LO16, "arm-lo16"}, {MO_HI16, "arm-hi16"}};
5393 return makeArrayRef(TargetFlags);
5394 }
5395
5396 ArrayRef<std::pair<unsigned, const char *>>
5397 ARMBaseInstrInfo::getSerializableBitmaskMachineOperandTargetFlags() const {
5398 using namespace ARMII;
5399
5400 static const std::pair<unsigned, const char *> TargetFlags[] = {
5401 {MO_COFFSTUB, "arm-coffstub"},
5402 {MO_GOT, "arm-got"},
5403 {MO_SBREL, "arm-sbrel"},
5404 {MO_DLLIMPORT, "arm-dllimport"},
5405 {MO_SECREL, "arm-secrel"},
5406 {MO_NONLAZY, "arm-nonlazy"}};
5407 return makeArrayRef(TargetFlags);
5408 }
5409
5410 Optional<RegImmPair> ARMBaseInstrInfo::isAddImmediate(const MachineInstr &MI,
5411 Register Reg) const {
5412 int Sign = 1;
5413 unsigned Opcode = MI.getOpcode();
5414 int64_t Offset = 0;
5415
5416 // TODO: Handle cases where Reg is a super- or sub-register of the
5417 // destination register.
5418 const MachineOperand &Op0 = MI.getOperand(0);
5419 if (!Op0.isReg() || Reg != Op0.getReg())
5420 return None;
5421
5422 // We describe SUBri or ADDri instructions.
5423 if (Opcode == ARM::SUBri)
5424 Sign = -1;
5425 else if (Opcode != ARM::ADDri)
5426 return None;
5427
5428   // TODO: Third operand can be a global address (usually some string). Since
5429   // strings can be relocated, we cannot calculate their offsets for
5430   // now.
5431 if (!MI.getOperand(1).isReg() || !MI.getOperand(2).isImm())
5432 return None;
5433
5434 Offset = MI.getOperand(2).getImm() * Sign;
5435 return RegImmPair{MI.getOperand(1).getReg(), Offset};
5436 }
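
// Example (hypothetical operands): for "%r0 = ADDri %r1, 16, ..." a query
// for %r0 yields RegImmPair{%r1, 16}; for "%r0 = SUBri %r1, 16, ..." it
// yields RegImmPair{%r1, -16}. A query for any other register returns None.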
5437
5438 bool llvm::registerDefinedBetween(unsigned Reg,
5439 MachineBasicBlock::iterator From,
5440 MachineBasicBlock::iterator To,
5441 const TargetRegisterInfo *TRI) {
5442 for (auto I = From; I != To; ++I)
5443 if (I->modifiesRegister(Reg, TRI))
5444 return true;
5445 return false;
5446 }
5447
5448 MachineInstr *llvm::findCMPToFoldIntoCBZ(MachineInstr *Br,
5449 const TargetRegisterInfo *TRI) {
5450   // Search backwards to the instruction that defines CPSR. This may or may not
5451   // be a CMP; we check that after this loop. If we find another instruction
5452   // that reads CPSR, we return nullptr.
5453 MachineBasicBlock::iterator CmpMI = Br;
5454 while (CmpMI != Br->getParent()->begin()) {
5455 --CmpMI;
5456 if (CmpMI->modifiesRegister(ARM::CPSR, TRI))
5457 break;
5458 if (CmpMI->readsRegister(ARM::CPSR, TRI))
5459 break;
5460 }
5461
5462 // Check that this inst is a CMP r[0-7], #0 and that the register
5463 // is not redefined between the cmp and the br.
5464 if (CmpMI->getOpcode() != ARM::tCMPi8 && CmpMI->getOpcode() != ARM::t2CMPri)
5465 return nullptr;
5466 Register Reg = CmpMI->getOperand(0).getReg();
5467 Register PredReg;
5468 ARMCC::CondCodes Pred = getInstrPredicate(*CmpMI, PredReg);
5469 if (Pred != ARMCC::AL || CmpMI->getOperand(1).getImm() != 0)
5470 return nullptr;
5471 if (!isARMLowRegister(Reg))
5472 return nullptr;
5473 if (registerDefinedBetween(Reg, CmpMI->getNextNode(), Br, TRI))
5474 return nullptr;
5475
5476 return &*CmpMI;
5477 }
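
// The rewrite this enables (a Thumb2 sketch):
//
//   cmp r0, #0         ; the tCMPi8/t2CMPri found by the backwards scan
//   beq .LBB0_2        ; Br
// =>
//   cbz r0, .LBB0_2    ; valid because r0 is a low register and is not
//                      ; redefined between the cmp and the branch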
5478
5479 unsigned llvm::ConstantMaterializationCost(unsigned Val,
5480 const ARMSubtarget *Subtarget,
5481 bool ForCodesize) {
5482 if (Subtarget->isThumb()) {
5483 if (Val <= 255) // MOV
5484 return ForCodesize ? 2 : 1;
5485 if (Subtarget->hasV6T2Ops() && (Val <= 0xffff || // MOV
5486 ARM_AM::getT2SOImmVal(Val) != -1 || // MOVW
5487 ARM_AM::getT2SOImmVal(~Val) != -1)) // MVN
5488 return ForCodesize ? 4 : 1;
5489 if (Val <= 510) // MOV + ADDi8
5490 return ForCodesize ? 4 : 2;
5491 if (~Val <= 255) // MOV + MVN
5492 return ForCodesize ? 4 : 2;
5493 if (ARM_AM::isThumbImmShiftedVal(Val)) // MOV + LSL
5494 return ForCodesize ? 4 : 2;
5495 } else {
5496 if (ARM_AM::getSOImmVal(Val) != -1) // MOV
5497 return ForCodesize ? 4 : 1;
5498 if (ARM_AM::getSOImmVal(~Val) != -1) // MVN
5499 return ForCodesize ? 4 : 1;
5500 if (Subtarget->hasV6T2Ops() && Val <= 0xffff) // MOVW
5501 return ForCodesize ? 4 : 1;
5502 if (ARM_AM::isSOImmTwoPartVal(Val)) // two instrs
5503 return ForCodesize ? 8 : 2;
5504 }
5505 if (Subtarget->useMovt()) // MOVW + MOVT
5506 return ForCodesize ? 8 : 2;
5507 return ForCodesize ? 8 : 3; // Literal pool load
5508 }
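
// Worked examples (values are illustrative): on a Thumb1-only core without
// MOVW, Val = 510 is too big for a single MOV but fits MOV #255 + ADDi8 #255,
// so the cost is ForCodesize ? 4 : 2. An arbitrary constant such as
// 0x12345678 matches none of the single- or two-instruction patterns and, on
// a target with MOVT, falls through to MOVW + MOVT: 8 bytes or 2 instructions.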
5509
5510 bool llvm::HasLowerConstantMaterializationCost(unsigned Val1, unsigned Val2,
5511 const ARMSubtarget *Subtarget,
5512 bool ForCodesize) {
5513 // Check with ForCodesize
5514 unsigned Cost1 = ConstantMaterializationCost(Val1, Subtarget, ForCodesize);
5515 unsigned Cost2 = ConstantMaterializationCost(Val2, Subtarget, ForCodesize);
5516 if (Cost1 < Cost2)
5517 return true;
5518 if (Cost1 > Cost2)
5519 return false;
5520
5521 // If they are equal, try with !ForCodesize
5522 return ConstantMaterializationCost(Val1, Subtarget, !ForCodesize) <
5523 ConstantMaterializationCost(Val2, Subtarget, !ForCodesize);
5524 }
5525
5526 /// Constants defining how certain sequences should be outlined.
5527 /// This encompasses how an outlined function should be called, and what kind of
5528 /// frame should be emitted for that outlined function.
5529 ///
5530 /// \p MachineOutlinerTailCall implies that the function is being created from
5531 /// a sequence of instructions ending in a return.
5532 ///
5533 /// That is,
5534 ///
5535 /// I1 OUTLINED_FUNCTION:
5536 /// I2 --> B OUTLINED_FUNCTION I1
5537 /// BX LR I2
5538 /// BX LR
5539 ///
5540 /// +-------------------------+--------+-----+
5541 /// | | Thumb2 | ARM |
5542 /// +-------------------------+--------+-----+
5543 /// | Call overhead in Bytes | 4 | 4 |
5544 /// | Frame overhead in Bytes | 0 | 0 |
5545 /// | Stack fixup required | No | No |
5546 /// +-------------------------+--------+-----+
5547 ///
5548 /// \p MachineOutlinerThunk implies that the function is being created from
5549 /// a sequence of instructions ending in a call. The outlined function is
5550 /// called with a BL instruction, and the outlined function tail-calls the
5551 /// original call destination.
5552 ///
5553 /// That is,
5554 ///
5555 /// I1 OUTLINED_FUNCTION:
5556 /// I2 --> BL OUTLINED_FUNCTION I1
5557 /// BL f I2
5558 /// B f
5559 ///
5560 /// +-------------------------+--------+-----+
5561 /// | | Thumb2 | ARM |
5562 /// +-------------------------+--------+-----+
5563 /// | Call overhead in Bytes | 4 | 4 |
5564 /// | Frame overhead in Bytes | 0 | 0 |
5565 /// | Stack fixup required | No | No |
5566 /// +-------------------------+--------+-----+
5567 ///
5568 /// \p MachineOutlinerNoLRSave implies that the function should be called using
5569 /// a BL instruction, but doesn't require LR to be saved and restored. This
5570 /// happens when LR is known to be dead.
5571 ///
5572 /// That is,
5573 ///
5574 /// I1 OUTLINED_FUNCTION:
5575 /// I2 --> BL OUTLINED_FUNCTION I1
5576 /// I3 I2
5577 /// I3
5578 /// BX LR
5579 ///
5580 /// +-------------------------+--------+-----+
5581 /// | | Thumb2 | ARM |
5582 /// +-------------------------+--------+-----+
5583 /// | Call overhead in Bytes | 4 | 4 |
5584 /// | Frame overhead in Bytes | 4 | 4 |
5585 /// | Stack fixup required | No | No |
5586 /// +-------------------------+--------+-----+
5587 ///
5588 /// \p MachineOutlinerRegSave implies that the function should be called with a
5589 /// save and restore of LR to an available register. This allows us to avoid
5590 /// stack fixups. Note that this outlining variant is compatible with the
5591 /// NoLRSave case.
5592 ///
5593 /// That is,
5594 ///
5595 /// I1 Save LR OUTLINED_FUNCTION:
5596 /// I2 --> BL OUTLINED_FUNCTION I1
5597 /// I3 Restore LR I2
5598 /// I3
5599 /// BX LR
5600 ///
5601 /// +-------------------------+--------+-----+
5602 /// | | Thumb2 | ARM |
5603 /// +-------------------------+--------+-----+
5604 /// | Call overhead in Bytes | 8 | 12 |
5605 /// | Frame overhead in Bytes | 2 | 4 |
5606 /// | Stack fixup required | No | No |
5607 /// +-------------------------+--------+-----+
5608
5609 enum MachineOutlinerClass {
5610 MachineOutlinerTailCall,
5611 MachineOutlinerThunk,
5612 MachineOutlinerNoLRSave,
5613 MachineOutlinerRegSave
5614 };
5615
5616 enum MachineOutlinerMBBFlags {
5617 LRUnavailableSomewhere = 0x2,
5618 HasCalls = 0x4,
5619 UnsafeRegsDead = 0x8
5620 };
5621
5622 struct OutlinerCosts {
5623 const int CallTailCall;
5624 const int FrameTailCall;
5625 const int CallThunk;
5626 const int FrameThunk;
5627 const int CallNoLRSave;
5628 const int FrameNoLRSave;
5629 const int CallRegSave;
5630 const int FrameRegSave;
5631
5632   OutlinerCosts(const ARMSubtarget &target)
5633 : CallTailCall(target.isThumb() ? 4 : 4),
5634 FrameTailCall(target.isThumb() ? 0 : 0),
5635 CallThunk(target.isThumb() ? 4 : 4),
5636 FrameThunk(target.isThumb() ? 0 : 0),
5637 CallNoLRSave(target.isThumb() ? 4 : 4),
5638 FrameNoLRSave(target.isThumb() ? 4 : 4),
5639 CallRegSave(target.isThumb() ? 8 : 12),
5640 FrameRegSave(target.isThumb() ? 2 : 4) {}
5641 };
5642
5643 unsigned
5644 ARMBaseInstrInfo::findRegisterToSaveLRTo(const outliner::Candidate &C) const {
5645 assert(C.LRUWasSet && "LRU wasn't set?");
5646 MachineFunction *MF = C.getMF();
5647 const ARMBaseRegisterInfo *ARI = static_cast<const ARMBaseRegisterInfo *>(
5648 MF->getSubtarget().getRegisterInfo());
5649
5650 BitVector regsReserved = ARI->getReservedRegs(*MF);
5651 // Check if there is an available register across the sequence that we can
5652 // use.
5653 for (unsigned Reg : ARM::rGPRRegClass) {
5654 if (!(Reg < regsReserved.size() && regsReserved.test(Reg)) &&
5655 Reg != ARM::LR && // LR is not reserved, but don't use it.
5656 Reg != ARM::R12 && // R12 is not guaranteed to be preserved.
5657 C.LRU.available(Reg) && C.UsedInSequence.available(Reg))
5658 return Reg;
5659 }
5660
5661 // No suitable register. Return 0.
5662 return 0u;
5663 }
5664
5665 outliner::OutlinedFunction ARMBaseInstrInfo::getOutliningCandidateInfo(
5666 std::vector<outliner::Candidate> &RepeatedSequenceLocs) const {
5667 outliner::Candidate &FirstCand = RepeatedSequenceLocs[0];
5668 unsigned SequenceSize =
5669 std::accumulate(FirstCand.front(), std::next(FirstCand.back()), 0,
5670 [this](unsigned Sum, const MachineInstr &MI) {
5671 return Sum + getInstSizeInBytes(MI);
5672 });
5673
5674 // Properties about candidate MBBs that hold for all of them.
5675 unsigned FlagsSetInAll = 0xF;
5676
5677 // Compute liveness information for each candidate, and set FlagsSetInAll.
5678 const TargetRegisterInfo &TRI = getRegisterInfo();
5679 std::for_each(
5680 RepeatedSequenceLocs.begin(), RepeatedSequenceLocs.end(),
5681 [&FlagsSetInAll](outliner::Candidate &C) { FlagsSetInAll &= C.Flags; });
5682
5683 // According to the ARM Procedure Call Standard, the following are
5684 // undefined on entry/exit from a function call:
5685 //
5686 // * Register R12(IP),
5687 // * Condition codes (and thus the CPSR register)
5688 //
5689 // Since we control the instructions which are part of the outlined regions
5690 // we don't need to be fully compliant with the AAPCS, but we have to
5691 // guarantee that if a veneer is inserted at link time the code is still
5692 // correct. Because of this, we can't outline any sequence of instructions
5693 // where one of these registers is live into/across it. Thus, we need to
5694 // delete those candidates.
5695 auto CantGuaranteeValueAcrossCall = [&TRI](outliner::Candidate &C) {
5696 // If the unsafe registers in this block are all dead, then we don't need
5697 // to compute liveness here.
5698 if (C.Flags & UnsafeRegsDead)
5699 return false;
5700 C.initLRU(TRI);
5701 LiveRegUnits LRU = C.LRU;
5702 return (!LRU.available(ARM::R12) || !LRU.available(ARM::CPSR));
5703 };
5704
5705 // Are there any candidates where those registers are live?
5706 if (!(FlagsSetInAll & UnsafeRegsDead)) {
5707 // Erase every candidate that violates the restrictions above. (It could be
5708 // true that we have viable candidates, so it's not worth bailing out in
5709     // the case that, say, 1 out of 20 candidates violate the restrictions.)
5710 RepeatedSequenceLocs.erase(std::remove_if(RepeatedSequenceLocs.begin(),
5711 RepeatedSequenceLocs.end(),
5712 CantGuaranteeValueAcrossCall),
5713 RepeatedSequenceLocs.end());
5714
5715 // If the sequence doesn't have enough candidates left, then we're done.
5716 if (RepeatedSequenceLocs.size() < 2)
5717 return outliner::OutlinedFunction();
5718 }
5719
5720 // At this point, we have only "safe" candidates to outline. Figure out
5721 // frame + call instruction information.
5722
5723 unsigned LastInstrOpcode = RepeatedSequenceLocs[0].back()->getOpcode();
5724
5725 // Helper lambda which sets call information for every candidate.
5726 auto SetCandidateCallInfo =
5727 [&RepeatedSequenceLocs](unsigned CallID, unsigned NumBytesForCall) {
5728 for (outliner::Candidate &C : RepeatedSequenceLocs)
5729 C.setCallInfo(CallID, NumBytesForCall);
5730 };
5731
5732 OutlinerCosts Costs(Subtarget);
5733 unsigned FrameID = 0;
5734 unsigned NumBytesToCreateFrame = 0;
5735
5736 // If the last instruction in any candidate is a terminator, then we should
5737 // tail call all of the candidates.
5738 if (RepeatedSequenceLocs[0].back()->isTerminator()) {
5739 FrameID = MachineOutlinerTailCall;
5740 NumBytesToCreateFrame = Costs.FrameTailCall;
5741 SetCandidateCallInfo(MachineOutlinerTailCall, Costs.CallTailCall);
5742 } else if (LastInstrOpcode == ARM::BL || LastInstrOpcode == ARM::BLX ||
5743 LastInstrOpcode == ARM::tBL || LastInstrOpcode == ARM::tBLXr ||
5744 LastInstrOpcode == ARM::tBLXi) {
5745 FrameID = MachineOutlinerThunk;
5746 NumBytesToCreateFrame = Costs.FrameThunk;
5747 SetCandidateCallInfo(MachineOutlinerThunk, Costs.CallThunk);
5748 } else {
5749 // We need to decide how to emit calls + frames. We can always emit the same
5750 // frame if we don't need to save to the stack.
5751 unsigned NumBytesNoStackCalls = 0;
5752 std::vector<outliner::Candidate> CandidatesWithoutStackFixups;
5753
5754 for (outliner::Candidate &C : RepeatedSequenceLocs) {
5755 C.initLRU(TRI);
5756
5757 // Is LR available? If so, we don't need a save.
5758 if (C.LRU.available(ARM::LR)) {
5759 FrameID = MachineOutlinerNoLRSave;
5760 NumBytesNoStackCalls += Costs.CallNoLRSave;
5761 C.setCallInfo(MachineOutlinerNoLRSave, Costs.CallNoLRSave);
5762 CandidatesWithoutStackFixups.push_back(C);
5763 }
5764
5765 // Is an unused register available? If so, we won't modify the stack, so
5766 // we can outline with the same frame type as those that don't save LR.
5767 else if (findRegisterToSaveLRTo(C)) {
5768 FrameID = MachineOutlinerRegSave;
5769 NumBytesNoStackCalls += Costs.CallRegSave;
5770 C.setCallInfo(MachineOutlinerRegSave, Costs.CallRegSave);
5771 CandidatesWithoutStackFixups.push_back(C);
5772 }
5773 }
5774
5775 if (!CandidatesWithoutStackFixups.empty()) {
5776 RepeatedSequenceLocs = CandidatesWithoutStackFixups;
5777 } else
5778 return outliner::OutlinedFunction();
5779 }
5780
5781 return outliner::OutlinedFunction(RepeatedSequenceLocs, SequenceSize,
5782 NumBytesToCreateFrame, FrameID);
5783 }
5784
5785 bool ARMBaseInstrInfo::isFunctionSafeToOutlineFrom(
5786 MachineFunction &MF, bool OutlineFromLinkOnceODRs) const {
5787 const Function &F = MF.getFunction();
5788
5789 // Can F be deduplicated by the linker? If it can, don't outline from it.
5790 if (!OutlineFromLinkOnceODRs && F.hasLinkOnceODRLinkage())
5791 return false;
5792
5793 // Don't outline from functions with section markings; the program could
5794 // expect that all the code is in the named section.
5795 // FIXME: Allow outlining from multiple functions with the same section
5796 // marking.
5797 if (F.hasSection())
5798 return false;
5799
5800 // FIXME: Thumb1 outlining is not handled
5801 if (MF.getInfo<ARMFunctionInfo>()->isThumb1OnlyFunction())
5802 return false;
5803
5804 // It's safe to outline from MF.
5805 return true;
5806 }
5807
5808 bool ARMBaseInstrInfo::isMBBSafeToOutlineFrom(MachineBasicBlock &MBB,
5809 unsigned &Flags) const {
5810 // Check if LR is available through all of the MBB. If it's not, then set
5811 // a flag.
5812 assert(MBB.getParent()->getRegInfo().tracksLiveness() &&
5813 "Suitable Machine Function for outlining must track liveness");
5814
5815 LiveRegUnits LRU(getRegisterInfo());
5816
5817 std::for_each(MBB.rbegin(), MBB.rend(),
5818 [&LRU](MachineInstr &MI) { LRU.accumulate(MI); });
5819
5820   // Check if each of the unsafe registers is available...
5821 bool R12AvailableInBlock = LRU.available(ARM::R12);
5822 bool CPSRAvailableInBlock = LRU.available(ARM::CPSR);
5823
5824 // If all of these are dead (and not live out), we know we don't have to check
5825 // them later.
5826 if (R12AvailableInBlock && CPSRAvailableInBlock)
5827 Flags |= MachineOutlinerMBBFlags::UnsafeRegsDead;
5828
5829 // Now, add the live outs to the set.
5830 LRU.addLiveOuts(MBB);
5831
5832 // If any of these registers is available in the MBB, but also a live out of
5833 // the block, then we know outlining is unsafe.
5834 if (R12AvailableInBlock && !LRU.available(ARM::R12))
5835 return false;
5836 if (CPSRAvailableInBlock && !LRU.available(ARM::CPSR))
5837 return false;
5838
5839 // Check if there's a call inside this MachineBasicBlock. If there is, then
5840 // set a flag.
5841 if (any_of(MBB, [](MachineInstr &MI) { return MI.isCall(); }))
5842 Flags |= MachineOutlinerMBBFlags::HasCalls;
5843
5844 if (!LRU.available(ARM::LR))
5845 Flags |= MachineOutlinerMBBFlags::LRUnavailableSomewhere;
5846
5847 return true;
5848 }
5849
5850 outliner::InstrType
5851 ARMBaseInstrInfo::getOutliningType(MachineBasicBlock::iterator &MIT,
5852 unsigned Flags) const {
5853 MachineInstr &MI = *MIT;
5854 const TargetRegisterInfo *TRI = &getRegisterInfo();
5855
5856 // Be conservative with inline ASM
5857 if (MI.isInlineAsm())
5858 return outliner::InstrType::Illegal;
5859
5860 // Don't allow debug values to impact outlining type.
5861 if (MI.isDebugInstr() || MI.isIndirectDebugValue())
5862 return outliner::InstrType::Invisible;
5863
5864 // At this point, KILL or IMPLICIT_DEF instructions don't really tell us much
5865 // so we can go ahead and skip over them.
5866 if (MI.isKill() || MI.isImplicitDef())
5867 return outliner::InstrType::Invisible;
5868
5869   // PIC instructions contain labels; outlining them would break offset
5870   // computation.
5871 unsigned Opc = MI.getOpcode();
5872 if (Opc == ARM::tPICADD || Opc == ARM::PICADD || Opc == ARM::PICSTR ||
5873 Opc == ARM::PICSTRB || Opc == ARM::PICSTRH || Opc == ARM::PICLDR ||
5874 Opc == ARM::PICLDRB || Opc == ARM::PICLDRH || Opc == ARM::PICLDRSB ||
5875 Opc == ARM::PICLDRSH || Opc == ARM::t2LDRpci_pic ||
5876 Opc == ARM::t2MOVi16_ga_pcrel || Opc == ARM::t2MOVTi16_ga_pcrel ||
5877 Opc == ARM::t2MOV_ga_pcrel)
5878 return outliner::InstrType::Illegal;
5879
5880 // Be conservative with ARMv8.1 MVE instructions.
5881 if (Opc == ARM::t2BF_LabelPseudo || Opc == ARM::t2DoLoopStart ||
5882 Opc == ARM::t2WhileLoopStart || Opc == ARM::t2LoopDec ||
5883 Opc == ARM::t2LoopEnd)
5884 return outliner::InstrType::Illegal;
5885
5886 const MCInstrDesc &MCID = MI.getDesc();
5887 uint64_t MIFlags = MCID.TSFlags;
5888 if ((MIFlags & ARMII::DomainMask) == ARMII::DomainMVE)
5889 return outliner::InstrType::Illegal;
5890
5891 // Is this a terminator for a basic block?
5892 if (MI.isTerminator()) {
5893 // Don't outline if the branch is not unconditional.
5894 if (isPredicated(MI))
5895 return outliner::InstrType::Illegal;
5896
5897 // Is this the end of a function?
5898 if (MI.getParent()->succ_empty())
5899 return outliner::InstrType::Legal;
5900
5901 // It's not, so don't outline it.
5902 return outliner::InstrType::Illegal;
5903 }
5904
5905 // Make sure none of the operands are un-outlinable.
5906 for (const MachineOperand &MOP : MI.operands()) {
5907 if (MOP.isCPI() || MOP.isJTI() || MOP.isCFIIndex() || MOP.isFI() ||
5908 MOP.isTargetIndex())
5909 return outliner::InstrType::Illegal;
5910 }
5911
5912 // Don't outline if link register or program counter value are used.
5913 if (MI.readsRegister(ARM::LR, TRI) || MI.readsRegister(ARM::PC, TRI))
5914 return outliner::InstrType::Illegal;
5915
5916 if (MI.isCall()) {
5917 // If we don't know anything about the callee, assume it depends on the
5918 // stack layout of the caller. In that case, it's only legal to outline
5919 // as a tail-call. Explicitly list the call instructions we know about so
5920 // we don't get unexpected results with call pseudo-instructions.
5921 auto UnknownCallOutlineType = outliner::InstrType::Illegal;
5922 if (Opc == ARM::BL || Opc == ARM::tBL || Opc == ARM::BLX ||
5923 Opc == ARM::tBLXr || Opc == ARM::tBLXi)
5924 UnknownCallOutlineType = outliner::InstrType::LegalTerminator;
5925
5926 return UnknownCallOutlineType;
5927 }
5928
5929 // Since calls are handled, don't touch LR or PC
5930 if (MI.modifiesRegister(ARM::LR, TRI) || MI.modifiesRegister(ARM::PC, TRI))
5931 return outliner::InstrType::Illegal;
5932
5933 // Does this use the stack?
5934 if (MI.modifiesRegister(ARM::SP, TRI) || MI.readsRegister(ARM::SP, TRI)) {
5935 // True if there is no chance that any outlined candidate from this range
5936 // could require stack fixups. That is, both
5937 // * LR is available in the range (No save/restore around call)
5938 // * The range doesn't include calls (No save/restore in outlined frame)
5939 // are true.
5940 // FIXME: This is very restrictive; the flags check the whole block,
5941 // not just the bit we will try to outline.
5942 bool MightNeedStackFixUp =
5943 (Flags & (MachineOutlinerMBBFlags::LRUnavailableSomewhere |
5944 MachineOutlinerMBBFlags::HasCalls));
5945
5946 if (!MightNeedStackFixUp)
5947 return outliner::InstrType::Legal;
5948
5949 return outliner::InstrType::Illegal;
5950 }
5951
5952 // Be conservative with IT blocks.
5953 if (MI.readsRegister(ARM::ITSTATE, TRI) ||
5954 MI.modifiesRegister(ARM::ITSTATE, TRI))
5955 return outliner::InstrType::Illegal;
5956
5957 // Don't outline positions.
5958 if (MI.isPosition())
5959 return outliner::InstrType::Illegal;
5960
5961 return outliner::InstrType::Legal;
5962 }
5963
5964 void ARMBaseInstrInfo::buildOutlinedFrame(
5965 MachineBasicBlock &MBB, MachineFunction &MF,
5966 const outliner::OutlinedFunction &OF) const {
5967 // Nothing is needed for tail-calls.
5968 if (OF.FrameConstructionID == MachineOutlinerTailCall)
5969 return;
5970
5971 // For thunk outlining, rewrite the last instruction from a call to a
5972 // tail-call.
5973 if (OF.FrameConstructionID == MachineOutlinerThunk) {
5974 MachineInstr *Call = &*--MBB.instr_end();
5975 bool isThumb = Subtarget.isThumb();
5976 unsigned FuncOp = isThumb ? 2 : 0;
5977 unsigned Opc = Call->getOperand(FuncOp).isReg()
5978 ? isThumb ? ARM::tTAILJMPr : ARM::TAILJMPr
5979 : isThumb ? Subtarget.isTargetMachO() ? ARM::tTAILJMPd
5980 : ARM::tTAILJMPdND
5981 : ARM::TAILJMPd;
5982 MachineInstrBuilder MIB = BuildMI(MBB, MBB.end(), DebugLoc(), get(Opc))
5983 .add(Call->getOperand(FuncOp));
5984 if (isThumb && !Call->getOperand(FuncOp).isReg())
5985 MIB.add(predOps(ARMCC::AL));
5986 Call->eraseFromParent();
5987 return;
5988 }
5989
5990   // Here we have to insert the return ourselves. Get the correct opcode from
5991   // the current feature set.
5992 BuildMI(MBB, MBB.end(), DebugLoc(), get(Subtarget.getReturnOpcode()))
5993 .add(predOps(ARMCC::AL));
5994 }
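
// E.g. for a thunk candidate whose sequence ends in "BL f" (sketch), the
// trailing call is rewritten so the outlined body tail-calls the original
// destination:
//
//   I1                      I1
//   BL f          =>        B f         ; TAILJMPd / tTAILJMPd(ND)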
5995
5996 MachineBasicBlock::iterator ARMBaseInstrInfo::insertOutlinedCall(
5997 Module &M, MachineBasicBlock &MBB, MachineBasicBlock::iterator &It,
5998 MachineFunction &MF, const outliner::Candidate &C) const {
5999 MachineInstrBuilder MIB;
6000 MachineBasicBlock::iterator CallPt;
6001 unsigned Opc;
6002 bool isThumb = Subtarget.isThumb();
6003
6004 // Are we tail calling?
6005 if (C.CallConstructionID == MachineOutlinerTailCall) {
6006 // If yes, then we can just branch to the label.
6007 Opc = isThumb
6008 ? Subtarget.isTargetMachO() ? ARM::tTAILJMPd : ARM::tTAILJMPdND
6009 : ARM::TAILJMPd;
6010 MIB = BuildMI(MF, DebugLoc(), get(Opc))
6011 .addGlobalAddress(M.getNamedValue(MF.getName()));
6012 if (isThumb)
6013 MIB.add(predOps(ARMCC::AL));
6014 It = MBB.insert(It, MIB);
6015 return It;
6016 }
6017
6018 // Create the call instruction.
6019 Opc = isThumb ? ARM::tBL : ARM::BL;
6020 MachineInstrBuilder CallMIB = BuildMI(MF, DebugLoc(), get(Opc));
6021 if (isThumb)
6022 CallMIB.add(predOps(ARMCC::AL));
6023 CallMIB.addGlobalAddress(M.getNamedValue(MF.getName()));
6024
6025 // Can we save to a register?
6026 if (C.CallConstructionID == MachineOutlinerRegSave) {
6027 unsigned Reg = findRegisterToSaveLRTo(C);
6028 assert(Reg != 0 && "No callee-saved register available?");
6029
6030 // Save and restore LR from that register.
6031 if (!MBB.isLiveIn(ARM::LR))
6032 MBB.addLiveIn(ARM::LR);
6033 copyPhysReg(MBB, It, DebugLoc(), Reg, ARM::LR, true);
6034 CallPt = MBB.insert(It, CallMIB);
6035 copyPhysReg(MBB, It, DebugLoc(), ARM::LR, Reg, true);
6036 It--;
6037 return CallPt;
6038 }
6039 // Insert the call.
6040 It = MBB.insert(It, CallMIB);
6041 return It;
6042 }
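
// For the RegSave flavour the emitted sequence looks like this (a sketch,
// with r4 standing for whatever register findRegisterToSaveLRTo picked):
//
//   mov r4, lr              ; copyPhysReg: save LR
//   bl  OUTLINED_FUNCTION_N ; CallMIB
//   mov lr, r4              ; copyPhysReg: restore LR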
6043