//===- Thumb2InstrInfo.cpp - Thumb-2 Instruction Information --------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the Thumb-2 implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#include "Thumb2InstrInfo.h"
#include "ARMMachineFunctionInfo.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetMachine.h"
#include <cassert>

using namespace llvm;

static cl::opt<bool>
OldT2IfCvt("old-thumb2-ifcvt", cl::Hidden,
           cl::desc("Use old-style Thumb2 if-conversion heuristics"),
           cl::init(false));

Thumb2InstrInfo::Thumb2InstrInfo(const ARMSubtarget &STI)
    : ARMBaseInstrInfo(STI) {}

/// Return the noop instruction to use for a noop.
void Thumb2InstrInfo::getNoop(MCInst &NopInst) const {
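  // NOP is encoded as the hint instruction with immediate 0, followed by the
  // usual predicate operands (AL condition code and no predicate register).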
  NopInst.setOpcode(ARM::tHINT);
  NopInst.addOperand(MCOperand::createImm(0));
  NopInst.addOperand(MCOperand::createImm(ARMCC::AL));
  NopInst.addOperand(MCOperand::createReg(0));
}

unsigned Thumb2InstrInfo::getUnindexedOpcode(unsigned Opc) const {
  // FIXME
  return 0;
}

void
Thumb2InstrInfo::ReplaceTailWithBranchTo(MachineBasicBlock::iterator Tail,
                                         MachineBasicBlock *NewDest) const {
  MachineBasicBlock *MBB = Tail->getParent();
  ARMFunctionInfo *AFI = MBB->getParent()->getInfo<ARMFunctionInfo>();
  if (!AFI->hasITBlocks() || Tail->isBranch()) {
    TargetInstrInfo::ReplaceTailWithBranchTo(Tail, NewDest);
    return;
  }

  // If the first instruction of Tail is predicated, we may have to update
  // the IT instruction.
  unsigned PredReg = 0;
  ARMCC::CondCodes CC = getInstrPredicate(*Tail, PredReg);
  MachineBasicBlock::iterator MBBI = Tail;
  if (CC != ARMCC::AL)
    // Expecting at least the t2IT instruction before it.
    --MBBI;

  // Actually replace the tail.
  TargetInstrInfo::ReplaceTailWithBranchTo(Tail, NewDest);

  // Fix up IT.
  if (CC != ARMCC::AL) {
    MachineBasicBlock::iterator E = MBB->begin();
    unsigned Count = 4; // At most 4 instructions in an IT block.
    while (Count && MBBI != E) {
      if (MBBI->isDebugInstr()) {
        --MBBI;
        continue;
      }
      if (MBBI->getOpcode() == ARM::t2IT) {
        unsigned Mask = MBBI->getOperand(1).getImm();
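        // If Count is still 4, the branch we just inserted was the first
        // instruction of the IT block, so the IT is no longer needed at all.
        // Otherwise keep the mask bits above Count and set bit Count, moving
        // the terminating '1' so the IT covers only the instructions that
        // remain before the new branch.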
        if (Count == 4)
          MBBI->eraseFromParent();
        else {
          unsigned MaskOn = 1 << Count;
          unsigned MaskOff = ~(MaskOn - 1);
          MBBI->getOperand(1).setImm((Mask & MaskOff) | MaskOn);
        }
        return;
      }
      --MBBI;
      --Count;
    }

    // Control flow can reach here if branch folding is run before the IT
    // block formation pass.
  }
}

bool
Thumb2InstrInfo::isLegalToSplitMBBAt(MachineBasicBlock &MBB,
                                     MachineBasicBlock::iterator MBBI) const {
  while (MBBI->isDebugInstr()) {
    ++MBBI;
    if (MBBI == MBB.end())
      return false;
  }

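  // Do not split in the middle of an IT block: the instruction at the split
  // point must not carry an IT predicate (conditional branches report AL here
  // and are fine).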
  unsigned PredReg = 0;
  return getITInstrPredicate(*MBBI, PredReg) == ARMCC::AL;
}

void Thumb2InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                                  MachineBasicBlock::iterator I,
                                  const DebugLoc &DL, MCRegister DestReg,
                                  MCRegister SrcReg, bool KillSrc) const {
  // Handle SPR, DPR, and QPR copies.
  if (!ARM::GPRRegClass.contains(DestReg, SrcReg))
    return ARMBaseInstrInfo::copyPhysReg(MBB, I, DL, DestReg, SrcReg, KillSrc);

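  // GPR-to-GPR copies can use tMOVr, which handles the high registers as well
  // as the low ones.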
  BuildMI(MBB, I, DL, get(ARM::tMOVr), DestReg)
      .addReg(SrcReg, getKillRegState(KillSrc))
      .add(predOps(ARMCC::AL));
}

void Thumb2InstrInfo::
storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
                    unsigned SrcReg, bool isKill, int FI,
                    const TargetRegisterClass *RC,
                    const TargetRegisterInfo *TRI) const {
  DebugLoc DL;
  if (I != MBB.end()) DL = I->getDebugLoc();

  MachineFunction &MF = *MBB.getParent();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MachineMemOperand *MMO = MF.getMachineMemOperand(
      MachinePointerInfo::getFixedStack(MF, FI), MachineMemOperand::MOStore,
      MFI.getObjectSize(FI), MFI.getObjectAlignment(FI));

  if (ARM::GPRRegClass.hasSubClassEq(RC)) {
    BuildMI(MBB, I, DL, get(ARM::t2STRi12))
        .addReg(SrcReg, getKillRegState(isKill))
        .addFrameIndex(FI)
        .addImm(0)
        .addMemOperand(MMO)
        .add(predOps(ARMCC::AL));
    return;
  }

  if (ARM::GPRPairRegClass.hasSubClassEq(RC)) {
    // Thumb2 STRD expects its source registers to be in rGPR. Not a problem
    // for gsub_0, but needs an extra constraint for gsub_1 (which could be sp
    // otherwise).
    if (Register::isVirtualRegister(SrcReg)) {
      MachineRegisterInfo *MRI = &MF.getRegInfo();
      MRI->constrainRegClass(SrcReg, &ARM::GPRPairnospRegClass);
    }

    MachineInstrBuilder MIB = BuildMI(MBB, I, DL, get(ARM::t2STRDi8));
    AddDReg(MIB, SrcReg, ARM::gsub_0, getKillRegState(isKill), TRI);
    AddDReg(MIB, SrcReg, ARM::gsub_1, 0, TRI);
    MIB.addFrameIndex(FI).addImm(0).addMemOperand(MMO).add(predOps(ARMCC::AL));
    return;
  }

  ARMBaseInstrInfo::storeRegToStackSlot(MBB, I, SrcReg, isKill, FI, RC, TRI);
}

void Thumb2InstrInfo::
loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
                     unsigned DestReg, int FI,
                     const TargetRegisterClass *RC,
                     const TargetRegisterInfo *TRI) const {
  MachineFunction &MF = *MBB.getParent();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MachineMemOperand *MMO = MF.getMachineMemOperand(
      MachinePointerInfo::getFixedStack(MF, FI), MachineMemOperand::MOLoad,
      MFI.getObjectSize(FI), MFI.getObjectAlignment(FI));
  DebugLoc DL;
  if (I != MBB.end()) DL = I->getDebugLoc();

  if (ARM::GPRRegClass.hasSubClassEq(RC)) {
    BuildMI(MBB, I, DL, get(ARM::t2LDRi12), DestReg)
        .addFrameIndex(FI)
        .addImm(0)
        .addMemOperand(MMO)
        .add(predOps(ARMCC::AL));
    return;
  }

  if (ARM::GPRPairRegClass.hasSubClassEq(RC)) {
    // Thumb2 LDRD expects its dest-registers to be in rGPR. Not a problem for
    // gsub_0, but needs an extra constraint for gsub_1 (which could be sp
    // otherwise).
    if (Register::isVirtualRegister(DestReg)) {
      MachineRegisterInfo *MRI = &MF.getRegInfo();
      MRI->constrainRegClass(DestReg, &ARM::GPRPairnospRegClass);
    }

    MachineInstrBuilder MIB = BuildMI(MBB, I, DL, get(ARM::t2LDRDi8));
    AddDReg(MIB, DestReg, ARM::gsub_0, RegState::DefineNoRead, TRI);
    AddDReg(MIB, DestReg, ARM::gsub_1, RegState::DefineNoRead, TRI);
    MIB.addFrameIndex(FI).addImm(0).addMemOperand(MMO).add(predOps(ARMCC::AL));

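    // For a physical register pair, also mark the full pair as implicitly
    // defined so the two sub-register defs leave the super-register fully
    // defined.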
    if (Register::isPhysicalRegister(DestReg))
      MIB.addReg(DestReg, RegState::ImplicitDefine);
    return;
  }

  ARMBaseInstrInfo::loadRegFromStackSlot(MBB, I, DestReg, FI, RC, TRI);
}

void Thumb2InstrInfo::expandLoadStackGuard(
    MachineBasicBlock::iterator MI) const {
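  // Materialize the stack-guard address either pc-relatively (for
  // position-independent code) or as an absolute 32-bit immediate, then let
  // the shared base implementation emit the load through it with t2LDRi12.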
  MachineFunction &MF = *MI->getParent()->getParent();
  if (MF.getTarget().isPositionIndependent())
    expandLoadStackGuardBase(MI, ARM::t2MOV_ga_pcrel, ARM::t2LDRi12);
  else
    expandLoadStackGuardBase(MI, ARM::t2MOVi32imm, ARM::t2LDRi12);
}

void llvm::emitT2RegPlusImmediate(MachineBasicBlock &MBB,
                                  MachineBasicBlock::iterator &MBBI,
                                  const DebugLoc &dl, unsigned DestReg,
                                  unsigned BaseReg, int NumBytes,
                                  ARMCC::CondCodes Pred, unsigned PredReg,
                                  const ARMBaseInstrInfo &TII,
                                  unsigned MIFlags) {
  if (NumBytes == 0 && DestReg != BaseReg) {
    BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVr), DestReg)
        .addReg(BaseReg, RegState::Kill)
        .addImm((unsigned)Pred).addReg(PredReg).setMIFlags(MIFlags);
    return;
  }

  bool isSub = NumBytes < 0;
  if (isSub) NumBytes = -NumBytes;

  // If profitable, use a movw or movt to materialize the offset.
  // FIXME: Use the scavenger to grab a scratch register.
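  // This is only done when the offset is at least 4096 and cannot be encoded
  // as a Thumb-2 modified immediate, and DestReg can act as the scratch
  // register (it is neither SP nor the base register).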
  if (DestReg != ARM::SP && DestReg != BaseReg &&
      NumBytes >= 4096 &&
      ARM_AM::getT2SOImmVal(NumBytes) == -1) {
    bool Fits = false;
    if (NumBytes < 65536) {
      // Use a movw to materialize the 16-bit constant.
      BuildMI(MBB, MBBI, dl, TII.get(ARM::t2MOVi16), DestReg)
          .addImm(NumBytes)
          .addImm((unsigned)Pred).addReg(PredReg).setMIFlags(MIFlags);
      Fits = true;
    } else if ((NumBytes & 0xffff) == 0) {
      // Use a movt to materialize the 32-bit constant.
      BuildMI(MBB, MBBI, dl, TII.get(ARM::t2MOVTi16), DestReg)
          .addReg(DestReg)
          .addImm(NumBytes >> 16)
          .addImm((unsigned)Pred).addReg(PredReg).setMIFlags(MIFlags);
      Fits = true;
    }

    if (Fits) {
      if (isSub) {
        BuildMI(MBB, MBBI, dl, TII.get(ARM::t2SUBrr), DestReg)
            .addReg(BaseReg)
            .addReg(DestReg, RegState::Kill)
            .add(predOps(Pred, PredReg))
            .add(condCodeOp())
            .setMIFlags(MIFlags);
      } else {
        // Here we know that DestReg is not SP but we do not know anything
        // about BaseReg. t2ADDrr is an invalid instruction if SP is used as
        // the second argument, but it is fine if SP is the first argument.
        // To be sure we do not generate an invalid encoding, put BaseReg
        // first.
        BuildMI(MBB, MBBI, dl, TII.get(ARM::t2ADDrr), DestReg)
            .addReg(BaseReg)
            .addReg(DestReg, RegState::Kill)
            .add(predOps(Pred, PredReg))
            .add(condCodeOp())
            .setMIFlags(MIFlags);
      }
      return;
    }
  }

  while (NumBytes) {
    unsigned ThisVal = NumBytes;
    unsigned Opc = 0;
    if (DestReg == ARM::SP && BaseReg != ARM::SP) {
      // mov sp, rn. Note t2MOVr cannot be used.
      BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVr), DestReg)
          .addReg(BaseReg)
          .setMIFlags(MIFlags)
          .add(predOps(ARMCC::AL));
      BaseReg = ARM::SP;
      continue;
    }

    assert((DestReg != ARM::SP || BaseReg == ARM::SP) &&
           "Writing to SP, from other register.");

    // Try to use T1, as it is smaller.
    if ((DestReg == ARM::SP) && (ThisVal < ((1 << 7) - 1) * 4)) {
      assert((ThisVal & 3) == 0 && "Stack update is not multiple of 4?");
      Opc = isSub ? ARM::tSUBspi : ARM::tADDspi;
      BuildMI(MBB, MBBI, dl, TII.get(Opc), DestReg)
          .addReg(BaseReg)
          .addImm(ThisVal / 4)
          .setMIFlags(MIFlags)
          .add(predOps(ARMCC::AL));
      break;
    }
    bool HasCCOut = true;
    int ImmIsT2SO = ARM_AM::getT2SOImmVal(ThisVal);
    bool ToSP = DestReg == ARM::SP;
    unsigned t2SUB = ToSP ? ARM::t2SUBspImm : ARM::t2SUBri;
    unsigned t2ADD = ToSP ? ARM::t2ADDspImm : ARM::t2ADDri;
    unsigned t2SUBi12 = ToSP ? ARM::t2SUBspImm12 : ARM::t2SUBri12;
    unsigned t2ADDi12 = ToSP ? ARM::t2ADDspImm12 : ARM::t2ADDri12;
    Opc = isSub ? t2SUB : t2ADD;
    // Prefer T2: sub rd, rn, so_imm | sub sp, sp, so_imm
    if (ImmIsT2SO != -1) {
      NumBytes = 0;
    } else if (ThisVal < 4096) {
      // Prefer T3 if we can do it in a single go: subw rd, rn, imm12 |
      // subw sp, sp, imm12
      Opc = isSub ? t2SUBi12 : t2ADDi12;
      HasCCOut = false;
      NumBytes = 0;
    } else {
      // Use one T2 instruction to reduce NumBytes.
      // FIXME: Move this to ARMAddressingModes.h?
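      // Peel off the 8 most significant bits of NumBytes (aligned to its top
      // set bit); the assert below checks that this chunk is itself encodable
      // as a Thumb-2 modified immediate.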
      unsigned RotAmt = countLeadingZeros(ThisVal);
      ThisVal = ThisVal & ARM_AM::rotr32(0xff000000U, RotAmt);
      NumBytes &= ~ThisVal;
      assert(ARM_AM::getT2SOImmVal(ThisVal) != -1 &&
             "Bit extraction didn't work?");
    }

    // Build the new ADD / SUB.
    MachineInstrBuilder MIB = BuildMI(MBB, MBBI, dl, TII.get(Opc), DestReg)
                                  .addReg(BaseReg, RegState::Kill)
                                  .addImm(ThisVal)
                                  .add(predOps(ARMCC::AL))
                                  .setMIFlags(MIFlags);
    if (HasCCOut)
      MIB.add(condCodeOp());

    BaseReg = DestReg;
  }
}

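// Map an i12 (positive-offset) load/store/preload opcode to the i8 form that
// can encode a negative offset; i8 opcodes map to themselves.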
static unsigned
negativeOffsetOpcode(unsigned opcode)
{
  switch (opcode) {
  case ARM::t2LDRi12: return ARM::t2LDRi8;
  case ARM::t2LDRHi12: return ARM::t2LDRHi8;
  case ARM::t2LDRBi12: return ARM::t2LDRBi8;
  case ARM::t2LDRSHi12: return ARM::t2LDRSHi8;
  case ARM::t2LDRSBi12: return ARM::t2LDRSBi8;
  case ARM::t2STRi12: return ARM::t2STRi8;
  case ARM::t2STRBi12: return ARM::t2STRBi8;
  case ARM::t2STRHi12: return ARM::t2STRHi8;
  case ARM::t2PLDi12: return ARM::t2PLDi8;
  case ARM::t2PLDWi12: return ARM::t2PLDWi8;
  case ARM::t2PLIi12: return ARM::t2PLIi8;

  case ARM::t2LDRi8:
  case ARM::t2LDRHi8:
  case ARM::t2LDRBi8:
  case ARM::t2LDRSHi8:
  case ARM::t2LDRSBi8:
  case ARM::t2STRi8:
  case ARM::t2STRBi8:
  case ARM::t2STRHi8:
  case ARM::t2PLDi8:
  case ARM::t2PLDWi8:
  case ARM::t2PLIi8:
    return opcode;

  default:
    llvm_unreachable("unknown thumb2 opcode.");
  }
}

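// Map an i8 (negative-offset) load/store/preload opcode to the i12 form used
// for positive offsets; i12 opcodes map to themselves.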
static unsigned
positiveOffsetOpcode(unsigned opcode)
{
  switch (opcode) {
  case ARM::t2LDRi8: return ARM::t2LDRi12;
  case ARM::t2LDRHi8: return ARM::t2LDRHi12;
  case ARM::t2LDRBi8: return ARM::t2LDRBi12;
  case ARM::t2LDRSHi8: return ARM::t2LDRSHi12;
  case ARM::t2LDRSBi8: return ARM::t2LDRSBi12;
  case ARM::t2STRi8: return ARM::t2STRi12;
  case ARM::t2STRBi8: return ARM::t2STRBi12;
  case ARM::t2STRHi8: return ARM::t2STRHi12;
  case ARM::t2PLDi8: return ARM::t2PLDi12;
  case ARM::t2PLDWi8: return ARM::t2PLDWi12;
  case ARM::t2PLIi8: return ARM::t2PLIi12;

  case ARM::t2LDRi12:
  case ARM::t2LDRHi12:
  case ARM::t2LDRBi12:
  case ARM::t2LDRSHi12:
  case ARM::t2LDRSBi12:
  case ARM::t2STRi12:
  case ARM::t2STRBi12:
  case ARM::t2STRHi12:
  case ARM::t2PLDi12:
  case ARM::t2PLDWi12:
  case ARM::t2PLIi12:
    return opcode;

  default:
    llvm_unreachable("unknown thumb2 opcode.");
  }
}

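// Map a register-offset load/store/preload opcode to its i12 immediate form;
// immediate (i12 and i8) opcodes map to themselves.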
static unsigned
immediateOffsetOpcode(unsigned opcode)
{
  switch (opcode) {
  case ARM::t2LDRs: return ARM::t2LDRi12;
  case ARM::t2LDRHs: return ARM::t2LDRHi12;
  case ARM::t2LDRBs: return ARM::t2LDRBi12;
  case ARM::t2LDRSHs: return ARM::t2LDRSHi12;
  case ARM::t2LDRSBs: return ARM::t2LDRSBi12;
  case ARM::t2STRs: return ARM::t2STRi12;
  case ARM::t2STRBs: return ARM::t2STRBi12;
  case ARM::t2STRHs: return ARM::t2STRHi12;
  case ARM::t2PLDs: return ARM::t2PLDi12;
  case ARM::t2PLDWs: return ARM::t2PLDWi12;
  case ARM::t2PLIs: return ARM::t2PLIi12;

  case ARM::t2LDRi12:
  case ARM::t2LDRHi12:
  case ARM::t2LDRBi12:
  case ARM::t2LDRSHi12:
  case ARM::t2LDRSBi12:
  case ARM::t2STRi12:
  case ARM::t2STRBi12:
  case ARM::t2STRHi12:
  case ARM::t2PLDi12:
  case ARM::t2PLDWi12:
  case ARM::t2PLIi12:
  case ARM::t2LDRi8:
  case ARM::t2LDRHi8:
  case ARM::t2LDRBi8:
  case ARM::t2LDRSHi8:
  case ARM::t2LDRSBi8:
  case ARM::t2STRi8:
  case ARM::t2STRBi8:
  case ARM::t2STRHi8:
  case ARM::t2PLDi8:
  case ARM::t2PLDWi8:
  case ARM::t2PLIi8:
    return opcode;

  default:
    llvm_unreachable("unknown thumb2 opcode.");
  }
}

bool llvm::rewriteT2FrameIndex(MachineInstr &MI, unsigned FrameRegIdx,
                               unsigned FrameReg, int &Offset,
                               const ARMBaseInstrInfo &TII,
                               const TargetRegisterInfo *TRI) {
  unsigned Opcode = MI.getOpcode();
  const MCInstrDesc &Desc = MI.getDesc();
  unsigned AddrMode = (Desc.TSFlags & ARMII::AddrModeMask);
  bool isSub = false;

  MachineFunction &MF = *MI.getParent()->getParent();
  const TargetRegisterClass *RegClass =
      TII.getRegClass(Desc, FrameRegIdx, TRI, MF);

  // Memory operands in inline assembly always use AddrModeT2_i12.
  if (Opcode == ARM::INLINEASM || Opcode == ARM::INLINEASM_BR)
    AddrMode = ARMII::AddrModeT2_i12; // FIXME. mode for thumb2?

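  // The frame index is the operand of an add: fold the offset into the add
  // itself, turning it into a plain move, a modified-immediate add/sub, an
  // addw/subw, or peeling off as much of the offset as fits.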
  const bool IsSP = Opcode == ARM::t2ADDspImm12 || Opcode == ARM::t2ADDspImm;
  if (IsSP || Opcode == ARM::t2ADDri || Opcode == ARM::t2ADDri12) {
    Offset += MI.getOperand(FrameRegIdx+1).getImm();

    unsigned PredReg;
    if (Offset == 0 && getInstrPredicate(MI, PredReg) == ARMCC::AL &&
        !MI.definesRegister(ARM::CPSR)) {
      // Turn it into a move.
      MI.setDesc(TII.get(ARM::tMOVr));
      MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
      // Remove offset and remaining explicit predicate operands.
      do MI.RemoveOperand(FrameRegIdx+1);
      while (MI.getNumOperands() > FrameRegIdx+1);
      MachineInstrBuilder MIB(*MI.getParent()->getParent(), &MI);
      MIB.add(predOps(ARMCC::AL));
      return true;
    }

    bool HasCCOut = (Opcode != ARM::t2ADDspImm12 && Opcode != ARM::t2ADDri12);

    if (Offset < 0) {
      Offset = -Offset;
      isSub = true;
      MI.setDesc(IsSP ? TII.get(ARM::t2SUBspImm) : TII.get(ARM::t2SUBri));
    } else {
      MI.setDesc(IsSP ? TII.get(ARM::t2ADDspImm) : TII.get(ARM::t2ADDri));
    }

    // Common case: small offset, fits into instruction.
    if (ARM_AM::getT2SOImmVal(Offset) != -1) {
      MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
      MI.getOperand(FrameRegIdx+1).ChangeToImmediate(Offset);
      // Add cc_out operand if the original instruction did not have one.
      if (!HasCCOut)
        MI.addOperand(MachineOperand::CreateReg(0, false));
      Offset = 0;
      return true;
    }
    // Another common case: imm12.
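    // The 12-bit addw/subw forms have no cc_out, so they can only replace an
    // add/sub whose flag result is unused (its cc_out operand is noreg).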
    if (Offset < 4096 &&
        (!HasCCOut || MI.getOperand(MI.getNumOperands()-1).getReg() == 0)) {
      unsigned NewOpc = isSub ? IsSP ? ARM::t2SUBspImm12 : ARM::t2SUBri12
                              : IsSP ? ARM::t2ADDspImm12 : ARM::t2ADDri12;
      MI.setDesc(TII.get(NewOpc));
      MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
      MI.getOperand(FrameRegIdx+1).ChangeToImmediate(Offset);
      // Remove the cc_out operand.
      if (HasCCOut)
        MI.RemoveOperand(MI.getNumOperands()-1);
      Offset = 0;
      return true;
    }

    // Otherwise, extract 8 adjacent bits from the immediate into this
    // t2ADDri/t2SUBri.
    unsigned RotAmt = countLeadingZeros<unsigned>(Offset);
    unsigned ThisImmVal = Offset & ARM_AM::rotr32(0xff000000U, RotAmt);

    // We will handle these bits from offset, clear them.
    Offset &= ~ThisImmVal;

    assert(ARM_AM::getT2SOImmVal(ThisImmVal) != -1 &&
           "Bit extraction didn't work?");
    MI.getOperand(FrameRegIdx+1).ChangeToImmediate(ThisImmVal);
    // Add cc_out operand if the original instruction did not have one.
    if (!HasCCOut)
      MI.addOperand(MachineOperand::CreateReg(0, false));
  } else {
    // AddrMode4 and AddrMode6 cannot handle any offset.
    if (AddrMode == ARMII::AddrMode4 || AddrMode == ARMII::AddrMode6)
      return false;

    // AddrModeT2_so cannot handle any offset. If there is no offset
    // register then we change to an immediate version.
    unsigned NewOpc = Opcode;
    if (AddrMode == ARMII::AddrModeT2_so) {
      Register OffsetReg = MI.getOperand(FrameRegIdx + 1).getReg();
      if (OffsetReg != 0) {
        MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
        return Offset == 0;
      }

      MI.RemoveOperand(FrameRegIdx+1);
      MI.getOperand(FrameRegIdx+1).ChangeToImmediate(0);
      NewOpc = immediateOffsetOpcode(Opcode);
      AddrMode = ARMII::AddrModeT2_i12;
    }

    unsigned NumBits = 0;
    unsigned Scale = 1;
    if (AddrMode == ARMII::AddrModeT2_i8 || AddrMode == ARMII::AddrModeT2_i12) {
      // i8 supports only negative offsets, and i12 only positive ones, so
      // pick the appropriate instruction based on the sign of Offset.
      Offset += MI.getOperand(FrameRegIdx+1).getImm();
      if (Offset < 0) {
        NewOpc = negativeOffsetOpcode(Opcode);
        NumBits = 8;
        isSub = true;
        Offset = -Offset;
      } else {
        NewOpc = positiveOffsetOpcode(Opcode);
        NumBits = 12;
      }
    } else if (AddrMode == ARMII::AddrMode5) {
      // VFP address mode.
      const MachineOperand &OffOp = MI.getOperand(FrameRegIdx+1);
      int InstrOffs = ARM_AM::getAM5Offset(OffOp.getImm());
      if (ARM_AM::getAM5Op(OffOp.getImm()) == ARM_AM::sub)
        InstrOffs *= -1;
      NumBits = 8;
      Scale = 4;
      Offset += InstrOffs * 4;
      assert((Offset & (Scale-1)) == 0 && "Can't encode this offset!");
      if (Offset < 0) {
        Offset = -Offset;
        isSub = true;
      }
    } else if (AddrMode == ARMII::AddrMode5FP16) {
      // VFP address mode.
      const MachineOperand &OffOp = MI.getOperand(FrameRegIdx+1);
      int InstrOffs = ARM_AM::getAM5FP16Offset(OffOp.getImm());
      if (ARM_AM::getAM5FP16Op(OffOp.getImm()) == ARM_AM::sub)
        InstrOffs *= -1;
      NumBits = 8;
      Scale = 2;
      Offset += InstrOffs * 2;
      assert((Offset & (Scale-1)) == 0 && "Can't encode this offset!");
      if (Offset < 0) {
        Offset = -Offset;
        isSub = true;
      }
    } else if (AddrMode == ARMII::AddrModeT2_i7s4 ||
               AddrMode == ARMII::AddrModeT2_i7s2 ||
               AddrMode == ARMII::AddrModeT2_i7) {
      Offset += MI.getOperand(FrameRegIdx + 1).getImm();
      unsigned OffsetMask;
      switch (AddrMode) {
      case ARMII::AddrModeT2_i7s4: NumBits = 9; OffsetMask = 0x3; break;
      case ARMII::AddrModeT2_i7s2: NumBits = 8; OffsetMask = 0x1; break;
      default: NumBits = 7; OffsetMask = 0x0; break;
      }
      // MCInst operand expects already scaled value.
      Scale = 1;
      assert((Offset & OffsetMask) == 0 && "Can't encode this offset!");
      (void)OffsetMask; // squash unused-variable warning in NDEBUG builds
    } else if (AddrMode == ARMII::AddrModeT2_i8s4) {
      Offset += MI.getOperand(FrameRegIdx + 1).getImm() * 4;
      NumBits = 8 + 2;
      // MCInst operand expects already scaled value.
      Scale = 1;
      assert((Offset & 3) == 0 && "Can't encode this offset!");
    } else if (AddrMode == ARMII::AddrModeT2_ldrex) {
      Offset += MI.getOperand(FrameRegIdx + 1).getImm() * 4;
      NumBits = 8; // 8 bits scaled by 4
      Scale = 4;
      assert((Offset & 3) == 0 && "Can't encode this offset!");
    } else {
      llvm_unreachable("Unsupported addressing mode!");
    }

    if (NewOpc != Opcode)
      MI.setDesc(TII.get(NewOpc));

    MachineOperand &ImmOp = MI.getOperand(FrameRegIdx+1);

    // Attempt to fold the address computation.
    // Common case: small offset, fits into instruction. We need to make sure
    // the register class is correct too, for instructions like the MVE
    // VLDRH.32, which only accepts low tGPR registers.
    int ImmedOffset = Offset / Scale;
    unsigned Mask = (1 << NumBits) - 1;
    if ((unsigned)Offset <= Mask * Scale &&
        (Register::isVirtualRegister(FrameReg) ||
         RegClass->contains(FrameReg))) {
      if (Register::isVirtualRegister(FrameReg)) {
        // Make sure the register class for the virtual register is correct.
        MachineRegisterInfo *MRI = &MF.getRegInfo();
        if (!MRI->constrainRegClass(FrameReg, RegClass))
          llvm_unreachable("Unable to constrain virtual register class.");
      }

      // Replace the FrameIndex with fp/sp.
      MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
      if (isSub) {
        if (AddrMode == ARMII::AddrMode5 || AddrMode == ARMII::AddrMode5FP16)
          // FIXME: Not consistent.
          ImmedOffset |= 1 << NumBits;
        else
          ImmedOffset = -ImmedOffset;
      }
      ImmOp.ChangeToImmediate(ImmedOffset);
      Offset = 0;
      return true;
    }

    // Otherwise, the offset doesn't fit. Pull in what we can to simplify.
    ImmedOffset = ImmedOffset & Mask;
    if (isSub) {
      if (AddrMode == ARMII::AddrMode5 || AddrMode == ARMII::AddrMode5FP16)
        // FIXME: Not consistent.
        ImmedOffset |= 1 << NumBits;
      else {
        ImmedOffset = -ImmedOffset;
        if (ImmedOffset == 0)
          // Change the opcode back if the encoded offset is zero.
          MI.setDesc(TII.get(positiveOffsetOpcode(NewOpc)));
      }
    }
    ImmOp.ChangeToImmediate(ImmedOffset);
    Offset &= ~(Mask*Scale);
  }

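  // Report whether the offset was folded completely and the base register is
  // acceptable for this instruction; any remainder is left for the caller.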
  Offset = (isSub) ? -Offset : Offset;
  return Offset == 0 && (Register::isVirtualRegister(FrameReg) ||
                         RegClass->contains(FrameReg));
}

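// Same as getInstrPredicate, except it returns AL for conditional branches,
// which are predicated but never placed inside IT blocks.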
ARMCC::CondCodes llvm::getITInstrPredicate(const MachineInstr &MI,
                                           unsigned &PredReg) {
  unsigned Opc = MI.getOpcode();
  if (Opc == ARM::tBcc || Opc == ARM::t2Bcc)
    return ARMCC::AL;
  return getInstrPredicate(MI, PredReg);
}

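// Return the index of MI's first operand with a vpred operand type, or -1 if
// the instruction has none.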
int llvm::findFirstVPTPredOperandIdx(const MachineInstr &MI) {
  const MCInstrDesc &MCID = MI.getDesc();

  if (!MCID.OpInfo)
    return -1;

  for (unsigned i = 0, e = MCID.getNumOperands(); i != e; ++i)
    if (ARM::isVpred(MCID.OpInfo[i].OperandType))
      return i;

  return -1;
}

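// Return the VPT predication code of MI and, through PredReg, the predicate
// register operand that follows it (PredReg is 0 if MI is not VPT-predicated).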
ARMVCC::VPTCodes llvm::getVPTInstrPredicate(const MachineInstr &MI,
                                            unsigned &PredReg) {
  int PIdx = findFirstVPTPredOperandIdx(MI);
  if (PIdx == -1) {
    PredReg = 0;
    return ARMVCC::None;
  }

  PredReg = MI.getOperand(PIdx+1).getReg();
  return (ARMVCC::VPTCodes)MI.getOperand(PIdx).getImm();
}
