1 //===-- ThumbRegisterInfo.cpp - Thumb-1 Register Information -------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file contains the Thumb-1 implementation of the TargetRegisterInfo
10 // class.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "ThumbRegisterInfo.h"
15 #include "ARMBaseInstrInfo.h"
16 #include "ARMMachineFunctionInfo.h"
17 #include "ARMSubtarget.h"
18 #include "MCTargetDesc/ARMAddressingModes.h"
19 #include "llvm/CodeGen/MachineConstantPool.h"
20 #include "llvm/CodeGen/MachineFrameInfo.h"
21 #include "llvm/CodeGen/MachineFunction.h"
22 #include "llvm/CodeGen/MachineInstrBuilder.h"
23 #include "llvm/CodeGen/MachineRegisterInfo.h"
24 #include "llvm/CodeGen/RegisterScavenging.h"
25 #include "llvm/IR/Constants.h"
26 #include "llvm/IR/DerivedTypes.h"
27 #include "llvm/IR/Function.h"
28 #include "llvm/IR/LLVMContext.h"
29 #include "llvm/Support/CommandLine.h"
30 #include "llvm/Support/ErrorHandling.h"
31 #include "llvm/CodeGen/TargetFrameLowering.h"
32 #include "llvm/Target/TargetMachine.h"
33 
34 namespace llvm {
35 extern cl::opt<bool> ReuseFrameIndexVals;
36 }
37 
38 using namespace llvm;
39 
// ThumbRegisterInfo carries no state beyond its ARMBaseRegisterInfo base, so
// the compiler-generated default constructor is sufficient.
ThumbRegisterInfo::ThumbRegisterInfo() = default;
41 
42 const TargetRegisterClass *
43 ThumbRegisterInfo::getLargestLegalSuperClass(const TargetRegisterClass *RC,
44                                               const MachineFunction &MF) const {
45   if (!MF.getSubtarget<ARMSubtarget>().isThumb1Only())
46     return ARMBaseRegisterInfo::getLargestLegalSuperClass(RC, MF);
47 
48   if (ARM::tGPRRegClass.hasSubClassEq(RC))
49     return &ARM::tGPRRegClass;
50   return ARMBaseRegisterInfo::getLargestLegalSuperClass(RC, MF);
51 }
52 
53 const TargetRegisterClass *
54 ThumbRegisterInfo::getPointerRegClass(const MachineFunction &MF,
55                                       unsigned Kind) const {
56   if (!MF.getSubtarget<ARMSubtarget>().isThumb1Only())
57     return ARMBaseRegisterInfo::getPointerRegClass(MF, Kind);
58   return &ARM::tGPRRegClass;
59 }
60 
61 static void emitThumb1LoadConstPool(MachineBasicBlock &MBB,
62                                     MachineBasicBlock::iterator &MBBI,
63                                     const DebugLoc &dl, unsigned DestReg,
64                                     unsigned SubIdx, int Val,
65                                     ARMCC::CondCodes Pred, unsigned PredReg,
66                                     unsigned MIFlags) {
67   MachineFunction &MF = *MBB.getParent();
68   const ARMSubtarget &STI = MF.getSubtarget<ARMSubtarget>();
69   const TargetInstrInfo &TII = *STI.getInstrInfo();
70   MachineConstantPool *ConstantPool = MF.getConstantPool();
71   const Constant *C = ConstantInt::get(
72           Type::getInt32Ty(MBB.getParent()->getFunction().getContext()), Val);
73   unsigned Idx = ConstantPool->getConstantPoolIndex(C, Align(4));
74 
75   BuildMI(MBB, MBBI, dl, TII.get(ARM::tLDRpci))
76     .addReg(DestReg, getDefRegState(true), SubIdx)
77     .addConstantPoolIndex(Idx).addImm(Pred).addReg(PredReg)
78     .setMIFlags(MIFlags);
79 }
80 
81 static void emitThumb2LoadConstPool(MachineBasicBlock &MBB,
82                                     MachineBasicBlock::iterator &MBBI,
83                                     const DebugLoc &dl, unsigned DestReg,
84                                     unsigned SubIdx, int Val,
85                                     ARMCC::CondCodes Pred, unsigned PredReg,
86                                     unsigned MIFlags) {
87   MachineFunction &MF = *MBB.getParent();
88   const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
89   MachineConstantPool *ConstantPool = MF.getConstantPool();
90   const Constant *C = ConstantInt::get(
91            Type::getInt32Ty(MBB.getParent()->getFunction().getContext()), Val);
92   unsigned Idx = ConstantPool->getConstantPoolIndex(C, Align(4));
93 
94   BuildMI(MBB, MBBI, dl, TII.get(ARM::t2LDRpci))
95       .addReg(DestReg, getDefRegState(true), SubIdx)
96       .addConstantPoolIndex(Idx)
97       .add(predOps(ARMCC::AL))
98       .setMIFlags(MIFlags);
99 }
100 
101 /// emitLoadConstPool - Emits a load from constpool to materialize the
102 /// specified immediate.
103 void ThumbRegisterInfo::emitLoadConstPool(
104     MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI,
105     const DebugLoc &dl, Register DestReg, unsigned SubIdx, int Val,
106     ARMCC::CondCodes Pred, Register PredReg, unsigned MIFlags) const {
107   MachineFunction &MF = *MBB.getParent();
108   const ARMSubtarget &STI = MF.getSubtarget<ARMSubtarget>();
109   if (STI.isThumb1Only()) {
110     assert((isARMLowRegister(DestReg) || DestReg.isVirtual()) &&
111            "Thumb1 does not have ldr to high register");
112     return emitThumb1LoadConstPool(MBB, MBBI, dl, DestReg, SubIdx, Val, Pred,
113                                    PredReg, MIFlags);
114   }
115   return emitThumb2LoadConstPool(MBB, MBBI, dl, DestReg, SubIdx, Val, Pred,
116                                  PredReg, MIFlags);
117 }
118 
/// emitThumbRegPlusImmInReg - Emits a series of instructions to materialize a
/// destreg = basereg + immediate in Thumb code. Materialize the immediate in a
/// register using mov / mvn (armv6-M >) sequences, movs / lsls / adds / lsls /
/// adds / lsls / adds sequences (armv6-M) or load the immediate from a
/// constpool entry. If CanChangeCC is false, flag-clobbering forms are
/// avoided (and on execute-only targets CPSR is explicitly saved/restored
/// around the expansion).
static void emitThumbRegPlusImmInReg(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI,
    const DebugLoc &dl, Register DestReg, Register BaseReg, int NumBytes,
    bool CanChangeCC, const TargetInstrInfo &TII,
    const ARMBaseRegisterInfo &MRI, unsigned MIFlags = MachineInstr::NoFlags) {
  MachineFunction &MF = *MBB.getParent();
  const ARMSubtarget &ST = MF.getSubtarget<ARMSubtarget>();

  // Use a single sp-relative add if the immediate is small enough.
  // tADDrSPi encodes a word-scaled immediate of up to 1020.
  if (BaseReg == ARM::SP &&
      (DestReg.isVirtual() || isARMLowRegister(DestReg)) && NumBytes >= 0 &&
      NumBytes <= 1020 && (NumBytes % 4) == 0) {
    BuildMI(MBB, MBBI, dl, TII.get(ARM::tADDrSPi), DestReg)
        .addReg(ARM::SP)
        .addImm(NumBytes / 4)
        .add(predOps(ARMCC::AL))
        .setMIFlags(MIFlags);
    return;
  }

  bool isHigh = !isARMLowRegister(DestReg) ||
                (BaseReg != 0 && !isARMLowRegister(BaseReg));
  bool isSub = false;
  // Subtract doesn't have high register version. Load the negative value
  // if either base or dest register is a high register. Also, if do not
  // issue sub as part of the sequence if condition register is to be
  // preserved.
  if (NumBytes < 0 && !isHigh && CanChangeCC) {
    isSub = true;
    NumBytes = -NumBytes;
  }
  // LdReg is where the immediate is materialized. A high physical DestReg
  // can't be the target of the Thumb1 mov/ldr forms below, so stage the
  // value in a fresh low vreg instead.
  Register LdReg = DestReg;
  if (DestReg == ARM::SP)
    assert(BaseReg == ARM::SP && "Unexpected!");
  if (!isARMLowRegister(DestReg) && !DestReg.isVirtual())
    LdReg = MF.getRegInfo().createVirtualRegister(&ARM::tGPRRegClass);

  if (NumBytes <= 255 && NumBytes >= 0 && CanChangeCC) {
    // Small non-negative immediate: a single flag-setting MOV.
    BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVi8), LdReg)
        .add(t1CondCodeOp())
        .addImm(NumBytes)
        .setMIFlags(MIFlags);
  } else if (NumBytes < 0 && NumBytes >= -255 && CanChangeCC) {
    // Small negative immediate: MOV then negate with RSB.
    BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVi8), LdReg)
        .add(t1CondCodeOp())
        .addImm(NumBytes)
        .setMIFlags(MIFlags);
    BuildMI(MBB, MBBI, dl, TII.get(ARM::tRSB), LdReg)
        .add(t1CondCodeOp())
        .addReg(LdReg, RegState::Kill)
        .setMIFlags(MIFlags);
  } else if (ST.genExecuteOnly()) {
    // Execute-only: constant-pool loads are not permitted, so synthesize the
    // immediate in the register.
    if (ST.useMovt()) {
      BuildMI(MBB, MBBI, dl, TII.get(ARM::t2MOVi32imm ), LdReg)
          .addImm(NumBytes)
          .setMIFlags(MIFlags);
    } else if (!CanChangeCC) {
      // tMOVi32imm is lowered to a sequence of flag-setting instructions, so
      // if CPSR is live we need to save and restore CPSR around it.
      // TODO Try inserting the tMOVi32imm at an earlier point, where CPSR is
      // dead.
      bool LiveCpsr = false, CpsrWrite = false;
      auto isCpsr = [](auto &MO) { return MO.getReg() == ARM::CPSR; };
      for (auto Iter = MBBI; Iter != MBB.instr_end(); ++Iter) {
        // If CPSR is used after this instruction (and there's not a def before
        // that) then CPSR is live.
        if (any_of(Iter->all_uses(), isCpsr)) {
          LiveCpsr = true;
          break;
        }
        if (any_of(Iter->all_defs(), isCpsr)) {
          CpsrWrite = true;
          break;
        }
      }
      // If there's no use or def of CPSR then it may be live if it's a
      // live-out value.
      auto liveOutIsCpsr = [](auto &Out) { return Out.PhysReg == ARM::CPSR; };
      if (!LiveCpsr && !CpsrWrite)
        LiveCpsr = any_of(MBB.liveouts(), liveOutIsCpsr);

      Register CPSRSaveReg;
      unsigned APSREncoding;
      if (LiveCpsr) {
        // Save the APSR flags into a scratch low register via MRS...
        CPSRSaveReg = MF.getRegInfo().createVirtualRegister(&ARM::tGPRRegClass);
        APSREncoding =
            ARMSysReg::lookupMClassSysRegByName("apsr_nzcvq")->Encoding;
        BuildMI(MBB, MBBI, dl, TII.get(ARM::t2MRS_M), CPSRSaveReg)
            .addImm(APSREncoding)
            .add(predOps(ARMCC::AL))
            .addReg(ARM::CPSR, RegState::Implicit);
      }
      BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVi32imm), LdReg)
          .addImm(NumBytes)
          .setMIFlags(MIFlags);
      if (LiveCpsr) {
        // ...and restore them with MSR after the clobbering sequence.
        BuildMI(MBB, MBBI, dl, TII.get(ARM::t2MSR_M))
            .addImm(APSREncoding)
            .addReg(CPSRSaveReg, RegState::Kill)
            .add(predOps(ARMCC::AL));
      }
    } else {
      BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVi32imm), LdReg)
          .addImm(NumBytes)
          .setMIFlags(MIFlags);
    }
  } else
    // General case: load the immediate from a constant-pool entry.
    MRI.emitLoadConstPool(MBB, MBBI, dl, LdReg, 0, NumBytes, ARMCC::AL, 0,
                          MIFlags);

  // Emit add / sub.
  int Opc = (isSub) ? ARM::tSUBrr
                    : ((isHigh || !CanChangeCC) ? ARM::tADDhirr : ARM::tADDrr);
  MachineInstrBuilder MIB = BuildMI(MBB, MBBI, dl, TII.get(Opc), DestReg);
  if (Opc != ARM::tADDhirr)
    MIB = MIB.add(t1CondCodeOp());
  if (DestReg == ARM::SP || isSub)
    MIB.addReg(BaseReg).addReg(LdReg, RegState::Kill);
  else
    MIB.addReg(LdReg).addReg(BaseReg, RegState::Kill);
  MIB.add(predOps(ARMCC::AL));
}
246 
/// emitThumbRegPlusImmediate - Emits a series of instructions to materialize
/// a destreg = basereg + immediate in Thumb code. Tries a series of ADDs or
/// SUBs first, and uses a constant pool value if the instruction sequence would
/// be too long. This is allowed to modify the condition flags.
void llvm::emitThumbRegPlusImmediate(MachineBasicBlock &MBB,
                                     MachineBasicBlock::iterator &MBBI,
                                     const DebugLoc &dl, Register DestReg,
                                     Register BaseReg, int NumBytes,
                                     const TargetInstrInfo &TII,
                                     const ARMBaseRegisterInfo &MRI,
                                     unsigned MIFlags) {
  // Work with the magnitude of the offset; isSub records the direction.
  bool isSub = NumBytes < 0;
  unsigned Bytes = (unsigned)NumBytes;
  if (isSub) Bytes = -NumBytes;

  // Parameters of the two instruction kinds selected below:
  //   *Bits  - width of the encodable immediate field
  //   *Scale - scaling applied to the immediate by the encoding
  //   *NeedsCC - whether the instruction clobbers the condition flags
  int CopyOpc = 0;
  unsigned CopyBits = 0;
  unsigned CopyScale = 1;
  bool CopyNeedsCC = false;
  int ExtraOpc = 0;
  unsigned ExtraBits = 0;
  unsigned ExtraScale = 1;
  bool ExtraNeedsCC = false;

  // Strategy:
  // We need to select two types of instruction, maximizing the available
  // immediate range of each. The instructions we use will depend on whether
  // DestReg and BaseReg are low, high or the stack pointer.
  // * CopyOpc  - DestReg = BaseReg + imm
  //              This will be emitted once if DestReg != BaseReg, and never if
  //              DestReg == BaseReg.
  // * ExtraOpc - DestReg = DestReg + imm
  //              This will be emitted as many times as necessary to add the
  //              full immediate.
  // If the immediate ranges of these instructions are not large enough to cover
  // NumBytes with a reasonable number of instructions, we fall back to using a
  // value loaded from a constant pool.
  if (DestReg == ARM::SP) {
    if (BaseReg == ARM::SP) {
      // sp -> sp
      // Already in right reg, no copy needed
    } else {
      // low -> sp or high -> sp
      CopyOpc = ARM::tMOVr;
      CopyBits = 0;
    }
    ExtraOpc = isSub ? ARM::tSUBspi : ARM::tADDspi;
    ExtraBits = 7;
    ExtraScale = 4;
  } else if (isARMLowRegister(DestReg)) {
    if (BaseReg == ARM::SP) {
      // sp -> low
      assert(!isSub && "Thumb1 does not have tSUBrSPi");
      CopyOpc = ARM::tADDrSPi;
      CopyBits = 8;
      CopyScale = 4;
    } else if (DestReg == BaseReg) {
      // low -> same low
      // Already in right reg, no copy needed
    } else if (isARMLowRegister(BaseReg)) {
      // low -> different low
      CopyOpc = isSub ? ARM::tSUBi3 : ARM::tADDi3;
      CopyBits = 3;
      CopyNeedsCC = true;
    } else {
      // high -> low
      CopyOpc = ARM::tMOVr;
      CopyBits = 0;
    }
    ExtraOpc = isSub ? ARM::tSUBi8 : ARM::tADDi8;
    ExtraBits = 8;
    ExtraNeedsCC = true;
  } else /* DestReg is high */ {
    if (DestReg == BaseReg) {
      // high -> same high
      // Already in right reg, no copy needed
    } else {
      // {low,high,sp} -> high
      CopyOpc = ARM::tMOVr;
      CopyBits = 0;
    }
    ExtraOpc = 0;
  }

  // We could handle an unaligned immediate with an unaligned copy instruction
  // and an aligned extra instruction, but this case is not currently needed.
  assert(((Bytes & 3) == 0 || ExtraScale == 1) &&
         "Unaligned offset, but all instructions require alignment");

  // Maximum offset a single copy instruction can absorb.
  unsigned CopyRange = ((1 << CopyBits) - 1) * CopyScale;
  // If we would emit the copy with an immediate of 0, just use tMOVr.
  if (CopyOpc && Bytes < CopyScale) {
    CopyOpc = ARM::tMOVr;
    CopyScale = 1;
    CopyNeedsCC = false;
    CopyRange = 0;
  }
  unsigned ExtraRange = ((1 << ExtraBits) - 1) * ExtraScale; // per instruction
  unsigned RequiredCopyInstrs = CopyOpc ? 1 : 0;
  unsigned RangeAfterCopy = (CopyRange > Bytes) ? 0 : (Bytes - CopyRange);

  // We could handle this case when the copy instruction does not require an
  // aligned immediate, but we do not currently do this.
  assert(RangeAfterCopy % ExtraScale == 0 &&
         "Extra instruction requires immediate to be aligned");

  // Count how many in-place add/sub instructions the remainder needs; a huge
  // sentinel value forces the constant-pool fallback when no ExtraOpc exists.
  unsigned RequiredExtraInstrs;
  if (ExtraRange)
    RequiredExtraInstrs = alignTo(RangeAfterCopy, ExtraRange) / ExtraRange;
  else if (RangeAfterCopy > 0)
    // We need an extra instruction but none is available
    RequiredExtraInstrs = 1000000;
  else
    RequiredExtraInstrs = 0;
  unsigned RequiredInstrs = RequiredCopyInstrs + RequiredExtraInstrs;
  unsigned Threshold = (DestReg == ARM::SP) ? 3 : 2;

  // Use a constant pool, if the sequence of ADDs/SUBs is too expensive.
  if (RequiredInstrs > Threshold) {
    emitThumbRegPlusImmInReg(MBB, MBBI, dl,
                             DestReg, BaseReg, NumBytes, true,
                             TII, MRI, MIFlags);
    return;
  }

  // Emit zero or one copy instructions
  if (CopyOpc) {
    unsigned CopyImm = std::min(Bytes, CopyRange) / CopyScale;
    Bytes -= CopyImm * CopyScale;

    MachineInstrBuilder MIB = BuildMI(MBB, MBBI, dl, TII.get(CopyOpc), DestReg);
    if (CopyNeedsCC)
      MIB = MIB.add(t1CondCodeOp());
    MIB.addReg(BaseReg, RegState::Kill);
    if (CopyOpc != ARM::tMOVr) {
      MIB.addImm(CopyImm);
    }
    MIB.setMIFlags(MIFlags).add(predOps(ARMCC::AL));

    // From here on the remainder is added in place on DestReg.
    BaseReg = DestReg;
  }

  // Emit zero or more in-place add/sub instructions
  while (Bytes) {
    unsigned ExtraImm = std::min(Bytes, ExtraRange) / ExtraScale;
    Bytes -= ExtraImm * ExtraScale;

    MachineInstrBuilder MIB = BuildMI(MBB, MBBI, dl, TII.get(ExtraOpc), DestReg);
    if (ExtraNeedsCC)
      MIB = MIB.add(t1CondCodeOp());
    MIB.addReg(BaseReg)
       .addImm(ExtraImm)
       .add(predOps(ARMCC::AL))
       .setMIFlags(MIFlags);
  }
}
403 
404 static void removeOperands(MachineInstr &MI, unsigned i) {
405   unsigned Op = i;
406   for (unsigned e = MI.getNumOperands(); i != e; ++i)
407     MI.removeOperand(Op);
408 }
409 
410 /// convertToNonSPOpcode - Change the opcode to the non-SP version, because
411 /// we're replacing the frame index with a non-SP register.
412 static unsigned convertToNonSPOpcode(unsigned Opcode) {
413   switch (Opcode) {
414   case ARM::tLDRspi:
415     return ARM::tLDRi;
416 
417   case ARM::tSTRspi:
418     return ARM::tSTRi;
419   }
420 
421   return Opcode;
422 }
423 
424 bool ThumbRegisterInfo::rewriteFrameIndex(MachineBasicBlock::iterator II,
425                                           unsigned FrameRegIdx,
426                                           Register FrameReg, int &Offset,
427                                           const ARMBaseInstrInfo &TII) const {
428   MachineInstr &MI = *II;
429   MachineBasicBlock &MBB = *MI.getParent();
430   MachineFunction &MF = *MBB.getParent();
431   assert(MBB.getParent()->getSubtarget<ARMSubtarget>().isThumb1Only() &&
432          "This isn't needed for thumb2!");
433   DebugLoc dl = MI.getDebugLoc();
434   MachineInstrBuilder MIB(*MBB.getParent(), &MI);
435   unsigned Opcode = MI.getOpcode();
436   const MCInstrDesc &Desc = MI.getDesc();
437   unsigned AddrMode = (Desc.TSFlags & ARMII::AddrModeMask);
438 
439   if (Opcode == ARM::tADDframe) {
440     Offset += MI.getOperand(FrameRegIdx+1).getImm();
441     Register DestReg = MI.getOperand(0).getReg();
442 
443     emitThumbRegPlusImmediate(MBB, II, dl, DestReg, FrameReg, Offset, TII,
444                               *this);
445     MBB.erase(II);
446     return true;
447   } else {
448     if (AddrMode != ARMII::AddrModeT1_s)
449       llvm_unreachable("Unsupported addressing mode!");
450 
451     unsigned ImmIdx = FrameRegIdx + 1;
452     int InstrOffs = MI.getOperand(ImmIdx).getImm();
453     unsigned NumBits = (FrameReg == ARM::SP) ? 8 : 5;
454     unsigned Scale = 4;
455 
456     Offset += InstrOffs * Scale;
457     assert((Offset & (Scale - 1)) == 0 && "Can't encode this offset!");
458 
459     // Common case: small offset, fits into instruction.
460     MachineOperand &ImmOp = MI.getOperand(ImmIdx);
461     int ImmedOffset = Offset / Scale;
462     unsigned Mask = (1 << NumBits) - 1;
463 
464     if ((unsigned)Offset <= Mask * Scale) {
465       // Replace the FrameIndex with the frame register (e.g., sp).
466       Register DestReg = FrameReg;
467 
468       // In case FrameReg is a high register, move it to a low reg to ensure it
469       // can be used as an operand.
470       if (ARM::hGPRRegClass.contains(FrameReg) && FrameReg != ARM::SP) {
471         DestReg = MF.getRegInfo().createVirtualRegister(&ARM::tGPRRegClass);
472         BuildMI(MBB, II, dl, TII.get(ARM::tMOVr), DestReg)
473             .addReg(FrameReg)
474             .add(predOps(ARMCC::AL));
475       }
476 
477       MI.getOperand(FrameRegIdx).ChangeToRegister(DestReg, false);
478       ImmOp.ChangeToImmediate(ImmedOffset);
479 
480       // If we're using a register where sp was stored, convert the instruction
481       // to the non-SP version.
482       unsigned NewOpc = convertToNonSPOpcode(Opcode);
483       if (NewOpc != Opcode && FrameReg != ARM::SP)
484         MI.setDesc(TII.get(NewOpc));
485 
486       return true;
487     }
488 
489     // The offset doesn't fit, but we may be able to put some of the offset into
490     // the ldr to simplify the generation of the rest of it.
491     NumBits = 5;
492     Mask = (1 << NumBits) - 1;
493     InstrOffs = 0;
494     auto &ST = MF.getSubtarget<ARMSubtarget>();
495     // If using the maximum ldr offset will put the rest into the range of a
496     // single sp-relative add then do so.
497     if (FrameReg == ARM::SP && Offset - (Mask * Scale) <= 1020) {
498       InstrOffs = Mask;
499     } else if (ST.genExecuteOnly()) {
500       // With execute-only the offset is generated either with movw+movt or an
501       // add+lsl sequence. If subtracting an offset will make the top half zero
502       // then that saves a movt or lsl+add. Otherwise if we don't have movw then
503       // we may be able to subtract a value such that it makes the bottom byte
504       // zero, saving an add.
505       unsigned BottomBits = (Offset / Scale) & Mask;
506       bool CanMakeBottomByteZero = ((Offset - BottomBits * Scale) & 0xff) == 0;
507       bool TopHalfZero = (Offset & 0xffff0000) == 0;
508       bool CanMakeTopHalfZero = ((Offset - Mask * Scale) & 0xffff0000) == 0;
509       if (!TopHalfZero && CanMakeTopHalfZero)
510         InstrOffs = Mask;
511       else if (!ST.useMovt() && CanMakeBottomByteZero)
512         InstrOffs = BottomBits;
513     }
514     ImmOp.ChangeToImmediate(InstrOffs);
515     Offset -= InstrOffs * Scale;
516   }
517 
518   return Offset == 0;
519 }
520 
521 void ThumbRegisterInfo::resolveFrameIndex(MachineInstr &MI, Register BaseReg,
522                                           int64_t Offset) const {
523   const MachineFunction &MF = *MI.getParent()->getParent();
524   const ARMSubtarget &STI = MF.getSubtarget<ARMSubtarget>();
525   if (!STI.isThumb1Only())
526     return ARMBaseRegisterInfo::resolveFrameIndex(MI, BaseReg, Offset);
527 
528   const ARMBaseInstrInfo &TII = *STI.getInstrInfo();
529   int Off = Offset; // ARM doesn't need the general 64-bit offsets
530   unsigned i = 0;
531 
532   while (!MI.getOperand(i).isFI()) {
533     ++i;
534     assert(i < MI.getNumOperands() && "Instr doesn't have FrameIndex operand!");
535   }
536   bool Done = rewriteFrameIndex(MI, i, BaseReg, Off, TII);
537   assert (Done && "Unable to resolve frame index!");
538   (void)Done;
539 }
540 
/// eliminateFrameIndex - Replace the abstract frame-index operand of the
/// instruction at II with a concrete frame register + offset, emitting any
/// extra instructions needed when the offset does not fit the instruction's
/// immediate field. Returns true if the instruction was fully rewritten by
/// rewriteFrameIndex (e.g. a tADDframe pseudo was expanded and erased).
bool ThumbRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
                                            int SPAdj, unsigned FIOperandNum,
                                            RegScavenger *RS) const {
  MachineInstr &MI = *II;
  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();
  const ARMSubtarget &STI = MF.getSubtarget<ARMSubtarget>();
  // Only Thumb1 needs this implementation; everything else uses the ARM base.
  if (!STI.isThumb1Only())
    return ARMBaseRegisterInfo::eliminateFrameIndex(II, SPAdj, FIOperandNum,
                                                    RS);

  // VReg is only assigned on the store path below, where a scratch low vreg
  // is needed to hold the computed address.
  Register VReg;
  const ARMBaseInstrInfo &TII = *STI.getInstrInfo();
  DebugLoc dl = MI.getDebugLoc();
  MachineInstrBuilder MIB(*MBB.getParent(), &MI);

  // Resolve the frame index to a base register plus a constant offset.
  Register FrameReg;
  int FrameIndex = MI.getOperand(FIOperandNum).getIndex();
  const ARMFrameLowering *TFI = getFrameLowering(MF);
  int Offset = TFI->ResolveFrameIndexReference(MF, FrameIndex, FrameReg, SPAdj);

  // PEI::scavengeFrameVirtualRegs() cannot accurately track SPAdj because the
  // call frame setup/destroy instructions have already been eliminated.  That
  // means the stack pointer cannot be used to access the emergency spill slot
  // when !hasReservedCallFrame().
#ifndef NDEBUG
  if (RS && FrameReg == ARM::SP && RS->isScavengingFrameIndex(FrameIndex)){
    assert(STI.getFrameLowering()->hasReservedCallFrame(MF) &&
           "Cannot use SP to access the emergency spill slot in "
           "functions without a reserved call frame");
    assert(!MF.getFrameInfo().hasVarSizedObjects() &&
           "Cannot use SP to access the emergency spill slot in "
           "functions with variable sized frame objects");
  }
#endif // NDEBUG

  // Special handling of dbg_value instructions.
  if (MI.isDebugValue()) {
    MI.getOperand(FIOperandNum).  ChangeToRegister(FrameReg, false /*isDef*/);
    MI.getOperand(FIOperandNum+1).ChangeToImmediate(Offset);
    return false;
  }

  // Modify MI as necessary to handle as much of 'Offset' as possible
  assert(MF.getInfo<ARMFunctionInfo>()->isThumbFunction() &&
         "This eliminateFrameIndex only supports Thumb1!");
  if (rewriteFrameIndex(MI, FIOperandNum, FrameReg, Offset, TII))
    return true;

  // If we get here, the immediate doesn't fit into the instruction.  We folded
  // as much as possible above, handle the rest, providing a register that is
  // SP+LargeImm.
  assert(Offset && "This code isn't needed if offset already handled!");

  unsigned Opcode = MI.getOpcode();

  // Remove predicate first.
  int PIdx = MI.findFirstPredOperandIdx();
  if (PIdx != -1)
    removeOperands(MI, PIdx);

  if (MI.mayLoad()) {
    // Use the destination register to materialize sp + offset.
    Register TmpReg = MI.getOperand(0).getReg();
    bool UseRR = false;
    if (Opcode == ARM::tLDRspi) {
      if (FrameReg == ARM::SP || STI.genExecuteOnly())
        emitThumbRegPlusImmInReg(MBB, II, dl, TmpReg, FrameReg,
                                 Offset, false, TII, *this);
      else {
        // Materialize the offset from the constant pool, then either use the
        // [reg, reg] addressing mode (low FrameReg) or add FrameReg in
        // explicitly (high FrameReg).
        emitLoadConstPool(MBB, II, dl, TmpReg, 0, Offset);
        if (!ARM::hGPRRegClass.contains(FrameReg)) {
          UseRR = true;
        } else {
          // If FrameReg is a high register, add the reg values in a separate
          // instruction as the load won't be able to access it.
          BuildMI(MBB, II, dl, TII.get(ARM::tADDhirr), TmpReg)
              .addReg(TmpReg)
              .addReg(FrameReg)
              .add(predOps(ARMCC::AL));
        }
      }
    } else {
      emitThumbRegPlusImmediate(MBB, II, dl, TmpReg, FrameReg, Offset, TII,
                                *this);
    }

    // Rewrite to a plain register-based load; TmpReg (killed) now holds the
    // computed address (or base, in the UseRR case).
    MI.setDesc(TII.get(UseRR ? ARM::tLDRr : ARM::tLDRi));
    MI.getOperand(FIOperandNum).ChangeToRegister(TmpReg, false, false, true);
    if (UseRR) {
      assert(!ARM::hGPRRegClass.contains(FrameReg) &&
             "Thumb1 loads can't use high register");
      // Use [reg, reg] addrmode. Replace the immediate operand w/ the frame
      // register. The offset is already handled in the vreg value.
      MI.getOperand(FIOperandNum+1).ChangeToRegister(FrameReg, false, false,
                                                     false);
    }
  } else if (MI.mayStore()) {
      // Stores can't reuse the source register, so compute the address into a
      // fresh low vreg.
      VReg = MF.getRegInfo().createVirtualRegister(&ARM::tGPRRegClass);
      bool UseRR = false;

      if (Opcode == ARM::tSTRspi) {
        if (FrameReg == ARM::SP || STI.genExecuteOnly())
          emitThumbRegPlusImmInReg(MBB, II, dl, VReg, FrameReg,
                                   Offset, false, TII, *this);
        else {
          emitLoadConstPool(MBB, II, dl, VReg, 0, Offset);
          if (!ARM::hGPRRegClass.contains(FrameReg)) {
            UseRR = true;
          } else {
            // If FrameReg is a high register, add the reg values in a separate
            // instruction as the load won't be able to access it.
            BuildMI(MBB, II, dl, TII.get(ARM::tADDhirr), VReg)
                .addReg(VReg)
                .addReg(FrameReg)
                .add(predOps(ARMCC::AL));
          }
        }
      } else
        emitThumbRegPlusImmediate(MBB, II, dl, VReg, FrameReg, Offset, TII,
                                  *this);
      // Rewrite to a plain register-based store using the computed address.
      MI.setDesc(TII.get(UseRR ? ARM::tSTRr : ARM::tSTRi));
      MI.getOperand(FIOperandNum).ChangeToRegister(VReg, false, false, true);
      if (UseRR) {
        assert(!ARM::hGPRRegClass.contains(FrameReg) &&
               "Thumb1 stores can't use high register");
        // Use [reg, reg] addrmode. Replace the immediate operand w/ the frame
        // register. The offset is already handled in the vreg value.
        MI.getOperand(FIOperandNum+1).ChangeToRegister(FrameReg, false, false,
                                                       false);
      }
  } else {
    llvm_unreachable("Unexpected opcode!");
  }

  // Add predicate back if it's needed.
  if (MI.isPredicable())
    MIB.add(predOps(ARMCC::AL));
  return false;
}
681 
682 bool
683 ThumbRegisterInfo::useFPForScavengingIndex(const MachineFunction &MF) const {
684   if (MF.getSubtarget<ARMSubtarget>().isThumb1Only()) {
685     // For Thumb1, the emergency spill slot must be some small positive
686     // offset from the base/stack pointer.
687     return false;
688   }
689   // For Thumb2, put the emergency spill slot next to FP.
690   return true;
691 }
692